Chactun dataset: Example of the aitlas toolbox for image segmentation#

This notebook shows a sample implementation of image segmentation using the aitlas toolbox.

Import the required packages#

[ ]:
from aitlas.datasets import ChactunDataset
from aitlas.models import DeepLabV3
from aitlas.transforms import MinMaxNormTranspose
from aitlas.utils import image_loader

Visualize images and masks#

[2]:
dataset_config = {
    "data_dir": "../data/chactun/train"
}
dataset = ChactunDataset(dataset_config)

print(f"Total number of patches: {len(dataset)}")
dataset.show_image(897);
dataset.show_image(10);
Total number of patches: 1764
../_images/examples_semantic_segmentation_example_chactun_4_1.png
../_images/examples_semantic_segmentation_example_chactun_4_2.png
[6]:
dataset.data_distribution_table()
[6]:
Class        Number of pixels
Aguada       1357783.0
Building     6904853.0
Platform     8656330.0
[7]:
dataset.data_distribution_barchart();
../_images/examples_semantic_segmentation_example_chactun_6_0.png
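
The pixel counts above show a strong class imbalance: Aguada covers far fewer pixels than Building or Platform. As a side illustration (not part of the aitlas workflow shown in this notebook), inverse-frequency class weights could be derived from these counts, for example to weight a loss function:

[ ]:
# Hypothetical helper: turn the pixel counts printed above into
# inverse-frequency class weights (rarer classes get larger weights).
pixel_counts = {"Aguada": 1357783.0, "Building": 6904853.0, "Platform": 8656330.0}

total = sum(pixel_counts.values())
weights = {name: total / count for name, count in pixel_counts.items()}
norm = sum(weights.values()) / len(weights)  # normalize so the weights average to 1
weights = {name: round(w / norm, 3) for name, w in weights.items()}
print(weights)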

Load train data#

[3]:
train_dataset_config = {
    "batch_size": 16,
    "shuffle": True,
    "data_dir": "../data/chactun/train",
    "joint_transforms": ["aitlas.transforms.FlipHVRandomRotate"],
    "transforms": ["aitlas.transforms.MinMaxNormTranspose"],
    "target_transforms": ["aitlas.transforms.Transpose"]
}
train_dataset = ChactunDataset(train_dataset_config)
len(train_dataset)
[3]:
1764
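
The transform names hint at what they do: MinMaxNormTranspose presumably rescales pixel values to [0, 1] and reorders the image from (H, W, C) to the (C, H, W) layout PyTorch expects, while Transpose only reorders the mask axes. A rough NumPy equivalent of the image transform, stated as an assumption rather than the library's actual code:

[ ]:
import numpy as np

def min_max_norm_transpose(image):
    # Assumed behaviour: scale values to [0, 1], then (H, W, C) -> (C, H, W)
    image = (image - image.min()) / (image.max() - image.min() + 1e-8)
    return np.transpose(image, (2, 0, 1)).astype(np.float32)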

Create the model#

[4]:
epochs = 50
model_directory = "./experiments/chactun"
model_config = {
    "num_classes": 3,
    "learning_rate": 0.0001,
    "pretrained": True,
    "threshold": 0.5,
    "metrics": ["iou"]
}

model = DeepLabV3(model_config)
model.prepare()
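
The "threshold": 0.5 entry controls how the per-class probability maps are turned into binary masks. A minimal PyTorch sketch of that thresholding step, assuming the model emits one sigmoid probability map per class (an illustration, not aitlas internals):

[ ]:
import torch

# Stand-in for model output: (batch, num_classes, height, width) logits
logits = torch.randn(1, 3, 480, 480)
probs = torch.sigmoid(logits)   # per-class probabilities in [0, 1]
masks = probs > 0.5             # binarize with the configured threshold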

Start the training#

[ ]:
model.train_model(
    train_dataset=train_dataset,
    epochs=epochs,
    model_directory=model_directory,
    run_id='1'
)

Evaluate the model using test data#

[ ]:
test_dataset_config = {
    "batch_size": 4,
    "shuffle": False,
    "num_workers": 4,
    "data_dir": "../data/chactun/test",
    "transforms": ["aitlas.transforms.MinMaxNormTranspose"],
    "target_transforms": ["aitlas.transforms.Transpose"]
}

test_dataset = ChactunDataset(test_dataset_config)
print(f"Total number of patches: {len(test_dataset)}")

model = DeepLabV3(model_config)
model.prepare()
model.running_metrics.reset()
model_path = "./experiments/chactun/checkpoint.pth.tar"
model.evaluate(dataset=test_dataset, model_path=model_path)
model.running_metrics.get_scores(model.metrics)
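
The "iou" metric reported here is intersection over union, computed on the binarized masks. A minimal NumPy sketch of the per-class computation (for illustration only; the aitlas implementation may differ in detail):

[ ]:
import numpy as np

def iou_score(pred_mask, true_mask, eps=1e-7):
    # Intersection over union for a pair of binary masks of equal shape
    pred_mask, true_mask = pred_mask.astype(bool), true_mask.astype(bool)
    intersection = np.logical_and(pred_mask, true_mask).sum()
    union = np.logical_or(pred_mask, true_mask).sum()
    return (intersection + eps) / (union + eps)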

Predictions#

[10]:
model_path = "./experiments/chactun/checkpoint.pth.tar"
#labels = ChactunDataset.labels
labels = ["Aguada", "Building", "Platform"]
transform = MinMaxNormTranspose()
model.load_model(model_path)

image = image_loader('../data/chactun/predict/tile_1741_lidar.tif')
fig = model.predict_masks(image, labels, transform)

image = image_loader('../data/chactun/predict/tile_1763_lidar.tif')
fig = model.predict_masks(image, labels, transform)

image = image_loader('../data/chactun/predict/tile_1730_lidar.tif')
fig = model.predict_masks(image, labels, transform)

image = image_loader('../data/chactun/predict/tile_1724_lidar.tif')
fig = model.predict_masks(image, labels, transform)
2022-10-31 12:10:13,006 INFO Loading checkpoint ./experiments/chactun/checkpoint.pth.tar
2022-10-31 12:10:13,557 INFO Loaded checkpoint ./experiments/chactun/checkpoint.pth.tar at epoch 51
../_images/examples_semantic_segmentation_example_chactun_16_1.png
../_images/examples_semantic_segmentation_example_chactun_16_2.png
../_images/examples_semantic_segmentation_example_chactun_16_3.png
../_images/examples_semantic_segmentation_example_chactun_16_4.png