AIRS Dataset: Example of the aitlas toolbox for semantic segmentation#

This notebook shows a sample implementation of image segmentation using the aitlas toolbox.

Import the required packages#

[ ]:
from aitlas.datasets import AIRSDataset
from aitlas.models import DeepLabV3
from aitlas.utils import image_loader
from aitlas.transforms import MinMaxNormTranspose

Visualize images and masks#

[ ]:
dataset_config = {
    "data_dir": "./data/airs/images",
    "csv_file": "./data/airs/train.txt"
}
dataset = AIRSDataset(dataset_config)

print(f"Total number of patches: {len(dataset)}")
dataset.show_image(1567);
dataset.show_image(793);
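
For a quick manual look at a single sample, the dataset can also be indexed directly. A minimal sketch, assuming AIRSDataset follows the standard torch Dataset protocol and returns an (image, mask) pair per index; the exact return types depend on the configured transforms:

[ ]:
import matplotlib.pyplot as plt

# Assumption: dataset[idx] yields one (image, mask) patch
image, mask = dataset[1567]

fig, (ax_img, ax_mask) = plt.subplots(1, 2, figsize=(10, 5))
ax_img.imshow(image)
ax_img.set_title("Image")
ax_mask.imshow(mask)
ax_mask.set_title("Roof mask")
plt.show()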

Load train data#

[ ]:
train_dataset_config = {
    "batch_size": 4,
    "shuffle": True,
    "num_workers": 4,
    "data_dir": "./data/airs/images",
    "csv_file": "./data/airs/train.txt",
    "transforms": ["aitlas.transforms.MinMaxNormTransponse"],
    "target_transforms": ["aitlas.transforms.Transponse"]
}
train_dataset = AIRSDataset(train_dataset_config)
len(train_dataset)
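
The batch_size, shuffle and num_workers settings are consumed by the toolbox's own data loading, but since the dataset follows the torch Dataset protocol you can also pull one batch by hand to sanity-check the shapes the model will receive. A minimal sketch, assuming the configured transforms produce channel-first arrays:

[ ]:
from torch.utils.data import DataLoader

loader = DataLoader(train_dataset, batch_size=4, shuffle=True)
images, masks = next(iter(loader))

# Expected roughly (4, 3, H, W) after MinMaxNormTranspose; the mask shape
# depends on how the target transform encodes the classes
print(images.shape, masks.shape)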

Create the model#

[ ]:
epochs = 5
model_directory = "./data/experiments/airs"
model_config = {
    "num_classes": 5,
    "learning_rate": 0.0001,
    "pretrained": True,
    "threshold": 0.5,
    "metrics": ["iou"]
}

model = DeepLabV3(model_config)
model.prepare()
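
The threshold value controls how per-pixel class probabilities are turned into a hard mask. Purely as an illustration of that step (not of aitlas internals), a pixel is assigned to a class when its sigmoid score exceeds the threshold:

[ ]:
import torch

# Hypothetical logits for a 2 x 2 patch, for illustration only
logits = torch.tensor([[0.3, -1.2], [2.1, 0.0]])
probs = torch.sigmoid(logits)
mask = probs > 0.5  # the "threshold" from model_config
print(probs)
print(mask)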

Start the training#

[ ]:
model.train_model(
    train_dataset=train_dataset,
    epochs=epochs,
    model_directory=model_directory,
    run_id='1'
)
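
Training writes checkpoints under model_directory (the evaluation step below loads checkpoint.pth.tar from there). The exact file layout is managed by aitlas, so a plain directory listing is an easy way to confirm what was saved for this run:

[ ]:
import os

for root, _, files in os.walk(model_directory):
    for name in files:
        print(os.path.join(root, name))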

Evaluate the model using test data#

[ ]:
test_dataset_config = {
    "batch_size": 4,
    "shuffle": False,
    "num_workers": 4,
    "data_dir": "./data/airs/images",
    "csv_file": "./data/airs/test.txt",
    "transforms": ["aitlas.transforms.MinMaxNormTransponse"],
    "target_transforms": ["aitlas.transforms.Transponse"]
}

test_dataset = AIRSDataset(test_dataset_config)
len(test_dataset)

model = DeepLabV3(model_config)
model.prepare()
model_path = "./data/experiments/airs/checkpoint.pth.tar"
model.evaluate(dataset=test_dataset, model_path=model_path)
model.running_metrics.get_scores(["iou"])
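
IoU (intersection over union) is the ratio of pixels both predicted and labelled as a class to the pixels predicted or labelled as that class. A tiny self-contained example of the computation, independent of aitlas:

[ ]:
import numpy as np

pred = np.array([[1, 1], [0, 1]], dtype=bool)
true = np.array([[1, 0], [0, 1]], dtype=bool)

intersection = np.logical_and(pred, true).sum()  # 2 pixels
union = np.logical_or(pred, true).sum()          # 3 pixels
print(intersection / union)                      # ~0.667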

Predictions#

[ ]:
model_path = "./data/experiments/airs/checkpoint.pth.tar"
#labels = AIRSDataset.labels
labels = ["Background", "Roof"]
transform = MinMaxNormTranspose()
model.load_model(model_path)

image = image_loader('./data/predict/image1.jpg')
fig = model.predict_masks(image, labels, transform)

image = image_loader('./data/predict/image2.jpg')
fig = model.predict_masks(image, labels, transform)

image = image_loader('./data/predict/image3.jpg')
fig = model.predict_masks(image, labels, transform)
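
The three calls above generalize to a loop over a folder. A small sketch that also saves each output, assuming the value returned by predict_masks is a matplotlib figure (as the fig assignment above suggests):

[ ]:
import glob

for path in sorted(glob.glob('./data/predict/*.jpg')):
    image = image_loader(path)
    fig = model.predict_masks(image, labels, transform)
    fig.savefig(path.replace('.jpg', '_prediction.png'))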