We normally do something like that - not sure why it's freezing for you without more info
We use albumentations with scripts that execute remotely and have no issues. Good question from CostlyOstrich36
Never tried to start it locally in ClearML
` # dataset_class.py
import numpy as np
from PIL import Image
from torch.utils.data import Dataset as BaseDataset


class Dataset(BaseDataset):
    def __init__(self, images_fps, masks_fps, augmentation=None):
        self.augmentation = augmentation
        self.images_fps = images_fps
        self.masks_fps = masks_fps
        self.ids = len(images_fps)

    def __getitem__(self, i):
        # read data (albumentations expects numpy arrays, not PIL images)
        img = np.array(Image.open(self.images_fps[i]))
        mask = np.array(Image.open(self.masks_fps[i]))
        # apply augmentations
        if self.augmentation:
            sample = self.augmentation(image=img, mask=mask)
            img, mask = sample["image"], sample["mask"]
        return img, mask

    def __len__(self):
        return self.ids


# training_script.py
from dataset_class import Dataset
import albumentations as albu
from torch.utils.data import DataLoader

# usual clearml config etc

train_albs = [
    albu.HorizontalFlip(p=0.5),
]
augs = albu.Compose(train_albs)

train_dataset = Dataset(images_fps, masks_fps, augmentation=augs)
train_loader_l = DataLoader(
    train_dataset,
    batch_size=16,
    shuffle=True,
    num_workers=0,
) `
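In case it helps, the "usual clearml config etc" comment above is just standard ClearML boilerplate - here's a minimal sketch of what that typically looks like; the project, task, and queue names are placeholders, not our actual setup:
` # clearml boilerplate sketch - names below are placeholders
from clearml import Task

# registers the run with the ClearML server
task = Task.init(project_name="segmentation", task_name="train")

# when enqueuing the script to run on a remote agent instead of locally:
# task.execute_remotely(queue_name="default") `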
When I run locally w/o clearml, it doesn't freeze