Commit 563cda8
virginiafdez committed Jul 28, 2023 · 0 parents (initial commit)
Showing 71 changed files with 12,902 additions and 0 deletions.
@@ -0,0 +1,2 @@
# Project exclude paths
/venv/
@@ -0,0 +1,42 @@
general:
  labels:
    0: 'background'
    1: 'csf'
    2: 'gm'
    3: 'wm'
    4: 'dgm'
    5: 'brainstem'
    6: 'wmh'
    7: 'tumour'
    8: 'edema'
    9: 'gdtumour'
label_generator:
  cond_map:
    0: 'wmh'
    1: 'tumour'
    2: 'edema'
    3: 'gdtumour'
  vae_uri: ''
  ldm_uri: ""
  wanted_cond: ['wmh', 'tumour', 'edema', 'gdtumour']
  save_to: ""
  cond_boundaries:
    'wmh': [0.0097, 0.0923]
    'tumour': [0.0224, 0.1833]
    'edema': [0.1408, 0.3595]
    'gdtumour': [0.0712, 0.4280]
  image_shape: []
  n_labels: 200
  formats: ['.nii.gz']
  scheduler_type: 'pndm'
  kwargs_scheduler: ""
  scale_factor: 1.0 # Make sure it matches the scale factor used during training!
image_generator:
  save_to: ""
  data_dict: ""
  style_dict: ""
  sequences: ['T1', 'FLAIR', 'T2']
  checkpoints_path: ""
  formats: ['.nii.gz']
  append_ims: ""
  n_passes: 3
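Since omegaconf is pinned in requirements.txt, a config like this one is plausibly consumed through OmegaConf. A minimal sketch, assuming a filename of sampling_config.yaml (this commit does not show how the config is loaded, so both the filename and the access pattern are illustrative assumptions):

    # Minimal sketch (assumption): loading and querying this config with OmegaConf.
    from omegaconf import OmegaConf

    cfg = OmegaConf.load("sampling_config.yaml")  # hypothetical filename

    # Integer-keyed map from conditioning index to lesion name, e.g. 0 -> 'wmh'.
    cond_map = OmegaConf.to_container(cfg.label_generator.cond_map)
    print(cond_map)  # {0: 'wmh', 1: 'tumour', 2: 'edema', 3: 'gdtumour'}

    # Each conditioning value is expected to fall inside its per-lesion boundaries.
    low, high = cfg.label_generator.cond_boundaries['wmh']
    print(f"'wmh' conditioning range: [{low}, {high}]")  # [0.0097, 0.0923]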
@@ -0,0 +1,16 @@
FROM nvcr.io/nvidia/pytorch:22.12-py3
ARG USER_ID
ARG GROUP_ID
ARG USER
ARG DEBIAN_FRONTEND=noninteractive
RUN addgroup --gid $GROUP_ID $USER
RUN adduser --disabled-password --gecos "" --uid $USER_ID --gid $GROUP_ID $USER
COPY requirements.txt .
COPY requirements-dev.txt .
COPY requirements-min.txt .
RUN pip install --upgrade pip
RUN apt-get update \
    && apt-get install -y sudo
RUN pip3 install -r requirements.txt
RUN pip3 install -r requirements-dev.txt
USER root
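Since USER_ID, GROUP_ID, and USER are declared as build arguments with no defaults, building this image presumably requires passing them explicitly, e.g. `docker build --build-arg USER_ID=$(id -u) --build-arg GROUP_ID=$(id -g) --build-arg USER=$USER .` (an assumed invocation; the commit does not include a build script). Note also that requirements-min.txt is copied into the image but never installed by any RUN step.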
@@ -0,0 +1,98 @@
absl-py==1.4.0
alembic==1.9.2
antlr4-python3-runtime==4.9.3
beautifulsoup4==4.11.1
cachetools==5.3.0
certifi==2022.12.7
chardet==5.1.0
charset-normalizer==3.0.1
click==8.1.3
cloudpickle==2.2.1
contourpy==1.0.7
cycler==0.11.0
databricks-cli==0.17.4
dill==0.3.6
docker==6.0.1
dominate==2.7.0
entrypoints==0.4
filelock==3.9.0
Flask==2.2.2
fonttools==4.38.0
gdown==4.6.0
gitdb==4.0.10
GitPython==3.1.30
google-auth==2.16.0
google-auth-oauthlib==0.4.6
greenlet==2.0.2
grpcio==1.51.1
gunicorn==20.1.0
idna==3.4
imageio==2.25.0
importlib-metadata==5.2.0
itsdangerous==2.1.2
Jinja2==3.1.2
joblib==1.2.0
kiwisolver==1.4.4
llvmlite==0.39.1
lpips==0.1.4
Mako==1.2.4
Markdown==3.4.1
MarkupSafe==2.1.2
matplotlib==3.6.3
mlflow==2.1.1
monai-weekly==1.2.dev2304
networkx==3.0
nibabel==5.0.0
numba==0.56.4
numpy==1.23.5
nvidia-cublas-cu11==11.10.3.66
nvidia-cuda-nvrtc-cu11==11.7.99
nvidia-cuda-runtime-cu11==11.7.99
nvidia-cudnn-cu11==8.5.0.96
oauthlib==3.2.2
omegaconf==2.3.0
packaging==22.0
pandas==1.5.3
patsy==0.5.3
Pillow==9.4.0
protobuf==3.20.1
pyarrow==10.0.1
pyasn1==0.4.8
pyasn1-modules==0.2.8
PyJWT==2.6.0
pyparsing==3.0.9
PySocks==1.7.1
python-dateutil==2.8.2
pytz==2022.7.1
PyWavelets==1.4.1
PyYAML==6.0
querystring-parser==1.2.4
requests==2.28.2
requests-oauthlib==1.3.1
rsa==4.9
scikit-image==0.19.3
scikit-learn==1.2.1
scipy==1.10.0
shap==0.41.0
six==1.16.0
slicer==0.0.7
smmap==5.0.0
soupsieve==2.3.2.post1
SQLAlchemy==1.4.46
sqlparse==0.4.3
statsmodels==0.13.5
tabulate==0.9.0
tensorboard==2.11.2
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorboardX==2.5.1
threadpoolctl==3.1.0
tifffile==2023.1.23.1
torch==1.13.1
torchvision==0.14.1
tqdm==4.64.1
typing_extensions==4.4.0
urllib3==1.26.14
websocket-client==1.5.0
Werkzeug==2.2.2
zipp==3.11.0
@@ -0,0 +1,231 @@
import os

import numpy as np
import pandas as pd
import monai.transforms as transforms
from monai.data import DataLoader
from monai.data.dataset import Dataset, PersistentDataset

def get_data_dicts(
        ids_path: str,
        shuffle: bool = False,
        conditioned: bool = False,
        conditionings=None,
        max_size=None,
):
    """
    Get data dictionaries for label generator training.
    :param ids_path: path to a TSV file with a 'label' column (path to each
        label map) and, optionally, one column per conditioning variable
    :param shuffle: whether to shuffle the rows (fixed random seed)
    :param conditioned: if conditioning is required, the conditioning
        columns will be read from the TSV file
    :param conditionings: list of conditioning keywords present in the TSV
        file as columns
    :param max_size: optional cap on the number of returned dictionaries
    :return: list of data dictionaries, one per subject
    """
    df = pd.read_csv(ids_path, sep="\t")
    if shuffle:
        df = df.sample(frac=1, random_state=1)

    data_dicts = []
    for index, row in df.iterrows():
        out_dict = {
            "label": row["label"],
        }

        if conditioned:
            for conditioning in conditionings:
                if conditioning in row.keys():
                    out_dict[conditioning] = float(row[conditioning])

        data_dicts.append(out_dict)

    print(f"Found {len(data_dicts)} subjects.")
    if max_size is not None:
        return data_dicts[:max_size]
    else:
        return data_dicts


def get_training_loaders(
        batch_size: int,
        training_ids: str,
        validation_ids: str,
        spatial_size: list,
        conditionings: list = [],
        only_val: bool = False,
        augmentation: bool = True,
        drop_last: bool = False,
        num_workers: int = 8,
        cache_dir=None,
        for_ldm=False,
        max_size=None,
):
    """
    Get training and validation data loaders for the scenario with
    Partial Volume (PV) maps and conditioning.
    """
    # Create the cache directory used by PersistentDataset.
    if cache_dir is not None:
        if not os.path.isdir(os.path.join(cache_dir, 'cache')):
            os.makedirs(os.path.join(cache_dir, 'cache'))
        cache_dir = os.path.join(cache_dir, 'cache')

    # Base transformations: load the NIfTI PV maps, move the channel axis
    # first, crop/pad to a fixed spatial size, then recompute the background
    # channel as 1 minus the sum of the non-background PV channels.
    base_transforms = [
        transforms.LoadImaged(keys=['label']),  # NIfTIs
        transforms.AsChannelFirstd(keys=['label'], channel_dim=-1),
        transforms.CenterSpatialCropd(keys=['label'], roi_size=spatial_size),
        transforms.SpatialPadd(keys=['label'], spatial_size=spatial_size, method='symmetric'),
        transforms.Lambdad(keys=['label'], func=lambda l: np.concatenate(
            [np.expand_dims(1 - np.sum(l[1:, ...], 0), 0), l[1:, ...]], 0)),
        transforms.ToTensord(keys=["label", ] + conditionings),
    ]

    val_transforms = transforms.Compose(base_transforms)

    if augmentation:
        # Milder affine ranges when the labels feed the LDM stage.
        if for_ldm:
            rotate_range = [-0.05, 0.05]
            shear_range = [0.001, 0.05]
            scale_range = [0, 0.05]
        else:
            rotate_range = [-0.1, 0.1]
            shear_range = [0.001, 0.15]
            scale_range = [0, 0.3]

        train_transforms = transforms.Compose(
            base_transforms[:-1] +
            [
                transforms.RandAffined(
                    keys=["label"],
                    # NOTE: with prob=0.0 this affine augmentation is
                    # never actually applied.
                    prob=0.0,
                    rotate_range=rotate_range,
                    shear_range=shear_range,
                    scale_range=scale_range,
                    padding_mode='border',
                    mode='nearest',
                ),
            ] +
            [base_transforms[-1]]
        )
    else:
        train_transforms = val_transforms

    val_dicts = get_data_dicts(
        ids_path=validation_ids,
        shuffle=False,
        conditioned=True,
        conditionings=conditionings,
    )

    if cache_dir is not None:
        val_ds = PersistentDataset(
            cache_dir=cache_dir,
            data=val_dicts,
            transform=val_transforms,
        )
    else:
        val_ds = Dataset(data=val_dicts, transform=val_transforms)
    val_loader = DataLoader(
        val_ds,
        batch_size=batch_size,
        num_workers=num_workers,
        drop_last=drop_last,
        pin_memory=False,
    )

    if only_val:
        return val_loader

    train_dicts = get_data_dicts(
        ids_path=training_ids,
        shuffle=False,
        conditioned=True,
        conditionings=conditionings,
        max_size=max_size,
    )
    if cache_dir is not None:
        train_ds = PersistentDataset(
            cache_dir=cache_dir,
            data=train_dicts,
            transform=train_transforms,
        )
    else:
        train_ds = Dataset(
            data=train_dicts,
            transform=train_transforms,
        )

    train_loader = DataLoader(
        train_ds,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        drop_last=drop_last,
        pin_memory=False,
    )

    return train_loader, val_loader


def get_testing_loader(
        batch_size: int,
        testing_ids: str,
        spatial_size: list,
        conditionings: list = [],
        drop_last: bool = False,
        num_workers: int = 8,
        cache_dir=None,
):
    """
    Get the test data loader for the scenario with Partial Volume (PV)
    maps and conditioning.
    """
    # Create the cache directory used by PersistentDataset.
    if cache_dir is not None:
        if not os.path.isdir(os.path.join(cache_dir, 'cache')):
            os.makedirs(os.path.join(cache_dir, 'cache'))
        cache_dir = os.path.join(cache_dir, 'cache')

    # Define transformations (no augmentation at test time).
    test_transforms = transforms.Compose([
        transforms.LoadImaged(keys=['label']),  # NIfTIs
        transforms.AsChannelFirstd(keys=['label'], channel_dim=-1),
        transforms.CenterSpatialCropd(keys=['label'], roi_size=spatial_size),
        # 'edge' is a NumPy pad *mode*; SpatialPadd's method must be
        # 'symmetric' or 'end', so edge replication goes through `mode`.
        transforms.SpatialPadd(keys=['label'], spatial_size=spatial_size,
                               method='symmetric', mode='edge'),
        transforms.ToTensord(keys=["label", ] + conditionings)
    ])

    test_dicts = get_data_dicts(
        ids_path=testing_ids,
        shuffle=False,
        conditioned=True,
        conditionings=conditionings
    )

    if cache_dir is not None:
        test_ds = PersistentDataset(
            cache_dir=cache_dir,
            data=test_dicts,
            transform=test_transforms,
        )
    else:
        test_ds = Dataset(
            data=test_dicts,
            transform=test_transforms,
        )
    test_loader = DataLoader(
        test_ds,
        batch_size=batch_size,
        num_workers=num_workers,
        drop_last=drop_last,
        pin_memory=False
    )

    return test_loader
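For orientation, a minimal sketch of how these loaders might be driven. The TSV paths, spatial size, and cache directory below are illustrative assumptions, not values from this commit; the conditioning names are taken from the sampling config above:

    # Hypothetical usage of the loaders defined above; all paths and sizes
    # are placeholders.
    conditionings = ['wmh', 'tumour', 'edema', 'gdtumour']

    train_loader, val_loader = get_training_loaders(
        batch_size=2,
        training_ids="splits/train.tsv",   # TSV with a 'label' column plus one column per conditioning
        validation_ids="splits/val.tsv",
        spatial_size=[96, 128, 96],        # placeholder crop/pad size
        conditionings=conditionings,
        augmentation=True,
        cache_dir="/tmp/pv_cache",         # enables PersistentDataset caching
    )

    batch = next(iter(train_loader))
    print(batch["label"].shape)            # (B, C, H, W, D), one channel per tissue/lesion label
    print({k: batch[k] for k in conditionings})  # per-sample conditioning tensors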