From 583a81e596890bf5008b046a56a7e539db997686 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 4 Apr 2023 09:07:45 +0000
Subject: [PATCH] [pre-commit.ci] pre-commit suggestions (#1280)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/psf/black: 22.12.0 → 23.3.0](https://github.com/psf/black/compare/22.12.0...23.3.0)

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Wenqi Li <831580+wyli@users.noreply.github.com>
---
 .pre-commit-config.yaml | 2 +-
 3d_classification/ignite/densenet_training_dict.py | 1 -
 3d_registration/paired_lung_ct.ipynb | 1 -
 3d_segmentation/brats_segmentation_3d.ipynb | 1 -
 .../swin_unetr_brats21_segmentation_3d.ipynb | 12 +++++++++---
 .../distributed_training/brats_training_ddp.py | 1 -
 auto3dseg/notebooks/ensemble_byoc.ipynb | 1 -
 .../tasks/hecktor22/hecktor_crop_neck_region.py | 2 --
 .../MICCAI/surgtoolloc/classification_files/train.py | 3 ---
 .../MICCAI/surgtoolloc/classification_files/utils.py | 2 --
 .../RANZCR/4th_place_solution/data/seg_data.py | 2 --
 .../RANZCR/4th_place_solution/models/seg_model.py | 2 --
 .../kaggle/RANZCR/4th_place_solution/train.py | 6 ------
 .../kaggle/RANZCR/4th_place_solution/utils.py | 5 -----
 deep_atlas/deep_atlas_tutorial.ipynb | 2 --
 deepedit/ignite/train.py | 2 --
 deployment/Triton/client/client.py | 1 -
 deployment/Triton/client/client_mednist.py | 1 -
 deployment/Triton/models/mednist_class/1/model.py | 1 -
 deployment/Triton/models/monai_covid/1/model.py | 1 -
 detection/luna16_visualization/save_obj.py | 2 --
 .../workspace/Monai_MedNIST.ipynb | 1 -
 .../substra/assets/objective/metrics.py | 2 +-
 .../TCIA_PROSTATEx_Prostate_MRI_Anatomy_Model.ipynb | 1 +
 modules/autoencoder_mednist.ipynb | 1 -
 .../randomizedPermutations.py | 1 -
 modules/interpretability/class_lung_lesion.ipynb | 1 -
 ...erse_transforms_and_test_time_augmentations.ipynb | 1 -
 modules/lazy_resampling_functional.ipynb | 1 -
 modules/learning_rate.ipynb | 1 -
 modules/load_medical_images.ipynb | 1 -
 modules/varautoencoder_mednist.ipynb | 1 -
 .../panda_mil_train_evaluate_pytorch_gpu.py | 10 ----------
 performance_profiling/radiology/train_base_nvtx.py | 1 -
 .../MRI_reconstruction/unet_demo/fastmri_ssim.py | 1 +
 .../MRI_reconstruction/unet_demo/inference.ipynb | 1 -
 .../MRI_reconstruction/varnet_demo/fastmri_ssim.py | 1 +
 37 files changed, 14 insertions(+), 63 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 34dec2f2c1..4f98969bdc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -23,7 +23,7 @@ repos:
       - id: detect-private-key
 
   - repo: https://github.com/psf/black
-    rev: "22.12.0"
+    rev: "23.3.0"
     hooks:
       - id: black
       - id: black-jupyter
diff --git a/3d_classification/ignite/densenet_training_dict.py b/3d_classification/ignite/densenet_training_dict.py
index 050f1d7516..99ad867979 100644
--- a/3d_classification/ignite/densenet_training_dict.py
+++ b/3d_classification/ignite/densenet_training_dict.py
@@ -93,7 +93,6 @@ def main():
     # Ignite trainer expects batch=(img, label) and returns output=loss at every iteration,
     # user can add output_transform to return other values, like: y_pred, y, etc.
     def prepare_batch(batch, device=None, non_blocking=False):
-
         return _prepare_batch((batch["img"], batch["label"]), device, non_blocking)
 
     trainer = create_supervised_trainer(net, opt, loss, device, False, prepare_batch=prepare_batch)
diff --git a/3d_registration/paired_lung_ct.ipynb b/3d_registration/paired_lung_ct.ipynb
index b298d9c155..20360fc8e3 100644
--- a/3d_registration/paired_lung_ct.ipynb
+++ b/3d_registration/paired_lung_ct.ipynb
@@ -676,7 +676,6 @@
     "    model.eval()\n",
     "    with torch.no_grad():\n",
     "        for val_data in val_loader:\n",
-    "\n",
     "            val_ddf, val_pred_image, val_pred_label = forward(val_data, model)\n",
     "            val_pred_label[val_pred_label > 1] = 1\n",
     "\n",
diff --git a/3d_segmentation/brats_segmentation_3d.ipynb b/3d_segmentation/brats_segmentation_3d.ipynb
index 9698abe340..f801461148 100644
--- a/3d_segmentation/brats_segmentation_3d.ipynb
+++ b/3d_segmentation/brats_segmentation_3d.ipynb
@@ -541,7 +541,6 @@
     "        if (epoch + 1) % val_interval == 0:\n",
     "            model.eval()\n",
     "            with torch.no_grad():\n",
-    "\n",
     "                for val_data in val_loader:\n",
     "                    val_inputs, val_labels = (\n",
     "                        val_data[\"image\"].to(device),\n",
diff --git a/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb b/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb
index 9cbcc7c954..82b9b92b58 100644
--- a/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb
+++ b/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb
@@ -268,7 +268,6 @@
     "\n",
     "\n",
     "def datafold_read(datalist, basedir, fold=0, key=\"training\"):\n",
-    "\n",
     "    with open(datalist) as f:\n",
     "        json_data = json.load(f)\n",
     "\n",
@@ -610,7 +609,6 @@
     "    post_sigmoid=None,\n",
     "    post_pred=None,\n",
     "):\n",
-    "\n",
     "    val_acc_max = 0.0\n",
     "    dices_tc = []\n",
     "    dices_wt = []\n",
@@ -705,7 +703,15 @@
    "source": [
     "start_epoch = 0\n",
     "\n",
-    "(val_acc_max, dices_tc, dices_wt, dices_et, dices_avg, loss_epochs, trains_epoch,) = trainer(\n",
+    "(\n",
+    "    val_acc_max,\n",
+    "    dices_tc,\n",
+    "    dices_wt,\n",
+    "    dices_et,\n",
+    "    dices_avg,\n",
+    "    loss_epochs,\n",
+    "    trains_epoch,\n",
+    ") = trainer(\n",
     "    model=model,\n",
     "    train_loader=train_loader,\n",
     "    val_loader=val_loader,\n",
diff --git a/acceleration/distributed_training/brats_training_ddp.py b/acceleration/distributed_training/brats_training_ddp.py
index f70851ba53..c8851b5ab3 100644
--- a/acceleration/distributed_training/brats_training_ddp.py
+++ b/acceleration/distributed_training/brats_training_ddp.py
@@ -132,7 +132,6 @@ def __init__(
         num_workers=0,
         shuffle=False,
     ) -> None:
-
         if not os.path.isdir(root_dir):
             raise ValueError("root directory root_dir must be a directory.")
         self.section = section
diff --git a/auto3dseg/notebooks/ensemble_byoc.ipynb b/auto3dseg/notebooks/ensemble_byoc.ipynb
index 270409a6af..8e1b62308c 100644
--- a/auto3dseg/notebooks/ensemble_byoc.ipynb
+++ b/auto3dseg/notebooks/ensemble_byoc.ipynb
@@ -175,7 +175,6 @@
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, n_models=3):\n",
-    "\n",
     "        super().__init__()\n",
     "        self.n_models = n_models\n",
     "\n",
diff --git a/auto3dseg/tasks/hecktor22/hecktor_crop_neck_region.py b/auto3dseg/tasks/hecktor22/hecktor_crop_neck_region.py
index 3adf6e1d07..cc76c3d545 100644
--- a/auto3dseg/tasks/hecktor22/hecktor_crop_neck_region.py
+++ b/auto3dseg/tasks/hecktor22/hecktor_crop_neck_region.py
@@ -37,7 +37,6 @@ def __init__(
         self.box_size = box_size
 
     def __call__(self, data):
-
         d = dict(data)
 
         im_pet = d["image2"][0]
@@ -92,7 +91,6 @@ def __call__(self, data):
         return d
 
     def extract_roi(self, im_pet, box_size):
-
         crop_len = int(0.75 * im_pet.shape[2])
         im = im_pet[..., crop_len:]
 
diff --git a/competitions/MICCAI/surgtoolloc/classification_files/train.py b/competitions/MICCAI/surgtoolloc/classification_files/train.py
index 2a9db583b6..c7dc360ede 100644
--- a/competitions/MICCAI/surgtoolloc/classification_files/train.py
+++ b/competitions/MICCAI/surgtoolloc/classification_files/train.py
@@ -35,7 +35,6 @@
 
 
 def main(cfg):
-
     os.makedirs(str(cfg.output_dir + f"/fold{cfg.fold}/"), exist_ok=True)
     set_seed(cfg.seed)
     # set dataset, dataloader
@@ -190,7 +189,6 @@ def run_train(
 
 
 def run_eval(model, val_dataloader, cfg, writer, epoch, metric):
-
     model.eval()
     torch.set_grad_enabled(False)
 
@@ -213,7 +211,6 @@ def run_eval(model, val_dataloader, cfg, writer, epoch, metric):
 
 
 if __name__ == "__main__":
-
     sys.path.append("configs")
     sys.path.append("models")
     sys.path.append("data")
diff --git a/competitions/MICCAI/surgtoolloc/classification_files/utils.py b/competitions/MICCAI/surgtoolloc/classification_files/utils.py
index 601b3fdb95..19bf4442d7 100644
--- a/competitions/MICCAI/surgtoolloc/classification_files/utils.py
+++ b/competitions/MICCAI/surgtoolloc/classification_files/utils.py
@@ -45,7 +45,6 @@ def set_seed(seed):
 
 
 def get_train_dataloader(train_dataset, cfg):
-
     train_dataloader = DataLoader(
         train_dataset,
         sampler=None,
@@ -61,7 +60,6 @@ def get_train_dataloader(train_dataset, cfg):
 
 
 def get_val_dataloader(val_dataset, cfg):
-
     val_dataloader = DataLoader(
         val_dataset,
         shuffle=False,
diff --git a/competitions/kaggle/RANZCR/4th_place_solution/data/seg_data.py b/competitions/kaggle/RANZCR/4th_place_solution/data/seg_data.py
index 40a190e2c7..a9f687375f 100644
--- a/competitions/kaggle/RANZCR/4th_place_solution/data/seg_data.py
+++ b/competitions/kaggle/RANZCR/4th_place_solution/data/seg_data.py
@@ -97,7 +97,6 @@ def get_mask(self, study_id: str, img_shape: Tuple, is_annotated: int):
         mask = np.zeros((img_shape[0], img_shape[1], self.cfg.seg_dim))
 
         for idx, data in df.iterrows():
-
             xys = [np.array(ast.literal_eval(data["data"])).clip(0, np.inf).astype(np.int32)[:, None, :]]
 
             m = np.zeros(img_shape)
@@ -120,7 +119,6 @@ def get_mask(self, study_id: str, img_shape: Tuple, is_annotated: int):
         return mask
 
     def __getitem__(self, idx):
-
         study_id = self.study_ids[idx]
         label = self.labels[idx]
         is_annotated = self.is_annotated[idx]
diff --git a/competitions/kaggle/RANZCR/4th_place_solution/models/seg_model.py b/competitions/kaggle/RANZCR/4th_place_solution/models/seg_model.py
index 66926b3566..aaca335eba 100644
--- a/competitions/kaggle/RANZCR/4th_place_solution/models/seg_model.py
+++ b/competitions/kaggle/RANZCR/4th_place_solution/models/seg_model.py
@@ -92,7 +92,6 @@ def __init__(
         self.blocks = nn.ModuleList(blocks)
 
     def forward(self, *features: Sequence[torch.Tensor]):
-
         features = features[1:][::-1]
         skips = features[1:]
         x = features[0]
@@ -215,7 +214,6 @@ def __init__(self, cfg):
             print("weights loaded from", cfg.pretrained_weights)
 
     def forward(self, batch):
-
         x_in = batch["input"]
 
         enc_out = self.encoder(x_in)
diff --git a/competitions/kaggle/RANZCR/4th_place_solution/train.py b/competitions/kaggle/RANZCR/4th_place_solution/train.py
index 6b0f03775a..ebe0754077 100644
--- a/competitions/kaggle/RANZCR/4th_place_solution/train.py
+++ b/competitions/kaggle/RANZCR/4th_place_solution/train.py
@@ -42,7 +42,6 @@
 
 
 def main(cfg):
-
     os.makedirs(str(cfg.output_dir + f"/fold{cfg.fold}/"), exist_ok=True)
 
     # set random seed, works when use all data to train
@@ -194,12 +193,10 @@ def run_train(
             scheduler.step()
 
         if step % cfg.batch_size == 0:
-
             progress_bar.set_description(f"loss: {np.mean(losses[-10:]):.2f}")
 
 
 def run_eval(model, val_dataloader, cfg, writer, epoch):
-
     model.eval()
     torch.set_grad_enabled(False)
 
@@ -228,7 +225,6 @@ def run_eval(model, val_dataloader, cfg, writer, epoch):
     val_loss = np.mean(val_losses)
 
     if cfg.compute_auc is True:
-
         val_preds = torch.cat(val_preds)
         val_targets = torch.cat(val_targets)
         avg_auc = compute_roc_auc(val_preds, val_targets, average="macro")
@@ -240,7 +236,6 @@ def run_eval(model, val_dataloader, cfg, writer, epoch):
 
 
 def run_infer(weights_folder_path, cfg):
-
     cfg.pretrained = False
     # for local test, please modify the following path into actual path.
     cfg.data_folder = cfg.data_dir + "test/"
@@ -296,7 +291,6 @@ def run_infer(weights_folder_path, cfg):
 
 
 if __name__ == "__main__":
-
     sys.path.append("configs")
     sys.path.append("models")
    sys.path.append("data")
diff --git a/competitions/kaggle/RANZCR/4th_place_solution/utils.py b/competitions/kaggle/RANZCR/4th_place_solution/utils.py
index 571b41af45..0213b56015 100644
--- a/competitions/kaggle/RANZCR/4th_place_solution/utils.py
+++ b/competitions/kaggle/RANZCR/4th_place_solution/utils.py
@@ -33,7 +33,6 @@ def get_train_dataset(train_df, cfg):
 
 
 def get_train_dataloader(train_dataset, cfg):
-
     train_dataloader = DataLoader(
         train_dataset,
         sampler=None,
@@ -54,7 +53,6 @@ def get_val_dataset(val_df, cfg):
 
 
 def get_val_dataloader(val_dataset, cfg):
-
     val_dataloader = DataLoader(
         val_dataset,
         sampler=SequentialSampler(val_dataset),
@@ -73,7 +71,6 @@ def get_test_dataset(test_df, cfg):
 
 
 def get_test_dataloader(test_dataset, cfg):
-
     test_dataloader = DataLoader(
         test_dataset,
         shuffle=False,
@@ -84,7 +81,6 @@
 
 
 def get_optimizer(model, cfg):
-
     params = model.parameters()
     optimizer = optim.Adam(params, lr=cfg.lr, weight_decay=cfg.weight_decay)
 
@@ -92,7 +88,6 @@ def get_optimizer(model, cfg):
 
 
 def get_scheduler(cfg, optimizer, total_steps):
-
     scheduler = WarmupCosineSchedule(
         optimizer,
         warmup_steps=cfg.warmup * (total_steps // cfg.batch_size),
diff --git a/deep_atlas/deep_atlas_tutorial.ipynb b/deep_atlas/deep_atlas_tutorial.ipynb
index 38f444fd3a..62ade98ecf 100644
--- a/deep_atlas/deep_atlas_tutorial.ipynb
+++ b/deep_atlas/deep_atlas_tutorial.ipynb
@@ -1251,7 +1251,6 @@
     "val_interval = 5\n",
     "\n",
     "for epoch_number in range(max_epochs):\n",
-    "\n",
     "    print(f\"Epoch {epoch_number+1}/{max_epochs}:\")\n",
     "\n",
     "    seg_net.train()\n",
@@ -1794,7 +1793,6 @@
     "best_reg_validation_loss = float(\"inf\")\n",
     "\n",
     "for epoch_number in range(max_epochs):\n",
-    "\n",
     "    print(f\"Epoch {epoch_number+1}/{max_epochs}:\")\n",
     "\n",
     "    # ------------------------------------------------\n",
diff --git a/deepedit/ignite/train.py b/deepedit/ignite/train.py
index 953cc6a1c6..b3f5952611 100644
--- a/deepedit/ignite/train.py
+++ b/deepedit/ignite/train.py
@@ -194,7 +194,6 @@ def get_loaders(args, pre_transforms):
 
 
 def create_trainer(args):
-
     set_determinism(seed=args.seed)
 
     multi_gpu = args.multi_gpu
@@ -431,7 +430,6 @@ def main():
 
 
 if __name__ == "__main__":
-
     logging.basicConfig(
         stream=sys.stdout,
         level=logging.INFO,
diff --git a/deployment/Triton/client/client.py b/deployment/Triton/client/client.py
index 8bead5afeb..868a8bb7f5 100644
--- a/deployment/Triton/client/client.py
+++ b/deployment/Triton/client/client.py
@@ -61,7 +61,6 @@ def open_nifti_files(input_path):
 
 
 if __name__ == "__main__":
-
     parser = argparse.ArgumentParser(description="Triton CLI for COVID classification inference from NIFTI data")
     parser.add_argument(
         "input",
diff --git a/deployment/Triton/client/client_mednist.py b/deployment/Triton/client/client_mednist.py
index fd56159c64..c525bdab33 100644
--- a/deployment/Triton/client/client_mednist.py
+++ b/deployment/Triton/client/client_mednist.py
@@ -65,7 +65,6 @@ def open_jpeg_files(input_path):
 
 
 if __name__ == "__main__":
-
     parser = argparse.ArgumentParser(description="Triton CLI for MedNist classification inference from JPEG data")
     parser.add_argument(
         "input",
diff --git a/deployment/Triton/models/mednist_class/1/model.py b/deployment/Triton/models/mednist_class/1/model.py
index a1d22832ad..9b6a8de8a0 100644
--- a/deployment/Triton/models/mednist_class/1/model.py
+++ b/deployment/Triton/models/mednist_class/1/model.py
@@ -158,7 +158,6 @@ def execute(self, requests):
         batched_img = []
         print("starting request")
         for request in requests:
-
             # get the input by name (as configured in config.pbtxt)
             input_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
 
diff --git a/deployment/Triton/models/monai_covid/1/model.py b/deployment/Triton/models/monai_covid/1/model.py
index 09974bd24d..3516dd32a9 100644
--- a/deployment/Triton/models/monai_covid/1/model.py
+++ b/deployment/Triton/models/monai_covid/1/model.py
@@ -152,7 +152,6 @@ def execute(self, requests):
         responses = []
 
         for request in requests:
-
             # get the input by name (as configured in config.pbtxt)
             input_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
 
diff --git a/detection/luna16_visualization/save_obj.py b/detection/luna16_visualization/save_obj.py
index 9ac2a9d0e7..0317fd34f0 100644
--- a/detection/luna16_visualization/save_obj.py
+++ b/detection/luna16_visualization/save_obj.py
@@ -23,9 +23,7 @@
 
 
 def save_obj(vertices, faces, filename):
-
     with open(filename, "w") as f:
-
         for v in vertices:
             f.write("v {} {} {}\n".format(*np.array(v)))
 
diff --git a/federated_learning/openfl/openfl_mednist_2d_registration/workspace/Monai_MedNIST.ipynb b/federated_learning/openfl/openfl_mednist_2d_registration/workspace/Monai_MedNIST.ipynb
index c38a51e726..c8029367c8 100644
--- a/federated_learning/openfl/openfl_mednist_2d_registration/workspace/Monai_MedNIST.ipynb
+++ b/federated_learning/openfl/openfl_mednist_2d_registration/workspace/Monai_MedNIST.ipynb
@@ -380,7 +380,6 @@
     "    loss_fn=image_loss,\n",
     "    affine_transform=warp_layer,\n",
     "):\n",
-    "\n",
     "    train_loader = tqdm.tqdm(train_loader, desc=\"train\")\n",
     "    net_model.train()\n",
     "    net_model.to(device)\n",
diff --git a/federated_learning/substra/assets/objective/metrics.py b/federated_learning/substra/assets/objective/metrics.py
index f924c52a47..3b14015333 100644
--- a/federated_learning/substra/assets/objective/metrics.py
+++ b/federated_learning/substra/assets/objective/metrics.py
@@ -22,7 +22,7 @@ def score(self, y_true, y_pred):
         metric_sum = 0.0
         metric_count = 0
         with torch.no_grad():
-            for (val_true, val_pred) in zip(y_true, y_pred):
+            for val_true, val_pred in zip(y_true, y_pred):
                 val_true, _ = val_true
                 val_pred, _ = val_pred
                 value = self.dice_metric(
diff --git a/model_zoo/TCIA_PROSTATEx_Prostate_MRI_Anatomy_Model.ipynb b/model_zoo/TCIA_PROSTATEx_Prostate_MRI_Anatomy_Model.ipynb
index 05f3ab3c2c..a47da0e217 100644
--- a/model_zoo/TCIA_PROSTATEx_Prostate_MRI_Anatomy_Model.ipynb
+++ b/model_zoo/TCIA_PROSTATEx_Prostate_MRI_Anatomy_Model.ipynb
@@ -324,6 +324,7 @@
     "# scripts_dir = os.path.join(zoo_dir, model_name, \"scripts\")\n",
     "# sys.path.insert(1, scripts_dir)\n",
     "\n",
+    "\n",
     "# Compact alternative implementation of this model's specific cropping step.\n",
     "# Ideally this would have been accomplished using MONAI's transforms\n",
     "# for data pre-processing / augmentation instead of using a separate\n",
diff --git a/modules/autoencoder_mednist.ipynb b/modules/autoencoder_mednist.ipynb
index 38ca494a33..4d045ce1ef 100644
--- a/modules/autoencoder_mednist.ipynb
+++ b/modules/autoencoder_mednist.ipynb
@@ -364,7 +364,6 @@
    "outputs": [],
    "source": [
     "def train(dict_key_for_training, max_epochs=10, learning_rate=1e-3):\n",
-    "\n",
     "    model = AutoEncoder(\n",
     "        spatial_dims=2,\n",
     "        in_channels=1,\n",
diff --git a/modules/generate_random_permutations/randomizedPermutations.py b/modules/generate_random_permutations/randomizedPermutations.py
index 220207e6db..ccd0062a77 100644
--- a/modules/generate_random_permutations/randomizedPermutations.py
+++ b/modules/generate_random_permutations/randomizedPermutations.py
@@ -118,7 +118,6 @@ def __call__(self, image_file_list, *args, **kwargs):
 
 
 def main():
-
     image_dir = "./exampleImages"
     image_file_list = glob.glob(image_dir + "/*.png")
     output_size = (400, 400)
diff --git a/modules/interpretability/class_lung_lesion.ipynb b/modules/interpretability/class_lung_lesion.ipynb
index fd78ce8003..5e0dae7ab6 100644
--- a/modules/interpretability/class_lung_lesion.ipynb
+++ b/modules/interpretability/class_lung_lesion.ipynb
@@ -605,7 +605,6 @@
     "\n",
     "example = 0\n",
     "for item in items:\n",
-    "\n",
     "    data = train_ds[item]  # this fetches training data with random augmentations\n",
     "    image, label = data[\"image\"].to(device).unsqueeze(0), data[\"label\"][1]\n",
     "    y_pred = model_3d(image)\n",
diff --git a/modules/inverse_transforms_and_test_time_augmentations.ipynb b/modules/inverse_transforms_and_test_time_augmentations.ipynb
index 4eeb6160e3..07b6b9e7af 100644
--- a/modules/inverse_transforms_and_test_time_augmentations.ipynb
+++ b/modules/inverse_transforms_and_test_time_augmentations.ipynb
@@ -729,7 +729,6 @@
     "\n",
     "# Get images\n",
     "for file in np.random.choice(val_files, size=5, replace=False):\n",
-    "\n",
     "    mode_tta, mean_tta, std_tta, vvc_tta = tt_aug(file, num_examples=10)\n",
     "    unmodified_data = minimal_transforms(file)\n",
     "\n",
diff --git a/modules/lazy_resampling_functional.ipynb b/modules/lazy_resampling_functional.ipynb
index d520f7958e..bd8a7a9d67 100644
--- a/modules/lazy_resampling_functional.ipynb
+++ b/modules/lazy_resampling_functional.ipynb
@@ -91,7 +91,6 @@
    "outputs": [],
    "source": [
     "def plot_img_operations(img):\n",
-    "\n",
     "    if isinstance(img, list):\n",
     "        print(f\"patch metadata of patch idx 0 out of {len(img)} samples.\\n\")\n",
     "        img_0 = img[0]\n",
diff --git a/modules/learning_rate.ipynb b/modules/learning_rate.ipynb
index aacf450a14..1e01a8b075 100644
--- a/modules/learning_rate.ipynb
+++ b/modules/learning_rate.ipynb
@@ -432,7 +432,6 @@
     "    val_interval = 1\n",
     "\n",
     "    for epoch in plot_range(data, trange(max_epochs)):\n",
-    "\n",
     "        for d in data.keys():\n",
     "            data[d][\"epoch_loss\"] = 0\n",
     "        for batch_data in train_loader:\n",
diff --git a/modules/load_medical_images.ipynb b/modules/load_medical_images.ipynb
index e0b3a78476..120245b7a9 100644
--- a/modules/load_medical_images.ipynb
+++ b/modules/load_medical_images.ipynb
@@ -371,7 +371,6 @@
    ],
    "source": [
     "for fn in (get_dcm_image(), get_dcm_images(), get_dcm_folder()):\n",
-    "\n",
     "    data = LoadImage(image_only=True)(fn)\n",
     "    print(f\"image data shape: {data.shape}\")"
    ]
diff --git a/modules/varautoencoder_mednist.ipynb b/modules/varautoencoder_mednist.ipynb
index 39c4daff03..fae115c833 100644
--- a/modules/varautoencoder_mednist.ipynb
+++ b/modules/varautoencoder_mednist.ipynb
@@ -391,7 +391,6 @@
     "\n",
     "\n",
     "def train(in_shape, max_epochs, latent_size, learning_rate, beta):\n",
-    "\n",
     "    model = VarAutoEncoder(\n",
     "        spatial_dims=2,\n",
     "        in_shape=in_shape,\n",
diff --git a/pathology/multiple_instance_learning/panda_mil_train_evaluate_pytorch_gpu.py b/pathology/multiple_instance_learning/panda_mil_train_evaluate_pytorch_gpu.py
index bea9ffeaa3..58a859397a 100644
--- a/pathology/multiple_instance_learning/panda_mil_train_evaluate_pytorch_gpu.py
+++ b/pathology/multiple_instance_learning/panda_mil_train_evaluate_pytorch_gpu.py
@@ -58,7 +58,6 @@ def train_epoch(model, loader, optimizer, scaler, epoch, args):
     loss, acc = 0.0, 0.0
 
     for idx, batch_data in enumerate(loader):
-
         data = batch_data["image"].as_subclass(torch.Tensor).cuda(args.rank)
         target = batch_data["label"].as_subclass(torch.Tensor).cuda(args.rank)
 
@@ -113,14 +112,11 @@ def val_epoch(model, loader, epoch, args, max_tiles=None):
     loss, acc = 0.0, 0.0
 
     with torch.no_grad():
-
         for idx, batch_data in enumerate(loader):
-
             data = batch_data["image"].as_subclass(torch.Tensor).cuda(args.rank)
             target = batch_data["label"].as_subclass(torch.Tensor).cuda(args.rank)
 
             with autocast(enabled=args.amp):
-
                 if max_tiles is not None and data.shape[1] > max_tiles:
                     # During validation, we want to use all instances/patches
                     # and if its number is very big, we may run out of GPU memory
@@ -224,7 +220,6 @@ def __init__(
         self.num_classes = num_classes
 
     def __call__(self, data):
-
         d = dict(data)
         for key in self.keys:
             label = int(d[key])
@@ -254,7 +249,6 @@ def list_data_collate(batch: collections.abc.Sequence):
 
 
 def main_worker(gpu, args):
-
     args.gpu = gpu
 
     if args.distributed:
@@ -415,7 +409,6 @@ def main_worker(gpu, args):
     scaler = GradScaler(enabled=args.amp)
 
     for epoch in range(start_epoch, n_epochs):
-
         if args.distributed:
             train_sampler.set_epoch(epoch)
             torch.distributed.barrier()
@@ -440,7 +433,6 @@ def main_worker(gpu, args):
         b_new_best = False
         val_acc = 0
         if (epoch + 1) % args.val_every == 0:
-
             epoch_time = time.time()
             val_loss, val_acc, qwk = val_epoch(model, valid_loader, epoch=epoch, args=args, max_tiles=args.tile_count)
             if args.rank == 0:
@@ -475,7 +467,6 @@ def main_worker(gpu, args):
 
 
 def parse_args():
-
     parser = argparse.ArgumentParser(description="Multiple Instance Learning (MIL) example of classification from WSI.")
     parser.add_argument(
         "--data_root", default="/PandaChallenge2020/train_images/", help="path to root folder of images"
     )
@@ -535,7 +526,6 @@ def parse_args():
 
 
 if __name__ == "__main__":
-
     args = parse_args()
 
     if args.dataset_json is None:
diff --git a/performance_profiling/radiology/train_base_nvtx.py b/performance_profiling/radiology/train_base_nvtx.py
index 66b00750f6..b2f6373e69 100644
--- a/performance_profiling/radiology/train_base_nvtx.py
+++ b/performance_profiling/radiology/train_base_nvtx.py
@@ -214,7 +214,6 @@ with torch.no_grad():
     val_loader_iterator = iter(val_loader)
 
     for val_step in range(len(val_loader)):
-
         with nvtx.annotate("dataload", color="red"):
             val_data = next(val_loader_iterator)
             val_inputs, val_labels = (
diff --git a/reconstruction/MRI_reconstruction/unet_demo/fastmri_ssim.py b/reconstruction/MRI_reconstruction/unet_demo/fastmri_ssim.py
index 8d56bd845b..815e74594f 100644
--- a/reconstruction/MRI_reconstruction/unet_demo/fastmri_ssim.py
+++ b/reconstruction/MRI_reconstruction/unet_demo/fastmri_ssim.py
@@ -12,6 +12,7 @@
 from skimage.metrics import structural_similarity as compare_ssim
 from numpy import ndarray
 
+
 # monai.losses.ssim_loss can be used as a metric
 # but in order to match numbers with the fastMRI leaderboard,
 # we use scikit-image ssim metric
diff --git a/reconstruction/MRI_reconstruction/unet_demo/inference.ipynb b/reconstruction/MRI_reconstruction/unet_demo/inference.ipynb
index a71700c04b..301dd9cb2f 100644
--- a/reconstruction/MRI_reconstruction/unet_demo/inference.ipynb
+++ b/reconstruction/MRI_reconstruction/unet_demo/inference.ipynb
@@ -329,7 +329,6 @@
     "    slice_dim = 1  # change this if another dimension is your slice dimension\n",
     "    num_slices = input.shape[slice_dim]\n",
     "    for i in range(num_slices):\n",
-    "\n",
     "        inp = input[:, i, ...].unsqueeze(slice_dim)\n",
     "        tar = target[:, i, ...].unsqueeze(slice_dim)\n",
     "\n",
diff --git a/reconstruction/MRI_reconstruction/varnet_demo/fastmri_ssim.py b/reconstruction/MRI_reconstruction/varnet_demo/fastmri_ssim.py
index 8d56bd845b..815e74594f 100644
--- a/reconstruction/MRI_reconstruction/varnet_demo/fastmri_ssim.py
+++ b/reconstruction/MRI_reconstruction/varnet_demo/fastmri_ssim.py
@@ -12,6 +12,7 @@
 from skimage.metrics import structural_similarity as compare_ssim
 from numpy import ndarray
 
+
 # monai.losses.ssim_loss can be used as a metric
 # but in order to match numbers with the fastMRI leaderboard,
 # we use scikit-image ssim metric