[pre-commit.ci] pre-commit suggestions (Project-MONAI#1280)
<!--pre-commit.ci start-->
updates:
- [github.com/psf/black: 22.12.0 → 23.3.0](psf/black@22.12.0...23.3.0)
<!--pre-commit.ci end-->

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Wenqi Li <[email protected]>
pre-commit-ci[bot] and wyli authored Apr 4, 2023
1 parent 72fbaf1 commit 583a81e
Showing 37 changed files with 14 additions and 63 deletions.
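
The only hand-maintained change is the hook bump in `.pre-commit-config.yaml`; the remaining 36 files are Black 23.3.0 re-formatting code the hooks already covered. As the hunks below show, the most common effect of Black's 2023 stable style is that a blank line sitting directly after a block opener (a `def` signature, a `with`/`for` header) is deleted; a handful of statements also lose redundant parentheses or get re-wrapped. A minimal sketch of that behaviour, assuming `black>=23.3` is installed locally and reusing the `prepare_batch` example from the first Python hunk (`_prepare_batch` is only referenced, never executed):

```python
# Illustrative sketch only (not part of the commit): demonstrate the Black 23.x
# stable-style change behind most of the deletions in this diff.
# Assumes `pip install "black>=23.3"`.
import black

# Formatting that Black 22.12 left alone: a blank line right after the signature.
old_src = (
    "def prepare_batch(batch, device=None, non_blocking=False):\n"
    "\n"
    "    return _prepare_batch((batch['img'], batch['label']), device, non_blocking)\n"
)

# Black 23.x drops the blank line after the block opener (and normalizes quotes).
new_src = black.format_str(old_src, mode=black.Mode())
print(new_src)
# def prepare_batch(batch, device=None, non_blocking=False):
#     return _prepare_batch((batch["img"], batch["label"]), device, non_blocking)
```

Running `pre-commit run --all-files` after bumping the hook revision (which is what pre-commit.ci did here) applies the same pass across the whole repository.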
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -23,7 +23,7 @@ repos:
- id: detect-private-key

- repo: https://github.com/psf/black
rev: "22.12.0"
rev: "23.3.0"
hooks:
- id: black
- id: black-jupyter
1 change: 0 additions & 1 deletion 3d_classification/ignite/densenet_training_dict.py
@@ -93,7 +93,6 @@ def main():
# Ignite trainer expects batch=(img, label) and returns output=loss at every iteration,
# user can add output_transform to return other values, like: y_pred, y, etc.
def prepare_batch(batch, device=None, non_blocking=False):

return _prepare_batch((batch["img"], batch["label"]), device, non_blocking)

trainer = create_supervised_trainer(net, opt, loss, device, False, prepare_batch=prepare_batch)
1 change: 0 additions & 1 deletion 3d_registration/paired_lung_ct.ipynb
@@ -676,7 +676,6 @@
" model.eval()\n",
" with torch.no_grad():\n",
" for val_data in val_loader:\n",
"\n",
" val_ddf, val_pred_image, val_pred_label = forward(val_data, model)\n",
" val_pred_label[val_pred_label > 1] = 1\n",
"\n",
1 change: 0 additions & 1 deletion 3d_segmentation/brats_segmentation_3d.ipynb
@@ -541,7 +541,6 @@
" if (epoch + 1) % val_interval == 0:\n",
" model.eval()\n",
" with torch.no_grad():\n",
"\n",
" for val_data in val_loader:\n",
" val_inputs, val_labels = (\n",
" val_data[\"image\"].to(device),\n",
12 changes: 9 additions & 3 deletions 3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb
@@ -268,7 +268,6 @@
"\n",
"\n",
"def datafold_read(datalist, basedir, fold=0, key=\"training\"):\n",
"\n",
" with open(datalist) as f:\n",
" json_data = json.load(f)\n",
"\n",
@@ -610,7 +609,6 @@
" post_sigmoid=None,\n",
" post_pred=None,\n",
"):\n",
"\n",
" val_acc_max = 0.0\n",
" dices_tc = []\n",
" dices_wt = []\n",
@@ -705,7 +703,15 @@
"source": [
"start_epoch = 0\n",
"\n",
"(val_acc_max, dices_tc, dices_wt, dices_et, dices_avg, loss_epochs, trains_epoch,) = trainer(\n",
"(\n",
" val_acc_max,\n",
" dices_tc,\n",
" dices_wt,\n",
" dices_et,\n",
" dices_avg,\n",
" loss_epochs,\n",
" trains_epoch,\n",
") = trainer(\n",
" model=model,\n",
" train_loader=train_loader,\n",
" val_loader=val_loader,\n",
1 change: 0 additions & 1 deletion acceleration/distributed_training/brats_training_ddp.py
@@ -132,7 +132,6 @@ def __init__(
num_workers=0,
shuffle=False,
) -> None:

if not os.path.isdir(root_dir):
raise ValueError("root directory root_dir must be a directory.")
self.section = section
1 change: 0 additions & 1 deletion auto3dseg/notebooks/ensemble_byoc.ipynb
@@ -175,7 +175,6 @@
" \"\"\"\n",
"\n",
" def __init__(self, n_models=3):\n",
"\n",
" super().__init__()\n",
" self.n_models = n_models\n",
"\n",
2 changes: 0 additions & 2 deletions auto3dseg/tasks/hecktor22/hecktor_crop_neck_region.py
@@ -37,7 +37,6 @@ def __init__(
self.box_size = box_size

def __call__(self, data):

d = dict(data)

im_pet = d["image2"][0]
@@ -92,7 +91,6 @@ def __call__(self, data):
return d

def extract_roi(self, im_pet, box_size):

crop_len = int(0.75 * im_pet.shape[2])
im = im_pet[..., crop_len:]

3 changes: 0 additions & 3 deletions competitions/MICCAI/surgtoolloc/classification_files/train.py
@@ -35,7 +35,6 @@


def main(cfg):

os.makedirs(str(cfg.output_dir + f"/fold{cfg.fold}/"), exist_ok=True)
set_seed(cfg.seed)
# set dataset, dataloader
@@ -190,7 +189,6 @@ def run_train(


def run_eval(model, val_dataloader, cfg, writer, epoch, metric):

model.eval()
torch.set_grad_enabled(False)

@@ -213,7 +211,6 @@ def run_eval(model, val_dataloader, cfg, writer, epoch, metric):


if __name__ == "__main__":

sys.path.append("configs")
sys.path.append("models")
sys.path.append("data")
2 changes: 0 additions & 2 deletions competitions/MICCAI/surgtoolloc/classification_files/utils.py
@@ -45,7 +45,6 @@ def set_seed(seed):


def get_train_dataloader(train_dataset, cfg):

train_dataloader = DataLoader(
train_dataset,
sampler=None,
@@ -61,7 +60,6 @@ def get_train_dataloader(train_dataset, cfg):


def get_val_dataloader(val_dataset, cfg):

val_dataloader = DataLoader(
val_dataset,
shuffle=False,
@@ -97,7 +97,6 @@ def get_mask(self, study_id: str, img_shape: Tuple, is_annotated: int):
mask = np.zeros((img_shape[0], img_shape[1], self.cfg.seg_dim))

for idx, data in df.iterrows():

xys = [np.array(ast.literal_eval(data["data"])).clip(0, np.inf).astype(np.int32)[:, None, :]]

m = np.zeros(img_shape)
@@ -120,7 +119,6 @@ def get_mask(self, study_id: str, img_shape: Tuple, is_annotated: int):
return mask

def __getitem__(self, idx):

study_id = self.study_ids[idx]
label = self.labels[idx]
is_annotated = self.is_annotated[idx]
@@ -92,7 +92,6 @@ def __init__(
self.blocks = nn.ModuleList(blocks)

def forward(self, *features: Sequence[torch.Tensor]):

features = features[1:][::-1]
skips = features[1:]
x = features[0]
@@ -215,7 +214,6 @@ def __init__(self, cfg):
print("weights loaded from", cfg.pretrained_weights)

def forward(self, batch):

x_in = batch["input"]
enc_out = self.encoder(x_in)

6 changes: 0 additions & 6 deletions competitions/kaggle/RANZCR/4th_place_solution/train.py
@@ -42,7 +42,6 @@


def main(cfg):

os.makedirs(str(cfg.output_dir + f"/fold{cfg.fold}/"), exist_ok=True)

# set random seed, works when use all data to train
@@ -194,12 +193,10 @@ def run_train(
scheduler.step()

if step % cfg.batch_size == 0:

progress_bar.set_description(f"loss: {np.mean(losses[-10:]):.2f}")


def run_eval(model, val_dataloader, cfg, writer, epoch):

model.eval()
torch.set_grad_enabled(False)

@@ -228,7 +225,6 @@ def run_eval(model, val_dataloader, cfg, writer, epoch):
val_loss = np.mean(val_losses)

if cfg.compute_auc is True:

val_preds = torch.cat(val_preds)
val_targets = torch.cat(val_targets)
avg_auc = compute_roc_auc(val_preds, val_targets, average="macro")
@@ -240,7 +236,6 @@ def run_eval(model, val_dataloader, cfg, writer, epoch):


def run_infer(weights_folder_path, cfg):

cfg.pretrained = False
# for local test, please modify the following path into actual path.
cfg.data_folder = cfg.data_dir + "test/"
@@ -296,7 +291,6 @@ def run_infer(weights_folder_path, cfg):


if __name__ == "__main__":

sys.path.append("configs")
sys.path.append("models")
sys.path.append("data")
5 changes: 0 additions & 5 deletions competitions/kaggle/RANZCR/4th_place_solution/utils.py
@@ -33,7 +33,6 @@ def get_train_dataset(train_df, cfg):


def get_train_dataloader(train_dataset, cfg):

train_dataloader = DataLoader(
train_dataset,
sampler=None,
@@ -54,7 +53,6 @@ def get_val_dataset(val_df, cfg):


def get_val_dataloader(val_dataset, cfg):

val_dataloader = DataLoader(
val_dataset,
sampler=SequentialSampler(val_dataset),
@@ -73,7 +71,6 @@ def get_test_dataset(test_df, cfg):


def get_test_dataloader(test_dataset, cfg):

test_dataloader = DataLoader(
test_dataset,
shuffle=False,
@@ -84,15 +81,13 @@ def get_test_dataloader(test_dataset, cfg):


def get_optimizer(model, cfg):

params = model.parameters()
optimizer = optim.Adam(params, lr=cfg.lr, weight_decay=cfg.weight_decay)

return optimizer


def get_scheduler(cfg, optimizer, total_steps):

scheduler = WarmupCosineSchedule(
optimizer,
warmup_steps=cfg.warmup * (total_steps // cfg.batch_size),
2 changes: 0 additions & 2 deletions deep_atlas/deep_atlas_tutorial.ipynb
@@ -1251,7 +1251,6 @@
"val_interval = 5\n",
"\n",
"for epoch_number in range(max_epochs):\n",
"\n",
" print(f\"Epoch {epoch_number+1}/{max_epochs}:\")\n",
"\n",
" seg_net.train()\n",
@@ -1794,7 +1793,6 @@
"best_reg_validation_loss = float(\"inf\")\n",
"\n",
"for epoch_number in range(max_epochs):\n",
"\n",
" print(f\"Epoch {epoch_number+1}/{max_epochs}:\")\n",
"\n",
" # ------------------------------------------------\n",
2 changes: 0 additions & 2 deletions deepedit/ignite/train.py
@@ -194,7 +194,6 @@ def get_loaders(args, pre_transforms):


def create_trainer(args):

set_determinism(seed=args.seed)

multi_gpu = args.multi_gpu
@@ -431,7 +430,6 @@ def main():


if __name__ == "__main__":

logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
1 change: 0 additions & 1 deletion deployment/Triton/client/client.py
@@ -61,7 +61,6 @@ def open_nifti_files(input_path):


if __name__ == "__main__":

parser = argparse.ArgumentParser(description="Triton CLI for COVID classification inference from NIFTI data")
parser.add_argument(
"input",
1 change: 0 additions & 1 deletion deployment/Triton/client/client_mednist.py
@@ -65,7 +65,6 @@ def open_jpeg_files(input_path):


if __name__ == "__main__":

parser = argparse.ArgumentParser(description="Triton CLI for MedNist classification inference from JPEG data")
parser.add_argument(
"input",
1 change: 0 additions & 1 deletion deployment/Triton/models/mednist_class/1/model.py
@@ -158,7 +158,6 @@ def execute(self, requests):
batched_img = []
print("starting request")
for request in requests:

# get the input by name (as configured in config.pbtxt)
input_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")

1 change: 0 additions & 1 deletion deployment/Triton/models/monai_covid/1/model.py
@@ -152,7 +152,6 @@ def execute(self, requests):
responses = []

for request in requests:

# get the input by name (as configured in config.pbtxt)
input_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")

2 changes: 0 additions & 2 deletions detection/luna16_visualization/save_obj.py
@@ -23,9 +23,7 @@


def save_obj(vertices, faces, filename):

with open(filename, "w") as f:

for v in vertices:
f.write("v {} {} {}\n".format(*np.array(v)))

@@ -380,7 +380,6 @@
" loss_fn=image_loss,\n",
" affine_transform=warp_layer,\n",
"):\n",
"\n",
" train_loader = tqdm.tqdm(train_loader, desc=\"train\")\n",
" net_model.train()\n",
" net_model.to(device)\n",
2 changes: 1 addition & 1 deletion federated_learning/substra/assets/objective/metrics.py
@@ -22,7 +22,7 @@ def score(self, y_true, y_pred):
metric_sum = 0.0
metric_count = 0
with torch.no_grad():
for (val_true, val_pred) in zip(y_true, y_pred):
for val_true, val_pred in zip(y_true, y_pred):
val_true, _ = val_true
val_pred, _ = val_pred
value = self.dice_metric(
1 change: 1 addition & 0 deletions model_zoo/TCIA_PROSTATEx_Prostate_MRI_Anatomy_Model.ipynb
@@ -324,6 +324,7 @@
"# scripts_dir = os.path.join(zoo_dir, model_name, \"scripts\")\n",
"# sys.path.insert(1, scripts_dir)\n",
"\n",
"\n",
"# Compact alternative implementation of this model's specific cropping step.\n",
"# Ideally this would have been accomplished using MONAI's transforms\n",
"# for data pre-processing / augmentation instead of using a separate\n",
1 change: 0 additions & 1 deletion modules/autoencoder_mednist.ipynb
@@ -364,7 +364,6 @@
"outputs": [],
"source": [
"def train(dict_key_for_training, max_epochs=10, learning_rate=1e-3):\n",
"\n",
" model = AutoEncoder(\n",
" spatial_dims=2,\n",
" in_channels=1,\n",
@@ -118,7 +118,6 @@ def __call__(self, image_file_list, *args, **kwargs):


def main():

image_dir = "./exampleImages"
image_file_list = glob.glob(image_dir + "/*.png")
output_size = (400, 400)
1 change: 0 additions & 1 deletion modules/interpretability/class_lung_lesion.ipynb
@@ -605,7 +605,6 @@
"\n",
"example = 0\n",
"for item in items:\n",
"\n",
" data = train_ds[item] # this fetches training data with random augmentations\n",
" image, label = data[\"image\"].to(device).unsqueeze(0), data[\"label\"][1]\n",
" y_pred = model_3d(image)\n",
@@ -729,7 +729,6 @@
"\n",
"# Get images\n",
"for file in np.random.choice(val_files, size=5, replace=False):\n",
"\n",
" mode_tta, mean_tta, std_tta, vvc_tta = tt_aug(file, num_examples=10)\n",
" unmodified_data = minimal_transforms(file)\n",
"\n",
1 change: 0 additions & 1 deletion modules/lazy_resampling_functional.ipynb
@@ -91,7 +91,6 @@
"outputs": [],
"source": [
"def plot_img_operations(img):\n",
"\n",
" if isinstance(img, list):\n",
" print(f\"patch metadata of patch idx 0 out of {len(img)} samples.\\n\")\n",
" img_0 = img[0]\n",