diff --git a/mmseg_logs/chase_db1/20240614_171848/vis_data/20240614_171848.json b/mmseg_logs/chase_db1/20240614_171848/vis_data/20240614_171848.json
new file mode 100644
index 0000000000..60401d76f0
--- /dev/null
+++ b/mmseg_logs/chase_db1/20240614_171848/vis_data/20240614_171848.json
@@ -0,0 +1,3 @@
+{"lr": 0.009989084308262066, "data_time": 0.0038670063018798827, "loss": 0.6592155277729035, "decode.loss_ce": 0.47149578332901, "decode.acc_seg": 89.7125244140625, "aux.loss_ce": 0.18771974593400956, "aux.acc_seg": 89.7125244140625, "time": 0.39728693962097167, "iter": 50, "memory": 1717, "step": 50}
+{"lr": 0.00997794446709763, "data_time": 0.003805875778198242, "loss": 0.6025463283061981, "decode.loss_ce": 0.4323257476091385, "decode.acc_seg": 93.8629150390625, "aux.loss_ce": 0.17022058442234994, "aux.acc_seg": 93.8629150390625, "time": 0.3980520725250244, "iter": 100, "memory": 784, "step": 100}
+{"lr": 0.009966803229875268, "data_time": 0.0038983821868896484, "loss": 0.6277212738990784, "decode.loss_ce": 0.448396360874176, "decode.acc_seg": 76.31072998046875, "aux.loss_ce": 0.17932491898536682, "aux.acc_seg": 75.860595703125, "time": 0.3982886791229248, "iter": 150, "memory": 784, "step": 150}
diff --git a/mmseg_logs/chase_db1/20240614_171848/vis_data/config.py b/mmseg_logs/chase_db1/20240614_171848/vis_data/config.py
new file mode 100644
index 0000000000..c064330b97
--- /dev/null
+++ b/mmseg_logs/chase_db1/20240614_171848/vis_data/config.py
@@ -0,0 +1,332 @@
+crop_size = (
+    128,
+    128,
+)
+data_preprocessor = dict(
+    bgr_to_rgb=True,
+    mean=[
+        123.675,
+        116.28,
+        103.53,
+    ],
+    pad_val=0,
+    seg_pad_val=255,
+    size=(
+        128,
+        128,
+    ),
+    std=[
+        58.395,
+        57.12,
+        57.375,
+    ],
+    type='SegDataPreProcessor')
+data_root = 'data/CHASE_DB1'
+dataset_type = 'ChaseDB1Dataset'
+default_hooks = dict(
+    checkpoint=dict(by_epoch=False, interval=4000, type='CheckpointHook'),
+    logger=dict(interval=50, log_metric_by_epoch=False, type='LoggerHook'),
+    param_scheduler=dict(type='ParamSchedulerHook'),
+    sampler_seed=dict(type='DistSamplerSeedHook'),
+    timer=dict(type='IterTimerHook'),
+    visualization=dict(type='SegVisualizationHook'))
+default_scope = 'mmseg'
+env_cfg = dict(
+    cudnn_benchmark=True,
+    dist_cfg=dict(backend='nccl'),
+    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
+img_ratios = [
+    0.5,
+    0.75,
+    1.0,
+    1.25,
+    1.5,
+    1.75,
+]
+img_scale = (
+    960,
+    999,
+)
+launcher = 'none'
+load_from = None
+log_level = 'INFO'
+log_processor = dict(by_epoch=False)
+model = dict(
+    auxiliary_head=dict(
+        align_corners=False,
+        channels=64,
+        concat_input=False,
+        dropout_ratio=0.1,
+        in_channels=128,
+        in_index=3,
+        loss_decode=dict(
+            loss_weight=0.4, type='CrossEntropyLoss', use_sigmoid=False),
+        norm_cfg=dict(requires_grad=True, type='SyncBN'),
+        num_classes=2,
+        num_convs=1,
+        type='FCNHead'),
+    backbone=dict(
+        act_cfg=dict(type='ReLU'),
+        base_channels=64,
+        conv_cfg=None,
+        dec_dilations=(
+            1,
+            1,
+            1,
+            1,
+        ),
+        dec_num_convs=(
+            2,
+            2,
+            2,
+            2,
+        ),
+        downsamples=(
+            True,
+            True,
+            True,
+            True,
+        ),
+        enc_dilations=(
+            1,
+            1,
+            1,
+            1,
+            1,
+        ),
+        enc_num_convs=(
+            2,
+            2,
+            2,
+            2,
+            2,
+        ),
+        in_channels=3,
+        norm_cfg=dict(requires_grad=True, type='SyncBN'),
+        norm_eval=False,
+        num_stages=5,
+        strides=(
+            1,
+            1,
+            1,
+            1,
+            1,
+        ),
+        type='UNet',
+        upsample_cfg=dict(type='InterpConv'),
+        with_cp=False),
+    data_preprocessor=dict(
+        bgr_to_rgb=True,
+        mean=[
+            123.675,
+            116.28,
+            103.53,
+        ],
+        pad_val=0,
+        seg_pad_val=255,
+        size=(
+            128,
+            128,
+        ),
+        std=[
+            58.395,
+            57.12,
+            57.375,
+        ],
+        type='SegDataPreProcessor'),
+    decode_head=dict(
+        align_corners=False,
+        channels=64,
+        concat_input=False,
+        dropout_ratio=0.1,
+        in_channels=64,
+        in_index=4,
+        loss_decode=dict(
+            loss_weight=1.0, type='CrossEntropyLoss', use_sigmoid=False),
+        norm_cfg=dict(requires_grad=True, type='SyncBN'),
+        num_classes=2,
+        num_convs=1,
+        type='FCNHead'),
+    pretrained=None,
+    test_cfg=dict(crop_size=(
+        128,
+        128,
+    ), mode='slide', stride=(
+        85,
+        85,
+    )),
+    train_cfg=dict(),
+    type='EncoderDecoder')
+norm_cfg = dict(requires_grad=True, type='SyncBN')
+optim_wrapper = dict(
+    clip_grad=None,
+    optimizer=dict(lr=0.01, momentum=0.9, type='SGD', weight_decay=0.0005),
+    type='OptimWrapper')
+optimizer = dict(lr=0.01, momentum=0.9, type='SGD', weight_decay=0.0005)
+param_scheduler = [
+    dict(
+        begin=0,
+        by_epoch=False,
+        end=40000,
+        eta_min=0.0001,
+        power=0.9,
+        type='PolyLR'),
+]
+resume = False
+test_cfg = dict(type='TestLoop')
+test_dataloader = dict(
+    batch_size=1,
+    dataset=dict(
+        data_prefix=dict(
+            img_path='images/validation',
+            seg_map_path='annotations/validation'),
+        data_root='data/CHASE_DB1',
+        pipeline=[
+            dict(type='LoadImageFromFile'),
+            dict(keep_ratio=True, scale=(
+                960,
+                999,
+            ), type='Resize'),
+            dict(type='LoadAnnotations'),
+            dict(type='PackSegInputs'),
+        ],
+        type='ChaseDB1Dataset'),
+    num_workers=4,
+    persistent_workers=True,
+    sampler=dict(shuffle=False, type='DefaultSampler'))
+test_evaluator = dict(
+    iou_metrics=[
+        'mDice',
+    ], type='IoUMetric')
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(keep_ratio=True, scale=(
+        960,
+        999,
+    ), type='Resize'),
+    dict(type='LoadAnnotations'),
+    dict(type='PackSegInputs'),
+]
+train_cfg = dict(max_iters=40000, type='IterBasedTrainLoop', val_interval=4000)
+train_dataloader = dict(
+    batch_size=4,
+    dataset=dict(
+        dataset=dict(
+            data_prefix=dict(
+                img_path='images/training',
+                seg_map_path='annotations/training'),
+            data_root='data/CHASE_DB1',
+            pipeline=[
+                dict(type='LoadImageFromFile'),
+                dict(type='LoadAnnotations'),
+                dict(
+                    keep_ratio=True,
+                    ratio_range=(
+                        0.5,
+                        2.0,
+                    ),
+                    scale=(
+                        960,
+                        999,
+                    ),
+                    type='RandomResize'),
+                dict(
+                    cat_max_ratio=0.75,
+                    crop_size=(
+                        128,
+                        128,
+                    ),
+                    type='RandomCrop'),
+                dict(prob=0.5, type='RandomFlip'),
+                dict(type='PhotoMetricDistortion'),
+                dict(type='PackSegInputs'),
+            ],
+            type='ChaseDB1Dataset'),
+        times=40000,
+        type='RepeatDataset'),
+    num_workers=4,
+    persistent_workers=True,
+    sampler=dict(shuffle=True, type='InfiniteSampler'))
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations'),
+    dict(
+        keep_ratio=True,
+        ratio_range=(
+            0.5,
+            2.0,
+        ),
+        scale=(
+            960,
+            999,
+        ),
+        type='RandomResize'),
+    dict(cat_max_ratio=0.75, crop_size=(
+        128,
+        128,
+    ), type='RandomCrop'),
+    dict(prob=0.5, type='RandomFlip'),
+    dict(type='PhotoMetricDistortion'),
+    dict(type='PackSegInputs'),
+]
+tta_model = dict(type='SegTTAModel')
+tta_pipeline = [
+    dict(backend_args=None, type='LoadImageFromFile'),
+    dict(
+        transforms=[
+            [
+                dict(keep_ratio=True, scale_factor=0.5, type='Resize'),
+                dict(keep_ratio=True, scale_factor=0.75, type='Resize'),
+                dict(keep_ratio=True, scale_factor=1.0, type='Resize'),
+                dict(keep_ratio=True, scale_factor=1.25, type='Resize'),
+                dict(keep_ratio=True, scale_factor=1.5, type='Resize'),
+                dict(keep_ratio=True, scale_factor=1.75, type='Resize'),
+            ],
+            [
+                dict(direction='horizontal', prob=0.0, type='RandomFlip'),
+                dict(direction='horizontal', prob=1.0, type='RandomFlip'),
+            ],
+            [
+                dict(type='LoadAnnotations'),
+            ],
+            [
+                dict(type='PackSegInputs'),
+            ],
+        ],
+        type='TestTimeAug'),
+]
+val_cfg = dict(type='ValLoop')
+val_dataloader = dict(
+    batch_size=1,
+    dataset=dict(
+        data_prefix=dict(
+            img_path='images/validation',
+            seg_map_path='annotations/validation'),
+        data_root='data/CHASE_DB1',
+        pipeline=[
+            dict(type='LoadImageFromFile'),
+            dict(keep_ratio=True, scale=(
+                960,
+                999,
+            ), type='Resize'),
+            dict(type='LoadAnnotations'),
+            dict(type='PackSegInputs'),
+        ],
+        type='ChaseDB1Dataset'),
+    num_workers=4,
+    persistent_workers=True,
+    sampler=dict(shuffle=False, type='DefaultSampler'))
+val_evaluator = dict(
+    iou_metrics=[
+        'mDice',
+    ], type='IoUMetric')
+vis_backends = [
+    dict(type='LocalVisBackend'),
+]
+visualizer = dict(
+    name='visualizer',
+    type='SegLocalVisualizer',
+    vis_backends=[
+        dict(type='LocalVisBackend'),
+    ])
+work_dir = 'mmseg_logs/chase_db1/'
diff --git a/mmseg_logs/chase_db1/20240614_171848/vis_data/scalars.json b/mmseg_logs/chase_db1/20240614_171848/vis_data/scalars.json
new file mode 100644
index 0000000000..60401d76f0
--- /dev/null
+++ b/mmseg_logs/chase_db1/20240614_171848/vis_data/scalars.json
@@ -0,0 +1,3 @@
+{"lr": 0.009989084308262066, "data_time": 0.0038670063018798827, "loss": 0.6592155277729035, "decode.loss_ce": 0.47149578332901, "decode.acc_seg": 89.7125244140625, "aux.loss_ce": 0.18771974593400956, "aux.acc_seg": 89.7125244140625, "time": 0.39728693962097167, "iter": 50, "memory": 1717, "step": 50}
+{"lr": 0.00997794446709763, "data_time": 0.003805875778198242, "loss": 0.6025463283061981, "decode.loss_ce": 0.4323257476091385, "decode.acc_seg": 93.8629150390625, "aux.loss_ce": 0.17022058442234994, "aux.acc_seg": 93.8629150390625, "time": 0.3980520725250244, "iter": 100, "memory": 784, "step": 100}
+{"lr": 0.009966803229875268, "data_time": 0.0038983821868896484, "loss": 0.6277212738990784, "decode.loss_ce": 0.448396360874176, "decode.acc_seg": 76.31072998046875, "aux.loss_ce": 0.17932491898536682, "aux.acc_seg": 75.860595703125, "time": 0.3982886791229248, "iter": 150, "memory": 784, "step": 150}
diff --git a/mmseg_logs/chase_db1/unet-s5-d16_fcn_4xb4-40k_chase-db1-128x128.py b/mmseg_logs/chase_db1/unet-s5-d16_fcn_4xb4-40k_chase-db1-128x128.py
new file mode 100644
index 0000000000..c064330b97
--- /dev/null
+++ b/mmseg_logs/chase_db1/unet-s5-d16_fcn_4xb4-40k_chase-db1-128x128.py
@@ -0,0 +1,332 @@
+crop_size = (
+    128,
+    128,
+)
+data_preprocessor = dict(
+    bgr_to_rgb=True,
+    mean=[
+        123.675,
+        116.28,
+        103.53,
+    ],
+    pad_val=0,
+    seg_pad_val=255,
+    size=(
+        128,
+        128,
+    ),
+    std=[
+        58.395,
+        57.12,
+        57.375,
+    ],
+    type='SegDataPreProcessor')
+data_root = 'data/CHASE_DB1'
+dataset_type = 'ChaseDB1Dataset'
+default_hooks = dict(
+    checkpoint=dict(by_epoch=False, interval=4000, type='CheckpointHook'),
+    logger=dict(interval=50, log_metric_by_epoch=False, type='LoggerHook'),
+    param_scheduler=dict(type='ParamSchedulerHook'),
+    sampler_seed=dict(type='DistSamplerSeedHook'),
+    timer=dict(type='IterTimerHook'),
+    visualization=dict(type='SegVisualizationHook'))
+default_scope = 'mmseg'
+env_cfg = dict(
+    cudnn_benchmark=True,
+    dist_cfg=dict(backend='nccl'),
+    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
+img_ratios = [
+    0.5,
+    0.75,
+    1.0,
+    1.25,
+    1.5,
+    1.75,
+]
+img_scale = (
+    960,
+    999,
+)
+launcher = 'none'
+load_from = None
+log_level = 'INFO'
+log_processor = dict(by_epoch=False)
+model = dict(
+    auxiliary_head=dict(
+        align_corners=False,
+        channels=64,
+        concat_input=False,
+        dropout_ratio=0.1,
+        in_channels=128,
+        in_index=3,
+        loss_decode=dict(
+            loss_weight=0.4, type='CrossEntropyLoss', use_sigmoid=False),
+        norm_cfg=dict(requires_grad=True, type='SyncBN'),
+        num_classes=2,
+        num_convs=1,
+        type='FCNHead'),
+    backbone=dict(
+        act_cfg=dict(type='ReLU'),
+        base_channels=64,
+        conv_cfg=None,
+        dec_dilations=(
+            1,
+            1,
+            1,
+            1,
+        ),
+        dec_num_convs=(
+            2,
+            2,
+            2,
+            2,
+        ),
+        downsamples=(
+            True,
+            True,
+            True,
+            True,
+        ),
+        enc_dilations=(
+            1,
+            1,
+            1,
+            1,
+            1,
+        ),
+        enc_num_convs=(
+            2,
+            2,
+            2,
+            2,
+            2,
+        ),
+        in_channels=3,
+        norm_cfg=dict(requires_grad=True, type='SyncBN'),
+        norm_eval=False,
+        num_stages=5,
+        strides=(
+            1,
+            1,
+            1,
+            1,
+            1,
+        ),
+        type='UNet',
+        upsample_cfg=dict(type='InterpConv'),
+        with_cp=False),
+    data_preprocessor=dict(
+        bgr_to_rgb=True,
+        mean=[
+            123.675,
+            116.28,
+            103.53,
+        ],
+        pad_val=0,
+        seg_pad_val=255,
+        size=(
+            128,
+            128,
+        ),
+        std=[
+            58.395,
+            57.12,
+            57.375,
+        ],
+        type='SegDataPreProcessor'),
+    decode_head=dict(
+        align_corners=False,
+        channels=64,
+        concat_input=False,
+        dropout_ratio=0.1,
+        in_channels=64,
+        in_index=4,
+        loss_decode=dict(
+            loss_weight=1.0, type='CrossEntropyLoss', use_sigmoid=False),
+        norm_cfg=dict(requires_grad=True, type='SyncBN'),
+        num_classes=2,
+        num_convs=1,
+        type='FCNHead'),
+    pretrained=None,
+    test_cfg=dict(crop_size=(
+        128,
+        128,
+    ), mode='slide', stride=(
+        85,
+        85,
+    )),
+    train_cfg=dict(),
+    type='EncoderDecoder')
+norm_cfg = dict(requires_grad=True, type='SyncBN')
+optim_wrapper = dict(
+    clip_grad=None,
+    optimizer=dict(lr=0.01, momentum=0.9, type='SGD', weight_decay=0.0005),
+    type='OptimWrapper')
+optimizer = dict(lr=0.01, momentum=0.9, type='SGD', weight_decay=0.0005)
+param_scheduler = [
+    dict(
+        begin=0,
+        by_epoch=False,
+        end=40000,
+        eta_min=0.0001,
+        power=0.9,
+        type='PolyLR'),
+]
+resume = False
+test_cfg = dict(type='TestLoop')
+test_dataloader = dict(
+    batch_size=1,
+    dataset=dict(
+        data_prefix=dict(
+            img_path='images/validation',
+            seg_map_path='annotations/validation'),
+        data_root='data/CHASE_DB1',
+        pipeline=[
+            dict(type='LoadImageFromFile'),
+            dict(keep_ratio=True, scale=(
+                960,
+                999,
+            ), type='Resize'),
+            dict(type='LoadAnnotations'),
+            dict(type='PackSegInputs'),
+        ],
+        type='ChaseDB1Dataset'),
+    num_workers=4,
+    persistent_workers=True,
+    sampler=dict(shuffle=False, type='DefaultSampler'))
+test_evaluator = dict(
+    iou_metrics=[
+        'mDice',
+    ], type='IoUMetric')
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(keep_ratio=True, scale=(
+        960,
+        999,
+    ), type='Resize'),
+    dict(type='LoadAnnotations'),
+    dict(type='PackSegInputs'),
+]
+train_cfg = dict(max_iters=40000, type='IterBasedTrainLoop', val_interval=4000)
+train_dataloader = dict(
+    batch_size=4,
+    dataset=dict(
+        dataset=dict(
+            data_prefix=dict(
+                img_path='images/training',
+                seg_map_path='annotations/training'),
+            data_root='data/CHASE_DB1',
+            pipeline=[
+                dict(type='LoadImageFromFile'),
+                dict(type='LoadAnnotations'),
+                dict(
+                    keep_ratio=True,
+                    ratio_range=(
+                        0.5,
+                        2.0,
+                    ),
+                    scale=(
+                        960,
+                        999,
+                    ),
+                    type='RandomResize'),
+                dict(
+                    cat_max_ratio=0.75,
+                    crop_size=(
+                        128,
+                        128,
+                    ),
+                    type='RandomCrop'),
+                dict(prob=0.5, type='RandomFlip'),
+                dict(type='PhotoMetricDistortion'),
+                dict(type='PackSegInputs'),
+            ],
+            type='ChaseDB1Dataset'),
+        times=40000,
+        type='RepeatDataset'),
+    num_workers=4,
+    persistent_workers=True,
+    sampler=dict(shuffle=True, type='InfiniteSampler'))
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations'),
+    dict(
+        keep_ratio=True,
+        ratio_range=(
+            0.5,
+            2.0,
+        ),
+        scale=(
+            960,
+            999,
+        ),
+        type='RandomResize'),
+    dict(cat_max_ratio=0.75, crop_size=(
+        128,
+        128,
+    ), type='RandomCrop'),
+    dict(prob=0.5, type='RandomFlip'),
+    dict(type='PhotoMetricDistortion'),
+    dict(type='PackSegInputs'),
+]
+tta_model = dict(type='SegTTAModel')
+tta_pipeline = [
+    dict(backend_args=None, type='LoadImageFromFile'),
+    dict(
+        transforms=[
+            [
+                dict(keep_ratio=True, scale_factor=0.5, type='Resize'),
+                dict(keep_ratio=True, scale_factor=0.75, type='Resize'),
+                dict(keep_ratio=True, scale_factor=1.0, type='Resize'),
+                dict(keep_ratio=True, scale_factor=1.25, type='Resize'),
+                dict(keep_ratio=True, scale_factor=1.5, type='Resize'),
+                dict(keep_ratio=True, scale_factor=1.75, type='Resize'),
+            ],
+            [
+                dict(direction='horizontal', prob=0.0, type='RandomFlip'),
+                dict(direction='horizontal', prob=1.0, type='RandomFlip'),
+            ],
+            [
+                dict(type='LoadAnnotations'),
+            ],
+            [
+                dict(type='PackSegInputs'),
+            ],
+        ],
+        type='TestTimeAug'),
+]
+val_cfg = dict(type='ValLoop')
+val_dataloader = dict(
+    batch_size=1,
+    dataset=dict(
+        data_prefix=dict(
+            img_path='images/validation',
+            seg_map_path='annotations/validation'),
+        data_root='data/CHASE_DB1',
+        pipeline=[
+            dict(type='LoadImageFromFile'),
+            dict(keep_ratio=True, scale=(
+                960,
+                999,
+            ), type='Resize'),
+            dict(type='LoadAnnotations'),
+            dict(type='PackSegInputs'),
+        ],
+        type='ChaseDB1Dataset'),
+    num_workers=4,
+    persistent_workers=True,
+    sampler=dict(shuffle=False, type='DefaultSampler'))
+val_evaluator = dict(
+    iou_metrics=[
+        'mDice',
+    ], type='IoUMetric')
+vis_backends = [
+    dict(type='LocalVisBackend'),
+]
+visualizer = dict(
+    name='visualizer',
+    type='SegLocalVisualizer',
+    vis_backends=[
+        dict(type='LocalVisBackend'),
+    ])
+work_dir = 'mmseg_logs/chase_db1/'