From 842941b300f49b1fbe481329d2b7fab87c4fd51e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 23 Dec 2024 14:48:22 -0300 Subject: [PATCH 01/39] Adding padding at the input when necessary MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/pixel_wise_model.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index 6b9145c8..3a2a0ad4 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -7,7 +7,7 @@ from terratorch.models.heads import RegressionHead, SegmentationHead from terratorch.models.model import AuxiliaryHeadWithDecoderWithoutInstantiatedHead, Model, ModelOutput - +from terratorch.models.backbones.prithvi_vit import pad_images def freeze_module(module: nn.Module): for param in module.parameters(): @@ -70,6 +70,9 @@ def __init__( self.neck = neck self.rescale = rescale + # TODO Maybe it's better to pass it an input argument + self.patch_size = self.encoder._timm_module.patch_embed.patch_size[-1] + def freeze_encoder(self): freeze_module(self.encoder) @@ -77,9 +80,14 @@ def freeze_decoder(self): freeze_module(self.decoder) freeze_module(self.head) - # TODO: do this properly def check_input_shape(self, x: torch.Tensor) -> bool: # noqa: ARG002 - return True + + x_shape = x.shape[2:] + if all([i//self.patch_size==0 for i in x_shape]): + return x + else: + x = pad_images(x, self.patch_size, "constant") + return x @staticmethod def _check_for_single_channel_and_squeeze(x): From 65288615b8884e1aa01b66e7a8e9c4be3e3a6a35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Thu, 2 Jan 2025 14:21:17 -0300 Subject: [PATCH 02/39] patch_size as a explicit argument for PixelWiseModel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/encoder_decoder_factory.py | 17 ++ terratorch/models/pixel_wise_model.py | 5 +- .../manufactured-finetune_prithvi_pad.yaml | 151 ++++++++++++++++++ 3 files changed, 170 insertions(+), 3 deletions(-) create mode 100644 tests/resources/configs/manufactured-finetune_prithvi_pad.yaml diff --git a/terratorch/models/encoder_decoder_factory.py b/terratorch/models/encoder_decoder_factory.py index 04727265..daff58f8 100644 --- a/terratorch/models/encoder_decoder_factory.py +++ b/terratorch/models/encoder_decoder_factory.py @@ -65,6 +65,8 @@ def _check_all_args_used(kwargs): msg = f"arguments {kwargs} were passed but not used." raise ValueError(msg) +def _get_argument_from_instance(model, name): + return getattr(model._timm_module.patch_embed, name)[-1] @MODEL_FACTORY_REGISTRY.register class EncoderDecoderFactory(ModelFactory): @@ -128,6 +130,17 @@ def build_model( backbone_kwargs, kwargs = extract_prefix_keys(kwargs, "backbone_") backbone = _get_backbone(backbone, **backbone_kwargs) + # Getting necessary parameters + # Patch size + try: + patch_size = backbone_kwargs["patch_size"] + except KeyError: + print("Trying to get patch_size from the backbone") + patch_size = _get_argument_from_instance(backbone, "patch_size") + print(f"Found patch_size as {patch_size}") + else: + print("patch_size could not be found. 
Define it in the config file.") + if peft_config is not None: if not backbone_kwargs.get("pretrained", False): msg = ( @@ -166,6 +179,7 @@ def build_model( backbone, decoder, head_kwargs, + patch_size=patch_size, necks=neck_list, decoder_includes_head=decoder_includes_head, rescale=rescale, @@ -191,6 +205,7 @@ def build_model( backbone, decoder, head_kwargs, + patch_size=patch_size, necks=neck_list, decoder_includes_head=decoder_includes_head, rescale=rescale, @@ -203,6 +218,7 @@ def _build_appropriate_model( backbone: nn.Module, decoder: nn.Module, head_kwargs: dict, + patch_size: int, decoder_includes_head: bool = False, necks: list[Neck] | None = None, rescale: bool = True, # noqa: FBT001, FBT002 @@ -218,6 +234,7 @@ def _build_appropriate_model( backbone, decoder, head_kwargs, + patch_size=patch_size, decoder_includes_head=decoder_includes_head, neck=neck_module, rescale=rescale, diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index 3a2a0ad4..d9cb37cb 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -26,6 +26,7 @@ def __init__( encoder: nn.Module, decoder: nn.Module, head_kwargs: dict, + patch_size: int, decoder_includes_head: bool = False, auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, neck: nn.Module | None = None, @@ -69,9 +70,7 @@ def __init__( self.neck = neck self.rescale = rescale - - # TODO Maybe it's better to pass it an input argument - self.patch_size = self.encoder._timm_module.patch_embed.patch_size[-1] + self.patch_size = patch_size def freeze_encoder(self): freeze_module(self.encoder) diff --git a/tests/resources/configs/manufactured-finetune_prithvi_pad.yaml b/tests/resources/configs/manufactured-finetune_prithvi_pad.yaml new file mode 100644 index 00000000..c0abd9f3 --- /dev/null +++ b/tests/resources/configs/manufactured-finetune_prithvi_pad.yaml @@ -0,0 +1,151 @@ +# lightning.pytorch==2.1.1 +seed_everything: 42 +trainer: + accelerator: cpu + strategy: auto + devices: auto + num_nodes: 1 + # precision: 16-mixed + logger: + class_path: TensorBoardLogger + init_args: + save_dir: tests/ + name: all_ecos_random + callbacks: + - class_path: RichProgressBar + - class_path: LearningRateMonitor + init_args: + logging_interval: epoch + - class_path: EarlyStopping + init_args: + monitor: val/loss + patience: 100 + max_epochs: 2 + check_val_every_n_epoch: 1 + log_every_n_steps: 20 + enable_checkpointing: true + default_root_dir: tests/ +data: + class_path: GenericNonGeoPixelwiseRegressionDataModule + init_args: + batch_size: 2 + num_workers: 4 + train_transform: + #- class_path: albumentations.HorizontalFlip + # init_args: + # p: 0.5 + #- class_path: albumentations.Rotate + # init_args: + # limit: 30 + # border_mode: 0 # cv2.BORDER_CONSTANT + # value: 0 + # # mask_value: 1 + # p: 0.5 + - class_path: ToTensorV2 + dataset_bands: + - 0 + - BLUE + - GREEN + - RED + - NIR_NARROW + - SWIR_1 + - SWIR_2 + - 1 + - 2 + - 3 + - 4 + output_bands: + - BLUE + - GREEN + - RED + - NIR_NARROW + - SWIR_1 + - SWIR_2 + rgb_indices: + - 2 + - 1 + - 0 + train_data_root: tests/resources/inputs + train_label_data_root: tests/resources/inputs + val_data_root: tests/resources/inputs + val_label_data_root: tests/resources/inputs + test_data_root: tests/resources/inputs + test_label_data_root: tests/resources/inputs + img_grep: "regression*input*.tif" + label_grep: "regression*label*.tif" + means: + - 547.36707 + - 898.5121 + - 1020.9082 + - 2665.5352 + - 2340.584 + - 1610.1407 
+ stds: + - 411.4701 + - 558.54065 + - 815.94025 + - 812.4403 + - 1113.7145 + - 1067.641 + no_label_replace: -1 + no_data_replace: 0 + +model: + class_path: terratorch.tasks.PixelwiseRegressionTask + init_args: + model_args: + decoder: UperNetDecoder + pretrained: false + backbone: prithvi_eo_v2_300 + # backbone_pretrained_cfg_overlay: + # file: tests/prithvi_vit_300.pt + backbone_drop_path_rate: 0.3 + # backbone_window_size: 8 + backbone_patch_size: 13 + decoder_channels: 64 + num_frames: 1 + in_channels: 6 + bands: + - BLUE + - GREEN + - RED + - NIR_NARROW + - SWIR_1 + - SWIR_2 + head_dropout: 0.5708022831486758 + head_final_act: torch.nn.ReLU + head_learned_upscale_layers: 2 + loss: rmse + #aux_heads: + # - name: aux_head + # decoder: IdentityDecoder + # decoder_args: + # decoder_out_index: 2 + # head_dropout: 0,5 + # head_channel_list: + # - 64 + # head_final_act: torch.nn.ReLU + #aux_loss: + # aux_head: 0.4 + ignore_index: -1 + freeze_backbone: true + freeze_decoder: false + model_factory: PrithviModelFactory + + # uncomment this block for tiled inference + # tiled_inference_parameters: + # h_crop: 224 + # h_stride: 192 + # w_crop: 224 + # w_stride: 192 + # average_patches: true +optimizer: + class_path: torch.optim.AdamW + init_args: + lr: 0.00013524680528283027 + weight_decay: 0.047782217873995426 +lr_scheduler: + class_path: ReduceLROnPlateau + init_args: + monitor: val/loss + From 8376a5ea14ae4df73682f1e846b936f5a5b41bca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Thu, 2 Jan 2025 15:17:29 -0300 Subject: [PATCH 03/39] logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/encoder_decoder_factory.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/terratorch/models/encoder_decoder_factory.py b/terratorch/models/encoder_decoder_factory.py index daff58f8..3c0fba5d 100644 --- a/terratorch/models/encoder_decoder_factory.py +++ b/terratorch/models/encoder_decoder_factory.py @@ -130,16 +130,16 @@ def build_model( backbone_kwargs, kwargs = extract_prefix_keys(kwargs, "backbone_") backbone = _get_backbone(backbone, **backbone_kwargs) - # Getting necessary parameters + # Getting some necessary parameters # Patch size try: patch_size = backbone_kwargs["patch_size"] except KeyError: - print("Trying to get patch_size from the backbone") + print("Trying to get `patch_size` from the backbone") patch_size = _get_argument_from_instance(backbone, "patch_size") - print(f"Found patch_size as {patch_size}") + print(f"Found `patch_size` as {patch_size}") else: - print("patch_size could not be found. Define it in the config file.") + print("The argument `patch_size` could not be found. 
Define it in the config file.") if peft_config is not None: if not backbone_kwargs.get("pretrained", False): From 8fa3bbaf9ba49dffc1fdd1784970d5cb862a20cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Thu, 2 Jan 2025 15:18:55 -0300 Subject: [PATCH 04/39] Cropping image MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/pixel_wise_model.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index d9cb37cb..a5d7e77c 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -2,6 +2,7 @@ import torch import torch.nn.functional as F # noqa: N812 +import torchvision.transforms as transforms from segmentation_models_pytorch.base import SegmentationModel from torch import nn @@ -79,7 +80,7 @@ def freeze_decoder(self): freeze_module(self.decoder) freeze_module(self.head) - def check_input_shape(self, x: torch.Tensor) -> bool: # noqa: ARG002 + def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: # noqa: ARG002 x_shape = x.shape[2:] if all([i//self.patch_size==0 for i in x_shape]): @@ -88,6 +89,10 @@ def check_input_shape(self, x: torch.Tensor) -> bool: # noqa: ARG002 x = pad_images(x, self.patch_size, "constant") return x + def crop_image(self, x:torch.Tensor, size:tuple) -> torch.Tensor: + + return transforms.CenterCrop(size)(x) + @staticmethod def _check_for_single_channel_and_squeeze(x): if x.shape[1] == 1: @@ -96,7 +101,7 @@ def _check_for_single_channel_and_squeeze(x): def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: """Sequentially pass `x` through model`s encoder, decoder and heads""" - self.check_input_shape(x) + if isinstance(x, torch.Tensor): input_size = x.shape[-2:] elif hasattr(kwargs, 'image_size'): @@ -106,6 +111,8 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: input_size = list(x.values())[0].shape[-2:] else: ValueError('Could not infer input shape.') + + x = self.check_input_shape(x) features = self.encoder(x, **kwargs) ## only for backwards compatibility with pre-neck times. 
@@ -128,6 +135,10 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: aux_output = F.interpolate(aux_output, size=input_size, mode="bilinear") aux_output = self._check_for_single_channel_and_squeeze(aux_output) aux_outputs[name] = aux_output + + # Cropping image to reduce the effect of padding + mask = self.crop_image(mask, input_size) + return ModelOutput(output=mask, auxiliary_heads=aux_outputs) def _get_head(self, task: str, input_embed_dim: int, head_kwargs): From 6fb8c95c186e239d7fb6de64953fd55804722a02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Thu, 2 Jan 2025 15:19:32 -0300 Subject: [PATCH 05/39] cropping image for scaler model MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/scalar_output_model.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py index 92866e09..ecc5ce67 100644 --- a/terratorch/models/scalar_output_model.py +++ b/terratorch/models/scalar_output_model.py @@ -71,14 +71,23 @@ def freeze_decoder(self): freeze_module(self.decoder) freeze_module(self.head) - # TODO: do this properly - def check_input_shape(self, x: torch.Tensor) -> bool: # noqa: ARG002 - return True + def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: # noqa: ARG002 + + x_shape = x.shape[2:] + if all([i//self.patch_size==0 for i in x_shape]): + return x + else: + x = pad_images(x, self.patch_size, "constant") + return x + + def crop_image(self, x:torch.Tensor, size) -> torch.Tensor: + + return crop(x, size[0], size[1]) def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: """Sequentially pass `x` through model`s encoder, decoder and heads""" - self.check_input_shape(x) + x = self.check_input_shape(x) features = self.encoder(x, **kwargs) ## only for backwards compatibility with pre-neck times. 
@@ -92,6 +101,8 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: decoder_output = self.decoder([f.clone() for f in features]) mask = self.head(decoder_output) + mask = self.crop_image(x) + aux_outputs = {} for name, decoder in self.aux_heads.items(): aux_output = decoder([f.clone() for f in features]) From 5f37ba7835f04125233a444ae4db0008a21d3086 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Fri, 3 Jan 2025 10:10:21 -0300 Subject: [PATCH 06/39] patch_size could be None MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/encoder_decoder_factory.py | 17 +++++++------ terratorch/models/pixel_wise_model.py | 24 +++++++++++-------- terratorch/models/scalar_output_model.py | 18 +++++++++----- ...tured-finetune_prithvi_pixelwise_pad.yaml} | 0 4 files changed, 36 insertions(+), 23 deletions(-) rename tests/resources/configs/{manufactured-finetune_prithvi_pad.yaml => manufactured-finetune_prithvi_pixelwise_pad.yaml} (100%) diff --git a/terratorch/models/encoder_decoder_factory.py b/terratorch/models/encoder_decoder_factory.py index 3c0fba5d..aee54736 100644 --- a/terratorch/models/encoder_decoder_factory.py +++ b/terratorch/models/encoder_decoder_factory.py @@ -2,7 +2,7 @@ import warnings - +import logging from torch import nn from terratorch.models.model import ( @@ -132,14 +132,16 @@ def build_model( # Getting some necessary parameters # Patch size - try: + if "patch_size" in backbone_kwargs: patch_size = backbone_kwargs["patch_size"] - except KeyError: - print("Trying to get `patch_size` from the backbone") - patch_size = _get_argument_from_instance(backbone, "patch_size") - print(f"Found `patch_size` as {patch_size}") else: - print("The argument `patch_size` could not be found. Define it in the config file.") + # If the configs for the model are right and images have the proper + # sizes, it can still work, but there is no way to fix possible + # errors during execution if information about patch size is not + # explicitly provided. + logging.getLogger("terratorch").info(f"The argument `patch_size` could not be found. To avoid possible fails related to nondivisible images,\ + it's better to define it in the config file.") + patch_size = None if peft_config is not None: if not backbone_kwargs.get("pretrained", False): @@ -246,6 +248,7 @@ def _build_appropriate_model( backbone, decoder, head_kwargs, + patch_size=patch_size, decoder_includes_head=decoder_includes_head, neck=neck_module, auxiliary_heads=auxiliary_heads, diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index a5d7e77c..d04b81a9 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -80,14 +80,19 @@ def freeze_decoder(self): freeze_module(self.decoder) freeze_module(self.head) - def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: # noqa: ARG002 - - x_shape = x.shape[2:] - if all([i//self.patch_size==0 for i in x_shape]): - return x + def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: + + if self.patch_size: + x_shape = x.shape[2:] + if all([i//self.patch_size==0 for i in x_shape]): + return x + else: + x = pad_images(x, self.patch_size, "constant") + return x else: - x = pad_images(x, self.patch_size, "constant") - return x + # If patch size is not provided, the user should guarantee the + # dataset is properly configured to work with the model being used. 
+ return x def crop_image(self, x:torch.Tensor, size:tuple) -> torch.Tensor: @@ -112,7 +117,9 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: else: ValueError('Could not infer input shape.') + # TODO make this verification optional to avoid unnecessary repetition x = self.check_input_shape(x) + features = self.encoder(x, **kwargs) ## only for backwards compatibility with pre-neck times. @@ -136,9 +143,6 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: aux_output = self._check_for_single_channel_and_squeeze(aux_output) aux_outputs[name] = aux_output - # Cropping image to reduce the effect of padding - mask = self.crop_image(mask, input_size) - return ModelOutput(output=mask, auxiliary_heads=aux_outputs) def _get_head(self, task: str, input_embed_dim: int, head_kwargs): diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py index ecc5ce67..5081502e 100644 --- a/terratorch/models/scalar_output_model.py +++ b/terratorch/models/scalar_output_model.py @@ -25,6 +25,7 @@ def __init__( encoder: nn.Module, decoder: nn.Module, head_kwargs: dict, + patch_size:int = None, decoder_includes_head: bool = False, auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, neck: nn.Module | None = None, @@ -63,6 +64,7 @@ def __init__( self.aux_heads = nn.ModuleDict(aux_heads) self.neck = neck + self.patch_size = patch_size def freeze_encoder(self): freeze_module(self.encoder) @@ -73,12 +75,17 @@ def freeze_decoder(self): def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: # noqa: ARG002 - x_shape = x.shape[2:] - if all([i//self.patch_size==0 for i in x_shape]): - return x + if self.patch_size: + x_shape = x.shape[2:] + if all([i//self.patch_size==0 for i in x_shape]): + return x + else: + x = pad_images(x, self.patch_size, "constant") + return x else: - x = pad_images(x, self.patch_size, "constant") - return x + # If patch size is not provided, the user should guarantee the + # dataset is properly configured to work with the model being used. 
+ return x def crop_image(self, x:torch.Tensor, size) -> torch.Tensor: @@ -101,7 +108,6 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: decoder_output = self.decoder([f.clone() for f in features]) mask = self.head(decoder_output) - mask = self.crop_image(x) aux_outputs = {} for name, decoder in self.aux_heads.items(): diff --git a/tests/resources/configs/manufactured-finetune_prithvi_pad.yaml b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml similarity index 100% rename from tests/resources/configs/manufactured-finetune_prithvi_pad.yaml rename to tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml From 5cb27dc7e34d0ab4a5b8289f6b7c74a1bde0249a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Fri, 3 Jan 2025 11:24:42 -0300 Subject: [PATCH 07/39] Adapting the Clay factory to support patch_size and minor adjusts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/clay_model_factory.py | 20 ++++++++++++++++++- terratorch/models/encoder_decoder_factory.py | 2 +- terratorch/models/pixel_wise_model.py | 2 +- terratorch/models/scalar_output_model.py | 4 ++-- ...nufactured-finetune_prithvi_eo_v2_300.yaml | 1 + tests/test_finetune.py | 8 ++++++++ 6 files changed, 32 insertions(+), 5 deletions(-) diff --git a/terratorch/models/clay_model_factory.py b/terratorch/models/clay_model_factory.py index 82d1f183..41aa4e22 100644 --- a/terratorch/models/clay_model_factory.py +++ b/terratorch/models/clay_model_factory.py @@ -1,6 +1,7 @@ import importlib import sys from collections.abc import Callable +import logging import timm import torch @@ -122,6 +123,19 @@ def build_model( backbone_kwargs, kwargs = extract_prefix_keys(kwargs, "backbone_") + # Getting some necessary parameters + # Patch size + if "patch_size" in backbone_kwargs: + patch_size = backbone_kwargs["patch_size"] + else: + # If the configs for the model are right and images have the proper + # sizes, it can still work, but there is no way to fix possible + # errors during execution if information about patch size is not + # explicitly provided. + logging.getLogger("terratorch").info(f"The argument `patch_size` could not be found. To avoid possible errors related to nondivisible images,\ + it's better to define it in the config file.") + patch_size = None + # Trying to find the model on HuggingFace. 
try: backbone: nn.Module = timm.create_model( @@ -157,7 +171,7 @@ def build_model( head_kwargs["num_classes"] = num_classes if aux_decoders is None: return _build_appropriate_model( - task, backbone, decoder, head_kwargs, prepare_features_for_image_model, rescale=rescale + task, backbone, decoder, head_kwargs, prepare_features_for_image_model, patch_size=patch_size, rescale=rescale ) to_be_aux_decoders: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] = [] @@ -186,6 +200,7 @@ def build_model( decoder, head_kwargs, prepare_features_for_image_model, + patch_size=patch_size, rescale=rescale, auxiliary_heads=to_be_aux_decoders, ) @@ -197,6 +212,7 @@ def _build_appropriate_model( decoder: nn.Module, head_kwargs: dict, prepare_features_for_image_model: Callable, + patch_size:int=None, rescale: bool = True, # noqa: FBT001, FBT002 auxiliary_heads: dict | None = None, ): @@ -206,6 +222,7 @@ def _build_appropriate_model( backbone, decoder, head_kwargs, + patch_size=patch_size, rescale=rescale, auxiliary_heads=auxiliary_heads, ) @@ -215,6 +232,7 @@ def _build_appropriate_model( backbone, decoder, head_kwargs, + patch_size=patch_size, auxiliary_heads=auxiliary_heads, ) diff --git a/terratorch/models/encoder_decoder_factory.py b/terratorch/models/encoder_decoder_factory.py index aee54736..358cab70 100644 --- a/terratorch/models/encoder_decoder_factory.py +++ b/terratorch/models/encoder_decoder_factory.py @@ -139,7 +139,7 @@ def build_model( # sizes, it can still work, but there is no way to fix possible # errors during execution if information about patch size is not # explicitly provided. - logging.getLogger("terratorch").info(f"The argument `patch_size` could not be found. To avoid possible fails related to nondivisible images,\ + logging.getLogger("terratorch").info(f"The argument `patch_size` could not be found. To avoid possible errors related to nondivisible images,\ it's better to define it in the config file.") patch_size = None diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index d04b81a9..84bd296d 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -27,7 +27,7 @@ def __init__( encoder: nn.Module, decoder: nn.Module, head_kwargs: dict, - patch_size: int, + patch_size: int = None, decoder_includes_head: bool = False, auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, neck: nn.Module | None = None, diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py index 5081502e..edf9ad12 100644 --- a/terratorch/models/scalar_output_model.py +++ b/terratorch/models/scalar_output_model.py @@ -87,9 +87,9 @@ def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: # noqa: ARG002 # dataset is properly configured to work with the model being used. 
return x - def crop_image(self, x:torch.Tensor, size) -> torch.Tensor: + def crop_image(self, x:torch.Tensor, size:tuple) -> torch.Tensor: - return crop(x, size[0], size[1]) + return transforms.CenterCrop(size)(x) def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: """Sequentially pass `x` through model`s encoder, decoder and heads""" diff --git a/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_300.yaml b/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_300.yaml index 3e44a1c5..5bd0c5d1 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_300.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_300.yaml @@ -100,6 +100,7 @@ model: # backbone_pretrained_cfg_overlay: # file: tests/prithvi_vit_300.pt backbone_drop_path_rate: 0.3 + backbone_patch_size: 16 # backbone_window_size: 8 decoder_channels: 64 num_frames: 1 diff --git a/tests/test_finetune.py b/tests/test_finetune.py index 9c06e8da..76d4df15 100644 --- a/tests/test_finetune.py +++ b/tests/test_finetune.py @@ -46,6 +46,14 @@ def test_finetune_bands_str(model_name, case): gc.collect() +@pytest.mark.parametrize("model_name", ["prithvi_eo_v2_300"]) +@pytest.mark.parametrize("case", ["fit", "test", "validate"]) +def test_finetune_pad(case): + command_list = [case, "-c", f"tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml"] + _ = build_lightning_cli(command_list) + + gc.collect() + @pytest.mark.parametrize("model_name", ["prithvi_swin_B"]) def test_finetune_metrics_from_file(model_name): From ba43134dedbcce18a4c84d037fa3326fc8e80e1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Fri, 3 Jan 2025 12:34:32 -0300 Subject: [PATCH 08/39] Trying to reduce the cost of these tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- ...nufactured-finetune_prithvi_eo_v2_300.yaml | 2 +- ...nufactured-finetune_prithvi_eo_v2_600.yaml | 2 +- ...ctured-finetune_prithvi_pixelwise_pad.yaml | 2 +- .../manufactured-finetune_prithvi_swin_B.yaml | 2 +- ...finetune_prithvi_swin_B_band_interval.yaml | 2 +- ...tune_prithvi_swin_B_metrics_from_file.yaml | 2 +- ...ctured-finetune_prithvi_swin_B_string.yaml | 2 +- .../manufactured-finetune_prithvi_swin_L.yaml | 22 +++++++++---------- ...manufactured-finetune_prithvi_vit_100.yaml | 3 ++- ...manufactured-finetune_prithvi_vit_300.yaml | 2 +- 10 files changed, 21 insertions(+), 20 deletions(-) diff --git a/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_300.yaml b/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_300.yaml index 5bd0c5d1..a60517da 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_300.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_300.yaml @@ -20,7 +20,7 @@ trainer: init_args: monitor: val/loss patience: 100 - max_epochs: 2 + max_epochs: 1 check_val_every_n_epoch: 1 log_every_n_steps: 20 enable_checkpointing: true diff --git a/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_600.yaml b/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_600.yaml index 292d229c..f6652815 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_600.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_eo_v2_600.yaml @@ -20,7 +20,7 @@ trainer: init_args: monitor: val/loss patience: 100 - max_epochs: 2 + max_epochs: 1 check_val_every_n_epoch: 1 log_every_n_steps: 20 enable_checkpointing: true diff 
--git a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml index c0abd9f3..7e8ef8b7 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml @@ -20,7 +20,7 @@ trainer: init_args: monitor: val/loss patience: 100 - max_epochs: 2 + max_epochs: 1 check_val_every_n_epoch: 1 log_every_n_steps: 20 enable_checkpointing: true diff --git a/tests/resources/configs/manufactured-finetune_prithvi_swin_B.yaml b/tests/resources/configs/manufactured-finetune_prithvi_swin_B.yaml index 065caa02..cea8a0ea 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_swin_B.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_swin_B.yaml @@ -20,7 +20,7 @@ trainer: init_args: monitor: val/loss patience: 100 - max_epochs: 3 + max_epochs: 1 check_val_every_n_epoch: 1 log_every_n_steps: 20 enable_checkpointing: true diff --git a/tests/resources/configs/manufactured-finetune_prithvi_swin_B_band_interval.yaml b/tests/resources/configs/manufactured-finetune_prithvi_swin_B_band_interval.yaml index a9d4145e..9f5fc50c 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_swin_B_band_interval.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_swin_B_band_interval.yaml @@ -20,7 +20,7 @@ trainer: init_args: monitor: val/loss patience: 100 - max_epochs: 2 + max_epochs: 1 check_val_every_n_epoch: 1 log_every_n_steps: 20 enable_checkpointing: true diff --git a/tests/resources/configs/manufactured-finetune_prithvi_swin_B_metrics_from_file.yaml b/tests/resources/configs/manufactured-finetune_prithvi_swin_B_metrics_from_file.yaml index 9005547b..95907310 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_swin_B_metrics_from_file.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_swin_B_metrics_from_file.yaml @@ -20,7 +20,7 @@ trainer: init_args: monitor: val/loss patience: 100 - max_epochs: 2 + max_epochs: 1 check_val_every_n_epoch: 1 log_every_n_steps: 20 enable_checkpointing: true diff --git a/tests/resources/configs/manufactured-finetune_prithvi_swin_B_string.yaml b/tests/resources/configs/manufactured-finetune_prithvi_swin_B_string.yaml index 73813b6d..746175a2 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_swin_B_string.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_swin_B_string.yaml @@ -20,7 +20,7 @@ trainer: init_args: monitor: val/loss patience: 100 - max_epochs: 2 + max_epochs: 1 check_val_every_n_epoch: 1 log_every_n_steps: 20 enable_checkpointing: true diff --git a/tests/resources/configs/manufactured-finetune_prithvi_swin_L.yaml b/tests/resources/configs/manufactured-finetune_prithvi_swin_L.yaml index 16729210..453a8b4a 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_swin_L.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_swin_L.yaml @@ -20,7 +20,7 @@ trainer: init_args: monitor: val/loss patience: 100 - max_epochs: 2 + max_epochs: 1 check_val_every_n_epoch: 1 log_every_n_steps: 20 enable_checkpointing: true @@ -31,16 +31,16 @@ data: batch_size: 2 num_workers: 4 train_transform: - #- class_path: albumentations.HorizontalFlip - # init_args: - # p: 0.5 - #- class_path: albumentations.Rotate - # init_args: - # limit: 30 - # border_mode: 0 # cv2.BORDER_CONSTANT - # value: 0 - # # mask_value: 1 - # p: 0.5 + - class_path: albumentations.HorizontalFlip + init_args: + p: 0.5 + - 
class_path: albumentations.Rotate + init_args: + limit: 30 + border_mode: 0 # cv2.BORDER_CONSTANT + value: 0 + # mask_value: 1 + p: 0.5 - class_path: ToTensorV2 dataset_bands: - 0 diff --git a/tests/resources/configs/manufactured-finetune_prithvi_vit_100.yaml b/tests/resources/configs/manufactured-finetune_prithvi_vit_100.yaml index bb652415..12c6b19f 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_vit_100.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_vit_100.yaml @@ -20,7 +20,7 @@ trainer: init_args: monitor: val/loss patience: 100 - max_epochs: 2 + max_epochs: 1 check_val_every_n_epoch: 1 log_every_n_steps: 20 enable_checkpointing: true @@ -97,6 +97,7 @@ model: decoder: UperNetDecoder pretrained: false backbone: prithvi_vit_100 + backbone_patch_size: 15 #backbone_pretrained_cfg_overlay: #file: tests/all_ecos_random/version_0/checkpoints/epoch=0_state_dict.ckpt #tests/prithvi_vit_100.pt backbone_drop_path_rate: 0.3 diff --git a/tests/resources/configs/manufactured-finetune_prithvi_vit_300.yaml b/tests/resources/configs/manufactured-finetune_prithvi_vit_300.yaml index 3e44a1c5..37294615 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_vit_300.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_vit_300.yaml @@ -20,7 +20,7 @@ trainer: init_args: monitor: val/loss patience: 100 - max_epochs: 2 + max_epochs: 1 check_val_every_n_epoch: 1 log_every_n_steps: 20 enable_checkpointing: true From 9c26eab4ea979c40f4bcf9b7da831d3e2f7efc8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Fri, 3 Jan 2025 15:06:54 -0300 Subject: [PATCH 09/39] pad_images must be in utils.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/backbones/prithvi_vit.py | 15 +-------------- terratorch/models/pixel_wise_model.py | 2 +- terratorch/models/scalar_output_model.py | 2 +- terratorch/models/utils.py | 16 ++++++++++++++++ 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/terratorch/models/backbones/prithvi_vit.py b/terratorch/models/backbones/prithvi_vit.py index 136c6513..7ebfd022 100644 --- a/terratorch/models/backbones/prithvi_vit.py +++ b/terratorch/models/backbones/prithvi_vit.py @@ -10,6 +10,7 @@ from terratorch.models.backbones.select_patch_embed_weights import select_patch_embed_weights from terratorch.datasets.utils import generate_bands_intervals from terratorch.models.backbones.prithvi_mae import PrithviViT, PrithviMAE +from terratorch.models.utils import pad_images logger = logging.getLogger(__name__) @@ -153,20 +154,6 @@ def checkpoint_filter_fn_mae( return state_dict - -def pad_images(imgs: Tensor,patch_size: int, padding:str) -> Tensor: - p = patch_size - # h, w = imgs.shape[3], imgs.shape[4] - t, h, w = imgs.shape[-3:] - h_pad, w_pad = (p - h % p) % p, (p - w % p) % p # Ensure padding is within bounds - if h_pad > 0 or w_pad > 0: - imgs = torch.stack([ - nn.functional.pad(img, (0, w_pad, 0, h_pad), mode=padding) - for img in imgs # Apply per image to avoid NotImplementedError from torch.nn.functional.pad - ]) - return imgs - - def _create_prithvi( variant: str, pretrained: bool = False, # noqa: FBT001, FBT002 diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index 84bd296d..28a8bdd3 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -8,7 +8,7 @@ from terratorch.models.heads import 
RegressionHead, SegmentationHead from terratorch.models.model import AuxiliaryHeadWithDecoderWithoutInstantiatedHead, Model, ModelOutput -from terratorch.models.backbones.prithvi_vit import pad_images +from terratorch.models.utils import pad_images def freeze_module(module: nn.Module): for param in module.parameters(): diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py index edf9ad12..85098c2d 100644 --- a/terratorch/models/scalar_output_model.py +++ b/terratorch/models/scalar_output_model.py @@ -6,7 +6,7 @@ from terratorch.models.heads import ClassificationHead from terratorch.models.model import AuxiliaryHeadWithDecoderWithoutInstantiatedHead, Model, ModelOutput - +from terratorch.models.utils import pad_images def freeze_module(module: nn.Module): for param in module.parameters(): diff --git a/terratorch/models/utils.py b/terratorch/models/utils.py index cf0e3537..9815160a 100644 --- a/terratorch/models/utils.py +++ b/terratorch/models/utils.py @@ -1,3 +1,6 @@ +import torch +from torch import nn, Tensor + class DecoderNotFoundError(Exception): pass @@ -11,3 +14,16 @@ def extract_prefix_keys(d: dict, prefix: str) -> dict: remaining_dict[k] = v return extracted_dict, remaining_dict + +def pad_images(imgs: Tensor,patch_size: int, padding:str) -> Tensor: + p = patch_size + # h, w = imgs.shape[3], imgs.shape[4] + t, h, w = imgs.shape[-3:] + h_pad, w_pad = (p - h % p) % p, (p - w % p) % p # Ensure padding is within bounds + if h_pad > 0 or w_pad > 0: + imgs = torch.stack([ + nn.functional.pad(img, (0, w_pad, 0, h_pad), mode=padding) + for img in imgs # Apply per image to avoid NotImplementedError from torch.nn.functional.pad + ]) + return imgs + From c4fd736df29a84099928ad66fc47f8f63886b77e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 10:03:45 -0300 Subject: [PATCH 10/39] Cropping images could be a necessary operation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/pixel_wise_model.py | 3 ++- terratorch/models/scalar_output_model.py | 8 +++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index 28a8bdd3..5b6f2d41 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -94,7 +94,7 @@ def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: # dataset is properly configured to work with the model being used. return x - def crop_image(self, x:torch.Tensor, size:tuple) -> torch.Tensor: + def _crop_image_when_necessary(self, x:torch.Tensor, size:tuple) -> torch.Tensor: return transforms.CenterCrop(size)(x) @@ -143,6 +143,7 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: aux_output = self._check_for_single_channel_and_squeeze(aux_output) aux_outputs[name] = aux_output + mask = self._crop_image_when_necessary(mask, input_size) return ModelOutput(output=mask, auxiliary_heads=aux_outputs) def _get_head(self, task: str, input_embed_dim: int, head_kwargs): diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py index 85098c2d..29fd0cc4 100644 --- a/terratorch/models/scalar_output_model.py +++ b/terratorch/models/scalar_output_model.py @@ -87,7 +87,7 @@ def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: # noqa: ARG002 # dataset is properly configured to work with the model being used. 
return x - def crop_image(self, x:torch.Tensor, size:tuple) -> torch.Tensor: + def _crop_image_when_necessary(self, x:torch.Tensor, size:tuple) -> torch.Tensor: return transforms.CenterCrop(size)(x) @@ -97,6 +97,10 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: x = self.check_input_shape(x) features = self.encoder(x, **kwargs) + # Collecting information about the size of the input tensor in order to + # use it to possibly crop the image when necessary. + input_size = x.shape[-2:] + ## only for backwards compatibility with pre-neck times. if self.neck: prepare = self.neck @@ -113,6 +117,8 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: for name, decoder in self.aux_heads.items(): aux_output = decoder([f.clone() for f in features]) aux_outputs[name] = aux_output + + mask = self._crop_image_when_necessary(mask, input_size) return ModelOutput(output=mask, auxiliary_heads=aux_outputs) def _get_head(self, task: str, input_embed_dim: int, head_kwargs: dict): From b70a36830e785112ddec1192125951217ed3eea2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 10:19:08 -0300 Subject: [PATCH 11/39] The cropping must be placed before the head in case of scalar models MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/scalar_output_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py index 29fd0cc4..b93be616 100644 --- a/terratorch/models/scalar_output_model.py +++ b/terratorch/models/scalar_output_model.py @@ -3,7 +3,7 @@ import torch from segmentation_models_pytorch.base import SegmentationModel from torch import nn - +import torchvision.transforms as transforms from terratorch.models.heads import ClassificationHead from terratorch.models.model import AuxiliaryHeadWithDecoderWithoutInstantiatedHead, Model, ModelOutput from terratorch.models.utils import pad_images @@ -111,6 +111,7 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: features = prepare(features) decoder_output = self.decoder([f.clone() for f in features]) + decoder_output = self._crop_image_when_necessary(decoder_output, input_size) mask = self.head(decoder_output) aux_outputs = {} @@ -118,7 +119,6 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: aux_output = decoder([f.clone() for f in features]) aux_outputs[name] = aux_output - mask = self._crop_image_when_necessary(mask, input_size) return ModelOutput(output=mask, auxiliary_heads=aux_outputs) def _get_head(self, task: str, input_embed_dim: int, head_kwargs: dict): From ecca3aa488d868c46ab2d0a798e978bc98c556f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 10:47:32 -0300 Subject: [PATCH 12/39] Creating extra images for tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- examples/scripts/create_images.py | 34 +++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 examples/scripts/create_images.py diff --git a/examples/scripts/create_images.py b/examples/scripts/create_images.py new file mode 100644 index 00000000..c1a54b03 --- /dev/null +++ b/examples/scripts/create_images.py @@ -0,0 +1,34 @@ +from PIL import Image +import os +import random +import numpy as np +import tifffile as tiff +from 
argparse import ArgumentParser + +parser = ArgumentParser() +parser.add_argument("--input_file") +parser.add_argument("--output_dir") +parser.add_argument("--n_copies", type=int, default=2) + +args = parser.parse_args() +input_file = args.input_file +output_dir = args.output_dir +n_copies = args.n_copies + +pad_limit = 4 + +for c in range(n_copies): + + pad = random.randint(1, pad_limit) + filename = os.path.split(input_file)[-1] + output_file = os.path.join(output_dir, filename.replace(".tif", "_{c}.tif")) + print(pad) + imarray = tiff.imread(input_file) + im_shape = imarray.shape + im_shape_ext = tuple([i+2*pad for i in list(im_shape[:-1])]) + (im_shape[-1],) + print(im_shape_ext) + output = np.zeros(im_shape_ext) + print(output.shape) + output[pad:-pad, pad:-pad, :] = imarray + print(output.shape) + tiff.imwrite(output_file, output) From e09cd79ac1cfe1d12c2b58b44ad28fb1a8487654 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 11:53:25 -0300 Subject: [PATCH 13/39] Minor changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- examples/scripts/create_images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/scripts/create_images.py b/examples/scripts/create_images.py index c1a54b03..03f66d83 100644 --- a/examples/scripts/create_images.py +++ b/examples/scripts/create_images.py @@ -21,7 +21,7 @@ pad = random.randint(1, pad_limit) filename = os.path.split(input_file)[-1] - output_file = os.path.join(output_dir, filename.replace(".tif", "_{c}.tif")) + output_file = os.path.join(output_dir, filename.replace(".tif", f"_{c}.tif")) print(pad) imarray = tiff.imread(input_file) im_shape = imarray.shape From 6fdf1b74a06769d87ce5200188c6f4dba7b9804c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 15:36:03 -0300 Subject: [PATCH 14/39] img_size also could be necessary MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/encoder_decoder_factory.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/terratorch/models/encoder_decoder_factory.py b/terratorch/models/encoder_decoder_factory.py index 358cab70..2bad57e4 100644 --- a/terratorch/models/encoder_decoder_factory.py +++ b/terratorch/models/encoder_decoder_factory.py @@ -139,10 +139,17 @@ def build_model( # sizes, it can still work, but there is no way to fix possible # errors during execution if information about patch size is not # explicitly provided. - logging.getLogger("terratorch").info(f"The argument `patch_size` could not be found. To avoid possible errors related to nondivisible images,\ - it's better to define it in the config file.") patch_size = None + if "img_size" in backbone_kwargs: + img_size = backbone_kwargs["img_size"] + else: + # If the configs for the model are right and images have the proper + # sizes, it can still work, but there is no way to fix possible + # errors during execution if information about img_size is not + # provided in order to perform cropping when necessary. 
+ img_size = None + if peft_config is not None: if not backbone_kwargs.get("pretrained", False): msg = ( @@ -182,6 +189,7 @@ def build_model( decoder, head_kwargs, patch_size=patch_size, + img_size=img_size, necks=neck_list, decoder_includes_head=decoder_includes_head, rescale=rescale, @@ -208,6 +216,7 @@ def build_model( decoder, head_kwargs, patch_size=patch_size, + img_size=img_size, necks=neck_list, decoder_includes_head=decoder_includes_head, rescale=rescale, @@ -221,6 +230,7 @@ def _build_appropriate_model( decoder: nn.Module, head_kwargs: dict, patch_size: int, + img_size:int, decoder_includes_head: bool = False, necks: list[Neck] | None = None, rescale: bool = True, # noqa: FBT001, FBT002 @@ -237,6 +247,7 @@ def _build_appropriate_model( decoder, head_kwargs, patch_size=patch_size, + img_size=img_size, decoder_includes_head=decoder_includes_head, neck=neck_module, rescale=rescale, @@ -249,6 +260,7 @@ def _build_appropriate_model( decoder, head_kwargs, patch_size=patch_size, + img_size=img_size, decoder_includes_head=decoder_includes_head, neck=neck_module, auxiliary_heads=auxiliary_heads, From 6178b47a6984471cd231c84e13a0721e4bbd101e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 15:37:18 -0300 Subject: [PATCH 15/39] conditional cropping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/pixel_wise_model.py | 18 +++++++++++++++--- terratorch/models/scalar_output_model.py | 12 ++++++++++-- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index 5b6f2d41..9b75b3cd 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -1,5 +1,5 @@ # Copyright contributors to the Terratorch project - +import logging import torch import torch.nn.functional as F # noqa: N812 import torchvision.transforms as transforms @@ -28,6 +28,7 @@ def __init__( decoder: nn.Module, head_kwargs: dict, patch_size: int = None, + img_size:tuple = None, decoder_includes_head: bool = False, auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, neck: nn.Module | None = None, @@ -72,6 +73,7 @@ def __init__( self.neck = neck self.rescale = rescale self.patch_size = patch_size + self.img_size = (img_size, img_size) def freeze_encoder(self): freeze_module(self.encoder) @@ -88,6 +90,7 @@ def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: return x else: x = pad_images(x, self.patch_size, "constant") + return x else: # If patch size is not provided, the user should guarantee the @@ -96,7 +99,14 @@ def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: def _crop_image_when_necessary(self, x:torch.Tensor, size:tuple) -> torch.Tensor: - return transforms.CenterCrop(size)(x) + if all(self.img_size): + + x_cropped = transforms.CenterCrop(self.img_size)(x) + return x_cropped + else: + logging.getLogger("terratorch").info("Cropping could be necessary to adjust images, so define `img_size` in your config file \ + if you get a shape mismatch.") + return x @staticmethod def _check_for_single_channel_and_squeeze(x): @@ -119,7 +129,6 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: # TODO make this verification optional to avoid unnecessary repetition x = self.check_input_shape(x) - features = self.encoder(x, **kwargs) ## only for backwards compatibility with pre-neck times. 
@@ -135,6 +144,7 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: if self.rescale and mask.shape[-2:] != input_size: mask = F.interpolate(mask, size=input_size, mode="bilinear") mask = self._check_for_single_channel_and_squeeze(mask) + aux_outputs = {} for name, decoder in self.aux_heads.items(): aux_output = decoder([f.clone() for f in features]) @@ -144,6 +154,8 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: aux_outputs[name] = aux_output mask = self._crop_image_when_necessary(mask, input_size) + print(mask.shape) + print(aux_outputs) return ModelOutput(output=mask, auxiliary_heads=aux_outputs) def _get_head(self, task: str, input_embed_dim: int, head_kwargs): diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py index b93be616..8d8a365a 100644 --- a/terratorch/models/scalar_output_model.py +++ b/terratorch/models/scalar_output_model.py @@ -26,6 +26,7 @@ def __init__( decoder: nn.Module, head_kwargs: dict, patch_size:int = None, + img_size:tuple = None, decoder_includes_head: bool = False, auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, neck: nn.Module | None = None, @@ -65,6 +66,7 @@ def __init__( self.neck = neck self.patch_size = patch_size + self.img_size = (img_size, img_size) def freeze_encoder(self): freeze_module(self.encoder) @@ -89,7 +91,13 @@ def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: # noqa: ARG002 def _crop_image_when_necessary(self, x:torch.Tensor, size:tuple) -> torch.Tensor: - return transforms.CenterCrop(size)(x) + if self.img_size: + + return transforms.CenterCrop(self.img_size)(x) + else: + raise NameError("Cropping is necessary to adjust images, so define `img_size` in your config file.") + logging.getLogger("terratorch").info("Cropping could be necessary to adjust images, so define `img_size` in your config file \ + if you get a shape mismatch.") def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: """Sequentially pass `x` through model`s encoder, decoder and heads""" @@ -111,7 +119,7 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: features = prepare(features) decoder_output = self.decoder([f.clone() for f in features]) - decoder_output = self._crop_image_when_necessary(decoder_output, input_size) + decoder_output = self._crop_image_when_necessary(decoder_output, pad, input_size) mask = self.head(decoder_output) aux_outputs = {} From 8cb6d2699a0bda576a10566bcec5bcf9a8eb50ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 15:37:42 -0300 Subject: [PATCH 16/39] config for testing nondivisible images MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- ...netune_prithvi_pixelwise_nondivisible.yaml | 151 ++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100644 tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml diff --git a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml new file mode 100644 index 00000000..f2705c1f --- /dev/null +++ b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml @@ -0,0 +1,151 @@ +# lightning.pytorch==2.1.1 +seed_everything: 42 +trainer: + accelerator: cpu + strategy: auto + devices: auto + num_nodes: 1 + # precision: 16-mixed + logger: + class_path: 
TensorBoardLogger + init_args: + save_dir: tests/ + name: all_ecos_random + callbacks: + - class_path: RichProgressBar + - class_path: LearningRateMonitor + init_args: + logging_interval: epoch + - class_path: EarlyStopping + init_args: + monitor: val/loss + patience: 100 + max_epochs: 1 + check_val_every_n_epoch: 1 + log_every_n_steps: 20 + enable_checkpointing: true + default_root_dir: tests/ +data: + class_path: GenericNonGeoPixelwiseRegressionDataModule + init_args: + batch_size: 1 + num_workers: 1 + train_transform: + #- class_path: albumentations.HorizontalFlip + # init_args: + # p: 0.5 + #- class_path: albumentations.Rotate + # init_args: + # limit: 30 + # border_mode: 0 # cv2.BORDER_CONSTANT + # value: 0 + # # mask_value: 1 + # p: 0.5 + - class_path: ToTensorV2 + dataset_bands: + - 0 + - BLUE + - GREEN + - RED + - NIR_NARROW + - SWIR_1 + - SWIR_2 + - 1 + - 2 + - 3 + - 4 + output_bands: + - BLUE + - GREEN + - RED + - NIR_NARROW + - SWIR_1 + - SWIR_2 + rgb_indices: + - 2 + - 1 + - 0 + train_data_root: tests/resources/inputs_extra + train_label_data_root: tests/resources/inputs_extra + val_data_root: tests/resources/inputs_extra + val_label_data_root: tests/resources/inputs_extra + test_data_root: tests/resources/inputs_extra + test_label_data_root: tests/resources/inputs_extra + img_grep: "regression*input*.tif" + label_grep: "regression*label*.tif" + means: + - 547.36707 + - 898.5121 + - 1020.9082 + - 2665.5352 + - 2340.584 + - 1610.1407 + stds: + - 411.4701 + - 558.54065 + - 815.94025 + - 812.4403 + - 1113.7145 + - 1067.641 + no_label_replace: -1 + no_data_replace: 0 + +model: + class_path: terratorch.tasks.PixelwiseRegressionTask + init_args: + model_args: + decoder: UperNetDecoder + pretrained: false + backbone: prithvi_eo_v2_300 + # backbone_pretrained_cfg_overlay: + # file: tests/prithvi_vit_300.pt + backbone_drop_path_rate: 0.3 + backbone_img_size: 224 + # backbone_window_size: 8 + decoder_channels: 64 + num_frames: 1 + in_channels: 6 + bands: + - BLUE + - GREEN + - RED + - NIR_NARROW + - SWIR_1 + - SWIR_2 + head_dropout: 0.5708022831486758 + head_final_act: torch.nn.ReLU + head_learned_upscale_layers: 2 + loss: rmse + #aux_heads: + # - name: aux_head + # decoder: IdentityDecoder + # decoder_args: + # decoder_out_index: 2 + # head_dropout: 0,5 + # head_channel_list: + # - 64 + # head_final_act: torch.nn.ReLU + #aux_loss: + # aux_head: 0.4 + ignore_index: -1 + freeze_backbone: true + freeze_decoder: false + model_factory: PrithviModelFactory + + # uncomment this block for tiled inference + # tiled_inference_parameters: + # h_crop: 224 + # h_stride: 192 + # w_crop: 224 + # w_stride: 192 + # average_patches: true +optimizer: + class_path: torch.optim.AdamW + init_args: + lr: 0.00013524680528283027 + weight_decay: 0.047782217873995426 +lr_scheduler: + class_path: ReduceLROnPlateau + init_args: + monitor: val/loss + From cb62e56d2857f52b798d31b7db7f6d39a9a47c8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 15:38:16 -0300 Subject: [PATCH 17/39] minor adjusts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- examples/scripts/create_images.py | 26 ++++++++++++++++++++++---- terratorch/models/utils.py | 2 +- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/examples/scripts/create_images.py b/examples/scripts/create_images.py index 03f66d83..5c195847 100644 --- a/examples/scripts/create_images.py +++ 
b/examples/scripts/create_images.py @@ -4,6 +4,8 @@ import numpy as np import tifffile as tiff from argparse import ArgumentParser +from osgeo import gdal +from osgeo import osr parser = ArgumentParser() parser.add_argument("--input_file") @@ -17,6 +19,12 @@ pad_limit = 4 +# config +GDAL_DATA_TYPE = gdal.GDT_Int32 +GEOTIFF_DRIVER_NAME = r'GTiff' +NO_DATA = 15 +SPATIAL_REFERENCE_SYSTEM_WKID = 4326 + for c in range(n_copies): pad = random.randint(1, pad_limit) @@ -26,9 +34,19 @@ imarray = tiff.imread(input_file) im_shape = imarray.shape im_shape_ext = tuple([i+2*pad for i in list(im_shape[:-1])]) + (im_shape[-1],) - print(im_shape_ext) + #print(im_shape_ext) output = np.zeros(im_shape_ext) - print(output.shape) + #print(output.shape) output[pad:-pad, pad:-pad, :] = imarray - print(output.shape) - tiff.imwrite(output_file, output) + #print(output.shape) + #tiff.imwrite(output_file, output) + + # create driver + driver = gdal.GetDriverByName(GEOTIFF_DRIVER_NAME) + + output_raster = driver.Create(output_file, + output.shape[1], + output.shape[0], + output.shape[-1], + eType = GDAL_DATA_TYPE) + diff --git a/terratorch/models/utils.py b/terratorch/models/utils.py index 9815160a..5704eb69 100644 --- a/terratorch/models/utils.py +++ b/terratorch/models/utils.py @@ -1,5 +1,5 @@ -import torch from torch import nn, Tensor +import torch class DecoderNotFoundError(Exception): pass From a0cac1ce87179c71becd247849f89cb29616568c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 15:39:53 -0300 Subject: [PATCH 18/39] minor adjusts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- .../configs/manufactured-finetune_prithvi_vit_100.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/resources/configs/manufactured-finetune_prithvi_vit_100.yaml b/tests/resources/configs/manufactured-finetune_prithvi_vit_100.yaml index 12c6b19f..3a696132 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_vit_100.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_vit_100.yaml @@ -97,8 +97,7 @@ model: decoder: UperNetDecoder pretrained: false backbone: prithvi_vit_100 - backbone_patch_size: 15 - #backbone_pretrained_cfg_overlay: + #backbone_pretrained_cfg_overlay: #file: tests/all_ecos_random/version_0/checkpoints/epoch=0_state_dict.ckpt #tests/prithvi_vit_100.pt backbone_drop_path_rate: 0.3 num_frames: 1 From ca881b4f0e93d46be7fe0ff510811f20f56b9378 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 16:39:37 -0300 Subject: [PATCH 19/39] Input files to be used for testing the padding for non-divisible images MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- .../inputs_extra/regression_test_input_0.tif | Bin 0 -> 2117596 bytes .../inputs_extra/regression_test_input_1.tif | Bin 0 -> 2117596 bytes .../inputs_extra/regression_test_input_2.tif | Bin 0 -> 2117596 bytes .../inputs_extra/regression_test_input_3.tif | Bin 0 -> 2117596 bytes .../inputs_extra/regression_test_label_0.tif | Bin 0 -> 9057 bytes .../inputs_extra/regression_test_label_1.tif | Bin 0 -> 9057 bytes .../inputs_extra/regression_test_label_2.tif | Bin 0 -> 9057 bytes .../inputs_extra/regression_test_label_3.tif | Bin 0 -> 9057 bytes 8 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 
tests/resources/inputs_extra/regression_test_input_0.tif create mode 100644 tests/resources/inputs_extra/regression_test_input_1.tif create mode 100644 tests/resources/inputs_extra/regression_test_input_2.tif create mode 100644 tests/resources/inputs_extra/regression_test_input_3.tif create mode 100644 tests/resources/inputs_extra/regression_test_label_0.tif create mode 100644 tests/resources/inputs_extra/regression_test_label_1.tif create mode 100644 tests/resources/inputs_extra/regression_test_label_2.tif create mode 100644 tests/resources/inputs_extra/regression_test_label_3.tif diff --git a/tests/resources/inputs_extra/regression_test_input_0.tif b/tests/resources/inputs_extra/regression_test_input_0.tif new file mode 100644 index 0000000000000000000000000000000000000000..aceab23e751e63c76c380cdfcbd6f4e42d8d8bc0 GIT binary patch literal 2117596 zcmeIyZKz~*9S895+~rZfg30^fkFn#v@bTuz@ZEj zT9^$KGRPr?3>0Qxte}t$87QO z8U7?0{3_}HDe09;>3+79&Z$z`-!7%~XerI-OKH4OO8vu9YS)dWdfQkk_l>2vIF|C| zu?+t(mcc*A(l5r-`|^0Y-yBcpN8@RC#?yLrJk9sU)7YL!y*iQF-4m%kJdw&X6DfKV zDPNt)aBVV!FHWZa^~v;pFq!TzCe!)dWZHk9OlxT>%^RoExN|D?2d7edaw^rAr&4)m zD#iSC$|t8Yykk0p?@p)xv+4AHGo9{Vrqh|4N&EOrT4!d`JU^4h<1?whIFs62GpUZv zrgFnPt(hyuFlSdO772%Nd?o&fxrV`j0QC_u_K8Z!M=YwvzS@D`}lxN%P!F8jr1{ z{=!OXZ?2^J(Ml@Uuco+tHRbzPGrYK(!R6KTUtdk{gVl8R*V4IlE$w^P(z>vg=B2eX zURz83{k7C~)>FM@J(YXbQ#`Vs^0Vt1_SQ4Fx}N^}MtV1Iq+8!e=b??XpWaC8%0`-l zjWm`wQ@?35wYxS`ZEmLW)MkoTHdB6gGsF3<3{GyPe|9Uq2e#6EVk@1Ow$grkE3N76 zG*4`&@zw3rzq6g%Pq$P3^>!+M-cB*Tlk(?wGCaML!MUCEAKOXqg`ITY+)3x7owTpt zP3!jEH1FR{yaYOXNxp`U!?wzBDIbERKK*J$~X2?{BS?zU+!mkWj}+#e)`J?>D_dY?p+7z zG!N2#>L9IG4$^%0AdT{2>R&ia?Q4gre(x}q_F;-^TKv z>(d>c2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly 
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ 
z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U hAV7cs0RjXF5FkK+009C72oNAZfB*pk1pe0o{{|cU_K5%h literal 0 HcmV?d00001 diff --git a/tests/resources/inputs_extra/regression_test_input_1.tif b/tests/resources/inputs_extra/regression_test_input_1.tif new file mode 100644 index 0000000000000000000000000000000000000000..aceab23e751e63c76c380cdfcbd6f4e42d8d8bc0 GIT binary patch literal 2117596 zcmeIyZKz~*9S895+~rZfg30^fkFn#v@bTuz@ZEj zT9^$KGRPr?3>0Qxte}t$87QO z8U7?0{3_}HDe09;>3+79&Z$z`-!7%~XerI-OKH4OO8vu9YS)dWdfQkk_l>2vIF|C| zu?+t(mcc*A(l5r-`|^0Y-yBcpN8@RC#?yLrJk9sU)7YL!y*iQF-4m%kJdw&X6DfKV zDPNt)aBVV!FHWZa^~v;pFq!TzCe!)dWZHk9OlxT>%^RoExN|D?2d7edaw^rAr&4)m 
zD#iSC$|t8Yykk0p?@p)xv+4AHGo9{Vrqh|4N&EOrT4!d`JU^4h<1?whIFs62GpUZv zrgFnPt(hyuFlSdO772%Nd?o&fxrV`j0QC_u_K8Z!M=YwvzS@D`}lxN%P!F8jr1{ z{=!OXZ?2^J(Ml@Uuco+tHRbzPGrYK(!R6KTUtdk{gVl8R*V4IlE$w^P(z>vg=B2eX zURz83{k7C~)>FM@J(YXbQ#`Vs^0Vt1_SQ4Fx}N^}MtV1Iq+8!e=b??XpWaC8%0`-l zjWm`wQ@?35wYxS`ZEmLW)MkoTHdB6gGsF3<3{GyPe|9Uq2e#6EVk@1Ow$grkE3N76 zG*4`&@zw3rzq6g%Pq$P3^>!+M-cB*Tlk(?wGCaML!MUCEAKOXqg`ITY+)3x7owTpt zP3!jEH1FR{yaYOXNxp`U!?wzBDIbERKK*J$~X2?{BS?zU+!mkWj}+#e)`J?>D_dY?p+7z zG!N2#>L9IG4$^%0AdT{2>R&ia?Q4gre(x}q_F;-^TKv z>(d>c2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ 
z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 
z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U hAV7cs0RjXF5FkK+009C72oNAZfB*pk1pe0o{{|cU_K5%h literal 0 HcmV?d00001 diff --git a/tests/resources/inputs_extra/regression_test_input_2.tif b/tests/resources/inputs_extra/regression_test_input_2.tif new file mode 100644 index 0000000000000000000000000000000000000000..aceab23e751e63c76c380cdfcbd6f4e42d8d8bc0 GIT binary patch literal 2117596 zcmeIyZKz~*9S895+~rZfg30^fkFn#v@bTuz@ZEj zT9^$KGRPr?3>0Qxte}t$87QO z8U7?0{3_}HDe09;>3+79&Z$z`-!7%~XerI-OKH4OO8vu9YS)dWdfQkk_l>2vIF|C| zu?+t(mcc*A(l5r-`|^0Y-yBcpN8@RC#?yLrJk9sU)7YL!y*iQF-4m%kJdw&X6DfKV zDPNt)aBVV!FHWZa^~v;pFq!TzCe!)dWZHk9OlxT>%^RoExN|D?2d7edaw^rAr&4)m zD#iSC$|t8Yykk0p?@p)xv+4AHGo9{Vrqh|4N&EOrT4!d`JU^4h<1?whIFs62GpUZv zrgFnPt(hyuFlSdO772%Nd?o&fxrV`j0QC_u_K8Z!M=YwvzS@D`}lxN%P!F8jr1{ z{=!OXZ?2^J(Ml@Uuco+tHRbzPGrYK(!R6KTUtdk{gVl8R*V4IlE$w^P(z>vg=B2eX zURz83{k7C~)>FM@J(YXbQ#`Vs^0Vt1_SQ4Fx}N^}MtV1Iq+8!e=b??XpWaC8%0`-l zjWm`wQ@?35wYxS`ZEmLW)MkoTHdB6gGsF3<3{GyPe|9Uq2e#6EVk@1Ow$grkE3N76 zG*4`&@zw3rzq6g%Pq$P3^>!+M-cB*Tlk(?wGCaML!MUCEAKOXqg`ITY+)3x7owTpt zP3!jEH1FR{yaYOXNxp`U!?wzBDIbERKK*J$~X2?{BS?zU+!mkWj}+#e)`J?>D_dY?p+7z zG!N2#>L9IG4$^%0AdT{2>R&ia?Q4gre(x}q_F;-^TKv z>(d>c2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly 
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ 
z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 
z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U hAV7cs0RjXF5FkK+009C72oNAZfB*pk1pe0o{{|cU_K5%h literal 0 HcmV?d00001 diff --git a/tests/resources/inputs_extra/regression_test_input_3.tif b/tests/resources/inputs_extra/regression_test_input_3.tif new file mode 100644 index 0000000000000000000000000000000000000000..aceab23e751e63c76c380cdfcbd6f4e42d8d8bc0 GIT binary patch literal 2117596 zcmeIyZKz~*9S895+~rZfg30^fkFn#v@bTuz@ZEj zT9^$KGRPr?3>0Qxte}t$87QO z8U7?0{3_}HDe09;>3+79&Z$z`-!7%~XerI-OKH4OO8vu9YS)dWdfQkk_l>2vIF|C| zu?+t(mcc*A(l5r-`|^0Y-yBcpN8@RC#?yLrJk9sU)7YL!y*iQF-4m%kJdw&X6DfKV zDPNt)aBVV!FHWZa^~v;pFq!TzCe!)dWZHk9OlxT>%^RoExN|D?2d7edaw^rAr&4)m zD#iSC$|t8Yykk0p?@p)xv+4AHGo9{Vrqh|4N&EOrT4!d`JU^4h<1?whIFs62GpUZv zrgFnPt(hyuFlSdO772%Nd?o&fxrV`j0QC_u_K8Z!M=YwvzS@D`}lxN%P!F8jr1{ z{=!OXZ?2^J(Ml@Uuco+tHRbzPGrYK(!R6KTUtdk{gVl8R*V4IlE$w^P(z>vg=B2eX zURz83{k7C~)>FM@J(YXbQ#`Vs^0Vt1_SQ4Fx}N^}MtV1Iq+8!e=b??XpWaC8%0`-l zjWm`wQ@?35wYxS`ZEmLW)MkoTHdB6gGsF3<3{GyPe|9Uq2e#6EVk@1Ow$grkE3N76 zG*4`&@zw3rzq6g%Pq$P3^>!+M-cB*Tlk(?wGCaML!MUCEAKOXqg`ITY+)3x7owTpt zP3!jEH1FR{yaYOXNxp`U!?wzBDIbERKK*J$~X2?{BS?zU+!mkWj}+#e)`J?>D_dY?p+7z zG!N2#>L9IG4$^%0AdT{2>R&ia?Q4gre(x}q_F;-^TKv z>(d>c2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly 
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ 
z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U hAV7cs0RjXF5FkK+009C72oNAZfB*pk1pe0o{{|cU_K5%h literal 0 HcmV?d00001 diff --git a/tests/resources/inputs_extra/regression_test_label_0.tif b/tests/resources/inputs_extra/regression_test_label_0.tif new file mode 100644 index 0000000000000000000000000000000000000000..521be614b99910cd274cdc439a74bd43c423c1e0 GIT binary patch literal 9057 zcmeHM2T+r1m;R^$0V&dJq{tPd2}Kte|#^eQE^5Re2!ngU8wu>lGKDq;W;R9a{O z6a*CrC{>!$n}E{x#p}QS-JPA?e`dWiJF{lyYw)h-!PL=LR>(rfe&ndxz7I6rUmyney;fdY~H`D zQT>Z;@zbUNYYxWJL;E@p{Lc$<^bj?$UjiGSNCCltJp=5rL~@89*ka@m6my^a#~K4* z@4xuL{r^eC!5U2=5L|2|*jc zfe0Q5n&t)H2_8xo4nci~e>@6!Q`7j<`iK6<_5WHsk8o@5-?8K|_@Vc0vLB}lHV5{) 
zZ-e9bp~)%B62TTAGMchP7{m@ygK!{&t_VS3-(-+yH02L{RLDl(0Ud2KZir@vEhaeiN?E|#b7YJk0;g*7Y3>UQp5zp_diLxg(yUB$2xT^A{ z=?j~^5RTG6lF{aIJi;;$DRIN-i1~~AbzUcLb$H1}^nAjp-ZlNh;-&CzqF!FNkK8#c z!EBQU7Gjn!3)G6NANBa2IM?f1i!8M{W>qZ`)#_Q^?|(AVDsRaDxxJ)ybwS;9*vmnb zd}QyZ9UQ?)+NMTi50=n48l-@=7UH|y=62k+rchn<`m2c$MQop&YIe7m+?m=V(fWn& zKBJXz*7?Yie&3UJwT1Owl^^CX%DBF9T1FRGtj|Gpiq9^FIv+dIF#^E-Ba zXV!1r`HjlIYu4|I{+sXozhQ%Ur73d9+xQnzq=AjEyHUBRBeQdR>d9sVGd|QU*7N88 zPf$a>G@d*IdEXM^R!E&Qs{a@@BSU<%)3U9xqX5q(qI=N5v3Q?tIhVBKH@2rxs+rv$ zp=X8;X6om@|AOZB_rxO~m5{|a(+xLSRQ1hQDPN9SMFUnG&a1k|fomNLdZBVfRQ6iu z(lb(^DEt&>j`Q*uzC^YE-t>}1a}Eb995OzAQ|9$Atk!0_M#b+Jq3ps&2r z&hQAjemGEmkYAMXa_#Wr>!-5ubm?zC2Sw~irV13lpG7MpbEe&DNlRf{yHbpp*W;_e zfw^A!Lz4~?F;$CB^|RC4xIM>_uGRG|^=^8ofm(2r^1FuV9E?gcb(@E>p*!U;Z9`zD z4kulF@@+aHgTK>@|6>34?0b!64cvYKQ(UmJ6I3@UwNbP0MZzcP@H?JTde25wTOE;8W3*(jx*Hh=Gfvl6~4v) z7={HprOyS@d*;HG)%Z!t{|v*6@&b;STvXBQ;#OqcUOj1JWv`xz?i-{LcxZ=0BTa>l zssZ752-d)qbX6@To)Aqd``~YLAX9)^O&jZszx7RECS5Zwp z6w@*h^vyxiR5r_Bg}l2u3S)D*Z|dt72t)Pi{>5-CzuHLh>xur0*#-U^(OruhSnP);LTbUsMyAg!w=B|=V! zu`!5JmbuYPSeK=RSw)S5QAAt)kj~LY^1m_7Ed*Wm-C_C=qPC~p)60<_MhOd)w)mhE!OL@*Z^5%6WTdpwm-?sGeu)9JOU#Be0lB(KLCZHGMl z)TW3u8^%3f*yT?t%${*WwXPNSY#Mm|;jkxWBs zf*Ay;@7}(D579KH-I1VC9sledQkT}ON~^u}Sevey`Izn@om6-@IUQmS(aSbw7QvzN zw?L_964^Lsw7~=OM{?(`SwFP7-zSE?H5pKA{iI*~YcbpPcCXS8$HH!VJQ?z=9F$lu zzSj2zQ%P-}GK2dt9{yrv$(C1Si|%}rYY=A*zc^1y)A-66-up)rn4Fw~Ip;3Lg?)s3 zgq%~!2{F0<)(6dvv#zqh_xYWOR1N(SSV^50W^b=G$vM(J7jADd!jV`r5%l~{l#S@k zuE{kt-sTVe*mqOw=Xe)dQO|x1arOivJI7YeVRw)CtQ~#%^?R*=KvCEH)oGQcXwx>8@tS}b zX0>3^HS;^eQR4kKKm>OdgCLb)wq{I+Vv5%$V~^*>s|KwgqN;T{a8;?p5#_{5W?0r0@vF; zkKc@>)urET^paINHk@iw(Bt{c&crd-{$ZaV`lCcbm;bXt-whjP@n$ZQ!Fl<}C;`}| z#(jtg@Z!jBpIU>bR%p@1bWL^)>QLdG3P$EOmDB`8Q#<=tH*5LpZ7OAQx+8EIF$g67 z-nYnF_1x*j?QivqJ!E7Ag!mS@#*!QbWkZ?+AEoA!C-C~f4YP+TCib)yFd+$B&}xG#Tl$S-JTBkvsYVHwxcQrC9=fP#QBiwAr#%}`;H=q zm(C~!@u69UxTeu%TL(RPj!OWM2Y3s$B{HwdYBDA{x~XPd_yd;Wvg@RIt%$EVpO-q< z9954j0Owz(vEUs>^=vALP*AoRN#ybx@{h3i8XLbpx6AWa8#fp8z z@S~%a*BSfltAPk*xj;q@4>{1VR4#3tOiB0tU~^iH360LuniU9XUmB5=B3UE078!T% zIF42tW@cj99FoU?c@1^AO=de@u(2uddqh))%AIF$_^-un=Rc0pFnO&hA=zVfTzJ!) z5BOW>S{GV$wvw{V@lKEXefq>&_UW1JvGrn;i~&@YgFi;xf1jS6zUcB6CpLzuA9hY! zHeaa?C435EmUFq61q$Sfq6)hOz#@;3GuUA-#oR9M5N?%)OwB@oDVFPJxw6xGXF|95 z^hiEc0}EmHwUx7xHG?{{=;4FAb4JHsByKx;ZrvtKhXQ^puU`Ca_N&rRT{FB**1=D? 
zPVu(N$))A6nk=`^Z@a$CZAa$di(Zpfw{Xz{^|e1vA$D^|y;MjzDEV&r4u!h39}K2Z z8pj1|OiK}`ZR}jiLmymXm8I8Vx~fap9Jp=7)SPZ?JS!%4$BLRykI9s(dHE63sZ=ej zDyweLT!aAe?OH5ccZDko8y|BYBP3VY#=>bBES{u&gV2+I=vSY|zdooxA?wnytx@6N zZIK5DYvT8ZV3ZB}iji8#5#VQ9$7-MYfcyM@4o#@GQ z|Ed+mY)OpxU+MDcL#cX|Bh_Po7VqNq@$Ugo!PTpKkw)v6zeSBmjl5p~b#$9@R|B<1 zOF`(mi4BY)r>6iQbc)G}exg5`ritm8ER8l|PLZ(@v8FY}0$Y-R=^zI6hDQz|CW;ExvP|F6DN9>-M@&dVl-^ zvX%XEKIj5DOXr6Zeao17=YSh3l%4Ow$!(-<9X$5vorOT79ZM286NRePx!78}i3sB}IB26e0r^BCj~X zZnjK^M%1b$TI72hGxZG8Nl2&!*)UU1rG{+-^4SrSuEj}sd9m#C7;=Z#i1sUxY)+PNu>q;%4 z=$6$|O5Ie>aL=x?Kpel=51U%=J0Rl7bG0+`!6UBT{7*gmIup66~$XmnY64%$Pu z%npz8o7g^pKb@al0d3;`cDKtrpSqyQNa(?vIXCU42F}>>UO##J?5QH$^O1#3P@hlM{`4 zU+1P=RXvQyug>jhFy0~GB_PDSF^Mta8x(3Wfh(432w~ys7|OD0n))So7=2^lpeln7 z)13A}*`iHbTWzLoUAE?AV{y8cGHzDY%jZ9$*z{!FgZPMMb7%qm<8+OnDQdQWYxoP9 z9M%!!RN^46=oARN%5X2{Keck}#zx)o3I&%7RBNfnIxVf@mkP zA)X?5&A1XNWpjJ-%@#Ua#DYAJYIh^*MPk~iK}jL9)TRmP0hKU)iL0PcaAEr}vbk@( zCD(?iQ6@RvGkzTak9Z1td_cLjZuPGi@W=4rdS~@tj5-Vlly|yCG<;y`r&NvFNPjcF zhQa-rKj=tga>{P(;Cz2a?1@(ShUp?2D&IYMueU;>wXwUyRn?$@3XfL%F$k$!T%&-F zQV`g+>e3FaUlTa&>_bUNvlIaYpI3saiRl&*_;l5fLHiGlQ{gdbaZ*gWOpo-q_;pfD zn3|U@EjWqkasFIQl56T*ij(b}sFvksgbfT*?fKA9l*#qm!~U~^zMe`by1f2{llCQ3 z#kd#$o=cyCzFEJwc%Y7-Tw0!4HeW@(n+Q`<%>g}F`%+kqLM!w7dmI3h44NO}I5;n15n3W@^yah08hq;c<$nx&LNl2xEsNp>SxHzrd>MD)Do1U$V?2nYQ(L#ToJg$=^TS&(<`SBIV>1+ z{<$rYMHns8Ztj;T+8XS7ge|j0hvBFS8A8&xjU_JBm1fj9Q_ApN^1J`_5oJgcXY=Sk z;bd-gi>`)S(8KElx&-lp&k*8@+qd6_A`xA-KF$dyj#r~M`S zr1!O7)yEQfo`N^?fz^X}j_Tg^+0$Y)mgw2e-5rb`H&?jP6c1l~n_I8~_Mg)*_ik<@ zQ_#ZAr5wNNWgdF7MDi)}4vC-4+rhKRzxd-UMrnt*fvy`yZJx2X>!X@Cwy>z*?K*DN zJ{s_f@ko(+ENL<{5_xdit!j8-9SbIo4!l|VD=Pf;QdR9vQz5m(%8C=xz8|)BbGk^Y z>)-0T>B-3IONDmh<>rkU$TUiQ?LdkouTnIoxr>7=3YB1JlJMT2R9Gq>Jgjk%fO+|r z*D{{6=pq*_;J_!?h-Q!7c!=rEUVL|B(t{DrW)mkLJPPUv-se{?Lds(ohfUjqS#LbO G3;#EW>qeUZ literal 0 HcmV?d00001 diff --git a/tests/resources/inputs_extra/regression_test_label_1.tif b/tests/resources/inputs_extra/regression_test_label_1.tif new file mode 100644 index 0000000000000000000000000000000000000000..521be614b99910cd274cdc439a74bd43c423c1e0 GIT binary patch literal 9057 zcmeHM2T+r1m;R^$0V&dJq{tPd2}Kte|#^eQE^5Re2!ngU8wu>lGKDq;W;R9a{O z6a*CrC{>!$n}E{x#p}QS-JPA?e`dWiJF{lyYw)h-!PL=LR>(rfe&ndxz7I6rUmyney;fdY~H`D zQT>Z;@zbUNYYxWJL;E@p{Lc$<^bj?$UjiGSNCCltJp=5rL~@89*ka@m6my^a#~K4* z@4xuL{r^eC!5U2=5L|2|*jc zfe0Q5n&t)H2_8xo4nci~e>@6!Q`7j<`iK6<_5WHsk8o@5-?8K|_@Vc0vLB}lHV5{) zZ-e9bp~)%B62TTAGMchP7{m@ygK!{&t_VS3-(-+yH02L{RLDl(0Ud2KZir@vEhaeiN?E|#b7YJk0;g*7Y3>UQp5zp_diLxg(yUB$2xT^A{ z=?j~^5RTG6lF{aIJi;;$DRIN-i1~~AbzUcLb$H1}^nAjp-ZlNh;-&CzqF!FNkK8#c z!EBQU7Gjn!3)G6NANBa2IM?f1i!8M{W>qZ`)#_Q^?|(AVDsRaDxxJ)ybwS;9*vmnb zd}QyZ9UQ?)+NMTi50=n48l-@=7UH|y=62k+rchn<`m2c$MQop&YIe7m+?m=V(fWn& zKBJXz*7?Yie&3UJwT1Owl^^CX%DBF9T1FRGtj|Gpiq9^FIv+dIF#^E-Ba zXV!1r`HjlIYu4|I{+sXozhQ%Ur73d9+xQnzq=AjEyHUBRBeQdR>d9sVGd|QU*7N88 zPf$a>G@d*IdEXM^R!E&Qs{a@@BSU<%)3U9xqX5q(qI=N5v3Q?tIhVBKH@2rxs+rv$ zp=X8;X6om@|AOZB_rxO~m5{|a(+xLSRQ1hQDPN9SMFUnG&a1k|fomNLdZBVfRQ6iu z(lb(^DEt&>j`Q*uzC^YE-t>}1a}Eb995OzAQ|9$Atk!0_M#b+Jq3ps&2r z&hQAjemGEmkYAMXa_#Wr>!-5ubm?zC2Sw~irV13lpG7MpbEe&DNlRf{yHbpp*W;_e zfw^A!Lz4~?F;$CB^|RC4xIM>_uGRG|^=^8ofm(2r^1FuV9E?gcb(@E>p*!U;Z9`zD z4kulF@@+aHgTK>@|6>34?0b!64cvYKQ(UmJ6I3@UwNbP0MZzcP@H?JTde25wTOE;8W3*(jx*Hh=Gfvl6~4v) z7={HprOyS@d*;HG)%Z!t{|v*6@&b;STvXBQ;#OqcUOj1JWv`xz?i-{LcxZ=0BTa>l zssZ752-d)qbX6@To)Aqd``~YLAX9)^O&jZszx7RECS5Zwp z6w@*h^vyxiR5r_Bg}l2u3S)D*Z|dt72t)Pi{>5-CzuHLh>xur0*#-U^(OruhSnP);LTbUsMyAg!w=B|=V! 
zu`!5JmbuYPSeK=RSw)S5QAAt)kj~LY^1m_7Ed*Wm-C_C=qPC~p)60<_MhOd)w)mhE!OL@*Z^5%6WTdpwm-?sGeu)9JOU#Be0lB(KLCZHGMl z)TW3u8^%3f*yT?t%${*WwXPNSY#Mm|;jkxWBs zf*Ay;@7}(D579KH-I1VC9sledQkT}ON~^u}Sevey`Izn@om6-@IUQmS(aSbw7QvzN zw?L_964^Lsw7~=OM{?(`SwFP7-zSE?H5pKA{iI*~YcbpPcCXS8$HH!VJQ?z=9F$lu zzSj2zQ%P-}GK2dt9{yrv$(C1Si|%}rYY=A*zc^1y)A-66-up)rn4Fw~Ip;3Lg?)s3 zgq%~!2{F0<)(6dvv#zqh_xYWOR1N(SSV^50W^b=G$vM(J7jADd!jV`r5%l~{l#S@k zuE{kt-sTVe*mqOw=Xe)dQO|x1arOivJI7YeVRw)CtQ~#%^?R*=KvCEH)oGQcXwx>8@tS}b zX0>3^HS;^eQR4kKKm>OdgCLb)wq{I+Vv5%$V~^*>s|KwgqN;T{a8;?p5#_{5W?0r0@vF; zkKc@>)urET^paINHk@iw(Bt{c&crd-{$ZaV`lCcbm;bXt-whjP@n$ZQ!Fl<}C;`}| z#(jtg@Z!jBpIU>bR%p@1bWL^)>QLdG3P$EOmDB`8Q#<=tH*5LpZ7OAQx+8EIF$g67 z-nYnF_1x*j?QivqJ!E7Ag!mS@#*!QbWkZ?+AEoA!C-C~f4YP+TCib)yFd+$B&}xG#Tl$S-JTBkvsYVHwxcQrC9=fP#QBiwAr#%}`;H=q zm(C~!@u69UxTeu%TL(RPj!OWM2Y3s$B{HwdYBDA{x~XPd_yd;Wvg@RIt%$EVpO-q< z9954j0Owz(vEUs>^=vALP*AoRN#ybx@{h3i8XLbpx6AWa8#fp8z z@S~%a*BSfltAPk*xj;q@4>{1VR4#3tOiB0tU~^iH360LuniU9XUmB5=B3UE078!T% zIF42tW@cj99FoU?c@1^AO=de@u(2uddqh))%AIF$_^-un=Rc0pFnO&hA=zVfTzJ!) z5BOW>S{GV$wvw{V@lKEXefq>&_UW1JvGrn;i~&@YgFi;xf1jS6zUcB6CpLzuA9hY! zHeaa?C435EmUFq61q$Sfq6)hOz#@;3GuUA-#oR9M5N?%)OwB@oDVFPJxw6xGXF|95 z^hiEc0}EmHwUx7xHG?{{=;4FAb4JHsByKx;ZrvtKhXQ^puU`Ca_N&rRT{FB**1=D? zPVu(N$))A6nk=`^Z@a$CZAa$di(Zpfw{Xz{^|e1vA$D^|y;MjzDEV&r4u!h39}K2Z z8pj1|OiK}`ZR}jiLmymXm8I8Vx~fap9Jp=7)SPZ?JS!%4$BLRykI9s(dHE63sZ=ej zDyweLT!aAe?OH5ccZDko8y|BYBP3VY#=>bBES{u&gV2+I=vSY|zdooxA?wnytx@6N zZIK5DYvT8ZV3ZB}iji8#5#VQ9$7-MYfcyM@4o#@GQ z|Ed+mY)OpxU+MDcL#cX|Bh_Po7VqNq@$Ugo!PTpKkw)v6zeSBmjl5p~b#$9@R|B<1 zOF`(mi4BY)r>6iQbc)G}exg5`ritm8ER8l|PLZ(@v8FY}0$Y-R=^zI6hDQz|CW;ExvP|F6DN9>-M@&dVl-^ zvX%XEKIj5DOXr6Zeao17=YSh3l%4Ow$!(-<9X$5vorOT79ZM286NRePx!78}i3sB}IB26e0r^BCj~X zZnjK^M%1b$TI72hGxZG8Nl2&!*)UU1rG{+-^4SrSuEj}sd9m#C7;=Z#i1sUxY)+PNu>q;%4 z=$6$|O5Ie>aL=x?Kpel=51U%=J0Rl7bG0+`!6UBT{7*gmIup66~$XmnY64%$Pu z%npz8o7g^pKb@al0d3;`cDKtrpSqyQNa(?vIXCU42F}>>UO##J?5QH$^O1#3P@hlM{`4 zU+1P=RXvQyug>jhFy0~GB_PDSF^Mta8x(3Wfh(432w~ys7|OD0n))So7=2^lpeln7 z)13A}*`iHbTWzLoUAE?AV{y8cGHzDY%jZ9$*z{!FgZPMMb7%qm<8+OnDQdQWYxoP9 z9M%!!RN^46=oARN%5X2{Keck}#zx)o3I&%7RBNfnIxVf@mkP zA)X?5&A1XNWpjJ-%@#Ua#DYAJYIh^*MPk~iK}jL9)TRmP0hKU)iL0PcaAEr}vbk@( zCD(?iQ6@RvGkzTak9Z1td_cLjZuPGi@W=4rdS~@tj5-Vlly|yCG<;y`r&NvFNPjcF zhQa-rKj=tga>{P(;Cz2a?1@(ShUp?2D&IYMueU;>wXwUyRn?$@3XfL%F$k$!T%&-F zQV`g+>e3FaUlTa&>_bUNvlIaYpI3saiRl&*_;l5fLHiGlQ{gdbaZ*gWOpo-q_;pfD zn3|U@EjWqkasFIQl56T*ij(b}sFvksgbfT*?fKA9l*#qm!~U~^zMe`by1f2{llCQ3 z#kd#$o=cyCzFEJwc%Y7-Tw0!4HeW@(n+Q`<%>g}F`%+kqLM!w7dmI3h44NO}I5;n15n3W@^yah08hq;c<$nx&LNl2xEsNp>SxHzrd>MD)Do1U$V?2nYQ(L#ToJg$=^TS&(<`SBIV>1+ z{<$rYMHns8Ztj;T+8XS7ge|j0hvBFS8A8&xjU_JBm1fj9Q_ApN^1J`_5oJgcXY=Sk z;bd-gi>`)S(8KElx&-lp&k*8@+qd6_A`xA-KF$dyj#r~M`S zr1!O7)yEQfo`N^?fz^X}j_Tg^+0$Y)mgw2e-5rb`H&?jP6c1l~n_I8~_Mg)*_ik<@ zQ_#ZAr5wNNWgdF7MDi)}4vC-4+rhKRzxd-UMrnt*fvy`yZJx2X>!X@Cwy>z*?K*DN zJ{s_f@ko(+ENL<{5_xdit!j8-9SbIo4!l|VD=Pf;QdR9vQz5m(%8C=xz8|)BbGk^Y z>)-0T>B-3IONDmh<>rkU$TUiQ?LdkouTnIoxr>7=3YB1JlJMT2R9Gq>Jgjk%fO+|r z*D{{6=pq*_;J_!?h-Q!7c!=rEUVL|B(t{DrW)mkLJPPUv-se{?Lds(ohfUjqS#LbO G3;#EW>qeUZ literal 0 HcmV?d00001 diff --git a/tests/resources/inputs_extra/regression_test_label_2.tif b/tests/resources/inputs_extra/regression_test_label_2.tif new file mode 100644 index 0000000000000000000000000000000000000000..521be614b99910cd274cdc439a74bd43c423c1e0 GIT binary patch literal 9057 zcmeHM2T+r1m;R^$0V&dJq{tPd2}Kte|#^eQE^5Re2!ngU8wu>lGKDq;W;R9a{O z6a*CrC{>!$n}E{x#p}QS-JPA?e`dWiJF{lyYw)h-!PL=LR>(rfe&ndxz7I6rUmyney;fdY~H`D 
zQT>Z;@zbUNYYxWJL;E@p{Lc$<^bj?$UjiGSNCCltJp=5rL~@89*ka@m6my^a#~K4* z@4xuL{r^eC!5U2=5L|2|*jc zfe0Q5n&t)H2_8xo4nci~e>@6!Q`7j<`iK6<_5WHsk8o@5-?8K|_@Vc0vLB}lHV5{) zZ-e9bp~)%B62TTAGMchP7{m@ygK!{&t_VS3-(-+yH02L{RLDl(0Ud2KZir@vEhaeiN?E|#b7YJk0;g*7Y3>UQp5zp_diLxg(yUB$2xT^A{ z=?j~^5RTG6lF{aIJi;;$DRIN-i1~~AbzUcLb$H1}^nAjp-ZlNh;-&CzqF!FNkK8#c z!EBQU7Gjn!3)G6NANBa2IM?f1i!8M{W>qZ`)#_Q^?|(AVDsRaDxxJ)ybwS;9*vmnb zd}QyZ9UQ?)+NMTi50=n48l-@=7UH|y=62k+rchn<`m2c$MQop&YIe7m+?m=V(fWn& zKBJXz*7?Yie&3UJwT1Owl^^CX%DBF9T1FRGtj|Gpiq9^FIv+dIF#^E-Ba zXV!1r`HjlIYu4|I{+sXozhQ%Ur73d9+xQnzq=AjEyHUBRBeQdR>d9sVGd|QU*7N88 zPf$a>G@d*IdEXM^R!E&Qs{a@@BSU<%)3U9xqX5q(qI=N5v3Q?tIhVBKH@2rxs+rv$ zp=X8;X6om@|AOZB_rxO~m5{|a(+xLSRQ1hQDPN9SMFUnG&a1k|fomNLdZBVfRQ6iu z(lb(^DEt&>j`Q*uzC^YE-t>}1a}Eb995OzAQ|9$Atk!0_M#b+Jq3ps&2r z&hQAjemGEmkYAMXa_#Wr>!-5ubm?zC2Sw~irV13lpG7MpbEe&DNlRf{yHbpp*W;_e zfw^A!Lz4~?F;$CB^|RC4xIM>_uGRG|^=^8ofm(2r^1FuV9E?gcb(@E>p*!U;Z9`zD z4kulF@@+aHgTK>@|6>34?0b!64cvYKQ(UmJ6I3@UwNbP0MZzcP@H?JTde25wTOE;8W3*(jx*Hh=Gfvl6~4v) z7={HprOyS@d*;HG)%Z!t{|v*6@&b;STvXBQ;#OqcUOj1JWv`xz?i-{LcxZ=0BTa>l zssZ752-d)qbX6@To)Aqd``~YLAX9)^O&jZszx7RECS5Zwp z6w@*h^vyxiR5r_Bg}l2u3S)D*Z|dt72t)Pi{>5-CzuHLh>xur0*#-U^(OruhSnP);LTbUsMyAg!w=B|=V! zu`!5JmbuYPSeK=RSw)S5QAAt)kj~LY^1m_7Ed*Wm-C_C=qPC~p)60<_MhOd)w)mhE!OL@*Z^5%6WTdpwm-?sGeu)9JOU#Be0lB(KLCZHGMl z)TW3u8^%3f*yT?t%${*WwXPNSY#Mm|;jkxWBs zf*Ay;@7}(D579KH-I1VC9sledQkT}ON~^u}Sevey`Izn@om6-@IUQmS(aSbw7QvzN zw?L_964^Lsw7~=OM{?(`SwFP7-zSE?H5pKA{iI*~YcbpPcCXS8$HH!VJQ?z=9F$lu zzSj2zQ%P-}GK2dt9{yrv$(C1Si|%}rYY=A*zc^1y)A-66-up)rn4Fw~Ip;3Lg?)s3 zgq%~!2{F0<)(6dvv#zqh_xYWOR1N(SSV^50W^b=G$vM(J7jADd!jV`r5%l~{l#S@k zuE{kt-sTVe*mqOw=Xe)dQO|x1arOivJI7YeVRw)CtQ~#%^?R*=KvCEH)oGQcXwx>8@tS}b zX0>3^HS;^eQR4kKKm>OdgCLb)wq{I+Vv5%$V~^*>s|KwgqN;T{a8;?p5#_{5W?0r0@vF; zkKc@>)urET^paINHk@iw(Bt{c&crd-{$ZaV`lCcbm;bXt-whjP@n$ZQ!Fl<}C;`}| z#(jtg@Z!jBpIU>bR%p@1bWL^)>QLdG3P$EOmDB`8Q#<=tH*5LpZ7OAQx+8EIF$g67 z-nYnF_1x*j?QivqJ!E7Ag!mS@#*!QbWkZ?+AEoA!C-C~f4YP+TCib)yFd+$B&}xG#Tl$S-JTBkvsYVHwxcQrC9=fP#QBiwAr#%}`;H=q zm(C~!@u69UxTeu%TL(RPj!OWM2Y3s$B{HwdYBDA{x~XPd_yd;Wvg@RIt%$EVpO-q< z9954j0Owz(vEUs>^=vALP*AoRN#ybx@{h3i8XLbpx6AWa8#fp8z z@S~%a*BSfltAPk*xj;q@4>{1VR4#3tOiB0tU~^iH360LuniU9XUmB5=B3UE078!T% zIF42tW@cj99FoU?c@1^AO=de@u(2uddqh))%AIF$_^-un=Rc0pFnO&hA=zVfTzJ!) z5BOW>S{GV$wvw{V@lKEXefq>&_UW1JvGrn;i~&@YgFi;xf1jS6zUcB6CpLzuA9hY! zHeaa?C435EmUFq61q$Sfq6)hOz#@;3GuUA-#oR9M5N?%)OwB@oDVFPJxw6xGXF|95 z^hiEc0}EmHwUx7xHG?{{=;4FAb4JHsByKx;ZrvtKhXQ^puU`Ca_N&rRT{FB**1=D? 
zPVu(N$))A6nk=`^Z@a$CZAa$di(Zpfw{Xz{^|e1vA$D^|y;MjzDEV&r4u!h39}K2Z z8pj1|OiK}`ZR}jiLmymXm8I8Vx~fap9Jp=7)SPZ?JS!%4$BLRykI9s(dHE63sZ=ej zDyweLT!aAe?OH5ccZDko8y|BYBP3VY#=>bBES{u&gV2+I=vSY|zdooxA?wnytx@6N zZIK5DYvT8ZV3ZB}iji8#5#VQ9$7-MYfcyM@4o#@GQ z|Ed+mY)OpxU+MDcL#cX|Bh_Po7VqNq@$Ugo!PTpKkw)v6zeSBmjl5p~b#$9@R|B<1 zOF`(mi4BY)r>6iQbc)G}exg5`ritm8ER8l|PLZ(@v8FY}0$Y-R=^zI6hDQz|CW;ExvP|F6DN9>-M@&dVl-^ zvX%XEKIj5DOXr6Zeao17=YSh3l%4Ow$!(-<9X$5vorOT79ZM286NRePx!78}i3sB}IB26e0r^BCj~X zZnjK^M%1b$TI72hGxZG8Nl2&!*)UU1rG{+-^4SrSuEj}sd9m#C7;=Z#i1sUxY)+PNu>q;%4 z=$6$|O5Ie>aL=x?Kpel=51U%=J0Rl7bG0+`!6UBT{7*gmIup66~$XmnY64%$Pu z%npz8o7g^pKb@al0d3;`cDKtrpSqyQNa(?vIXCU42F}>>UO##J?5QH$^O1#3P@hlM{`4 zU+1P=RXvQyug>jhFy0~GB_PDSF^Mta8x(3Wfh(432w~ys7|OD0n))So7=2^lpeln7 z)13A}*`iHbTWzLoUAE?AV{y8cGHzDY%jZ9$*z{!FgZPMMb7%qm<8+OnDQdQWYxoP9 z9M%!!RN^46=oARN%5X2{Keck}#zx)o3I&%7RBNfnIxVf@mkP zA)X?5&A1XNWpjJ-%@#Ua#DYAJYIh^*MPk~iK}jL9)TRmP0hKU)iL0PcaAEr}vbk@( zCD(?iQ6@RvGkzTak9Z1td_cLjZuPGi@W=4rdS~@tj5-Vlly|yCG<;y`r&NvFNPjcF zhQa-rKj=tga>{P(;Cz2a?1@(ShUp?2D&IYMueU;>wXwUyRn?$@3XfL%F$k$!T%&-F zQV`g+>e3FaUlTa&>_bUNvlIaYpI3saiRl&*_;l5fLHiGlQ{gdbaZ*gWOpo-q_;pfD zn3|U@EjWqkasFIQl56T*ij(b}sFvksgbfT*?fKA9l*#qm!~U~^zMe`by1f2{llCQ3 z#kd#$o=cyCzFEJwc%Y7-Tw0!4HeW@(n+Q`<%>g}F`%+kqLM!w7dmI3h44NO}I5;n15n3W@^yah08hq;c<$nx&LNl2xEsNp>SxHzrd>MD)Do1U$V?2nYQ(L#ToJg$=^TS&(<`SBIV>1+ z{<$rYMHns8Ztj;T+8XS7ge|j0hvBFS8A8&xjU_JBm1fj9Q_ApN^1J`_5oJgcXY=Sk z;bd-gi>`)S(8KElx&-lp&k*8@+qd6_A`xA-KF$dyj#r~M`S zr1!O7)yEQfo`N^?fz^X}j_Tg^+0$Y)mgw2e-5rb`H&?jP6c1l~n_I8~_Mg)*_ik<@ zQ_#ZAr5wNNWgdF7MDi)}4vC-4+rhKRzxd-UMrnt*fvy`yZJx2X>!X@Cwy>z*?K*DN zJ{s_f@ko(+ENL<{5_xdit!j8-9SbIo4!l|VD=Pf;QdR9vQz5m(%8C=xz8|)BbGk^Y z>)-0T>B-3IONDmh<>rkU$TUiQ?LdkouTnIoxr>7=3YB1JlJMT2R9Gq>Jgjk%fO+|r z*D{{6=pq*_;J_!?h-Q!7c!=rEUVL|B(t{DrW)mkLJPPUv-se{?Lds(ohfUjqS#LbO G3;#EW>qeUZ literal 0 HcmV?d00001 diff --git a/tests/resources/inputs_extra/regression_test_label_3.tif b/tests/resources/inputs_extra/regression_test_label_3.tif new file mode 100644 index 0000000000000000000000000000000000000000..521be614b99910cd274cdc439a74bd43c423c1e0 GIT binary patch literal 9057 zcmeHM2T+r1m;R^$0V&dJq{tPd2}Kte|#^eQE^5Re2!ngU8wu>lGKDq;W;R9a{O z6a*CrC{>!$n}E{x#p}QS-JPA?e`dWiJF{lyYw)h-!PL=LR>(rfe&ndxz7I6rUmyney;fdY~H`D zQT>Z;@zbUNYYxWJL;E@p{Lc$<^bj?$UjiGSNCCltJp=5rL~@89*ka@m6my^a#~K4* z@4xuL{r^eC!5U2=5L|2|*jc zfe0Q5n&t)H2_8xo4nci~e>@6!Q`7j<`iK6<_5WHsk8o@5-?8K|_@Vc0vLB}lHV5{) zZ-e9bp~)%B62TTAGMchP7{m@ygK!{&t_VS3-(-+yH02L{RLDl(0Ud2KZir@vEhaeiN?E|#b7YJk0;g*7Y3>UQp5zp_diLxg(yUB$2xT^A{ z=?j~^5RTG6lF{aIJi;;$DRIN-i1~~AbzUcLb$H1}^nAjp-ZlNh;-&CzqF!FNkK8#c z!EBQU7Gjn!3)G6NANBa2IM?f1i!8M{W>qZ`)#_Q^?|(AVDsRaDxxJ)ybwS;9*vmnb zd}QyZ9UQ?)+NMTi50=n48l-@=7UH|y=62k+rchn<`m2c$MQop&YIe7m+?m=V(fWn& zKBJXz*7?Yie&3UJwT1Owl^^CX%DBF9T1FRGtj|Gpiq9^FIv+dIF#^E-Ba zXV!1r`HjlIYu4|I{+sXozhQ%Ur73d9+xQnzq=AjEyHUBRBeQdR>d9sVGd|QU*7N88 zPf$a>G@d*IdEXM^R!E&Qs{a@@BSU<%)3U9xqX5q(qI=N5v3Q?tIhVBKH@2rxs+rv$ zp=X8;X6om@|AOZB_rxO~m5{|a(+xLSRQ1hQDPN9SMFUnG&a1k|fomNLdZBVfRQ6iu z(lb(^DEt&>j`Q*uzC^YE-t>}1a}Eb995OzAQ|9$Atk!0_M#b+Jq3ps&2r z&hQAjemGEmkYAMXa_#Wr>!-5ubm?zC2Sw~irV13lpG7MpbEe&DNlRf{yHbpp*W;_e zfw^A!Lz4~?F;$CB^|RC4xIM>_uGRG|^=^8ofm(2r^1FuV9E?gcb(@E>p*!U;Z9`zD z4kulF@@+aHgTK>@|6>34?0b!64cvYKQ(UmJ6I3@UwNbP0MZzcP@H?JTde25wTOE;8W3*(jx*Hh=Gfvl6~4v) z7={HprOyS@d*;HG)%Z!t{|v*6@&b;STvXBQ;#OqcUOj1JWv`xz?i-{LcxZ=0BTa>l zssZ752-d)qbX6@To)Aqd``~YLAX9)^O&jZszx7RECS5Zwp z6w@*h^vyxiR5r_Bg}l2u3S)D*Z|dt72t)Pi{>5-CzuHLh>xur0*#-U^(OruhSnP);LTbUsMyAg!w=B|=V! 
zu`!5JmbuYPSeK=RSw)S5QAAt)kj~LY^1m_7Ed*Wm-C_C=qPC~p)60<_MhOd)w)mhE!OL@*Z^5%6WTdpwm-?sGeu)9JOU#Be0lB(KLCZHGMl z)TW3u8^%3f*yT?t%${*WwXPNSY#Mm|;jkxWBs zf*Ay;@7}(D579KH-I1VC9sledQkT}ON~^u}Sevey`Izn@om6-@IUQmS(aSbw7QvzN zw?L_964^Lsw7~=OM{?(`SwFP7-zSE?H5pKA{iI*~YcbpPcCXS8$HH!VJQ?z=9F$lu zzSj2zQ%P-}GK2dt9{yrv$(C1Si|%}rYY=A*zc^1y)A-66-up)rn4Fw~Ip;3Lg?)s3 zgq%~!2{F0<)(6dvv#zqh_xYWOR1N(SSV^50W^b=G$vM(J7jADd!jV`r5%l~{l#S@k zuE{kt-sTVe*mqOw=Xe)dQO|x1arOivJI7YeVRw)CtQ~#%^?R*=KvCEH)oGQcXwx>8@tS}b zX0>3^HS;^eQR4kKKm>OdgCLb)wq{I+Vv5%$V~^*>s|KwgqN;T{a8;?p5#_{5W?0r0@vF; zkKc@>)urET^paINHk@iw(Bt{c&crd-{$ZaV`lCcbm;bXt-whjP@n$ZQ!Fl<}C;`}| z#(jtg@Z!jBpIU>bR%p@1bWL^)>QLdG3P$EOmDB`8Q#<=tH*5LpZ7OAQx+8EIF$g67 z-nYnF_1x*j?QivqJ!E7Ag!mS@#*!QbWkZ?+AEoA!C-C~f4YP+TCib)yFd+$B&}xG#Tl$S-JTBkvsYVHwxcQrC9=fP#QBiwAr#%}`;H=q zm(C~!@u69UxTeu%TL(RPj!OWM2Y3s$B{HwdYBDA{x~XPd_yd;Wvg@RIt%$EVpO-q< z9954j0Owz(vEUs>^=vALP*AoRN#ybx@{h3i8XLbpx6AWa8#fp8z z@S~%a*BSfltAPk*xj;q@4>{1VR4#3tOiB0tU~^iH360LuniU9XUmB5=B3UE078!T% zIF42tW@cj99FoU?c@1^AO=de@u(2uddqh))%AIF$_^-un=Rc0pFnO&hA=zVfTzJ!) z5BOW>S{GV$wvw{V@lKEXefq>&_UW1JvGrn;i~&@YgFi;xf1jS6zUcB6CpLzuA9hY! zHeaa?C435EmUFq61q$Sfq6)hOz#@;3GuUA-#oR9M5N?%)OwB@oDVFPJxw6xGXF|95 z^hiEc0}EmHwUx7xHG?{{=;4FAb4JHsByKx;ZrvtKhXQ^puU`Ca_N&rRT{FB**1=D? zPVu(N$))A6nk=`^Z@a$CZAa$di(Zpfw{Xz{^|e1vA$D^|y;MjzDEV&r4u!h39}K2Z z8pj1|OiK}`ZR}jiLmymXm8I8Vx~fap9Jp=7)SPZ?JS!%4$BLRykI9s(dHE63sZ=ej zDyweLT!aAe?OH5ccZDko8y|BYBP3VY#=>bBES{u&gV2+I=vSY|zdooxA?wnytx@6N zZIK5DYvT8ZV3ZB}iji8#5#VQ9$7-MYfcyM@4o#@GQ z|Ed+mY)OpxU+MDcL#cX|Bh_Po7VqNq@$Ugo!PTpKkw)v6zeSBmjl5p~b#$9@R|B<1 zOF`(mi4BY)r>6iQbc)G}exg5`ritm8ER8l|PLZ(@v8FY}0$Y-R=^zI6hDQz|CW;ExvP|F6DN9>-M@&dVl-^ zvX%XEKIj5DOXr6Zeao17=YSh3l%4Ow$!(-<9X$5vorOT79ZM286NRePx!78}i3sB}IB26e0r^BCj~X zZnjK^M%1b$TI72hGxZG8Nl2&!*)UU1rG{+-^4SrSuEj}sd9m#C7;=Z#i1sUxY)+PNu>q;%4 z=$6$|O5Ie>aL=x?Kpel=51U%=J0Rl7bG0+`!6UBT{7*gmIup66~$XmnY64%$Pu z%npz8o7g^pKb@al0d3;`cDKtrpSqyQNa(?vIXCU42F}>>UO##J?5QH$^O1#3P@hlM{`4 zU+1P=RXvQyug>jhFy0~GB_PDSF^Mta8x(3Wfh(432w~ys7|OD0n))So7=2^lpeln7 z)13A}*`iHbTWzLoUAE?AV{y8cGHzDY%jZ9$*z{!FgZPMMb7%qm<8+OnDQdQWYxoP9 z9M%!!RN^46=oARN%5X2{Keck}#zx)o3I&%7RBNfnIxVf@mkP zA)X?5&A1XNWpjJ-%@#Ua#DYAJYIh^*MPk~iK}jL9)TRmP0hKU)iL0PcaAEr}vbk@( zCD(?iQ6@RvGkzTak9Z1td_cLjZuPGi@W=4rdS~@tj5-Vlly|yCG<;y`r&NvFNPjcF zhQa-rKj=tga>{P(;Cz2a?1@(ShUp?2D&IYMueU;>wXwUyRn?$@3XfL%F$k$!T%&-F zQV`g+>e3FaUlTa&>_bUNvlIaYpI3saiRl&*_;l5fLHiGlQ{gdbaZ*gWOpo-q_;pfD zn3|U@EjWqkasFIQl56T*ij(b}sFvksgbfT*?fKA9l*#qm!~U~^zMe`by1f2{llCQ3 z#kd#$o=cyCzFEJwc%Y7-Tw0!4HeW@(n+Q`<%>g}F`%+kqLM!w7dmI3h44NO}I5;n15n3W@^yah08hq;c<$nx&LNl2xEsNp>SxHzrd>MD)Do1U$V?2nYQ(L#ToJg$=^TS&(<`SBIV>1+ z{<$rYMHns8Ztj;T+8XS7ge|j0hvBFS8A8&xjU_JBm1fj9Q_ApN^1J`_5oJgcXY=Sk z;bd-gi>`)S(8KElx&-lp&k*8@+qd6_A`xA-KF$dyj#r~M`S zr1!O7)yEQfo`N^?fz^X}j_Tg^+0$Y)mgw2e-5rb`H&?jP6c1l~n_I8~_Mg)*_ik<@ zQ_#ZAr5wNNWgdF7MDi)}4vC-4+rhKRzxd-UMrnt*fvy`yZJx2X>!X@Cwy>z*?K*DN zJ{s_f@ko(+ENL<{5_xdit!j8-9SbIo4!l|VD=Pf;QdR9vQz5m(%8C=xz8|)BbGk^Y z>)-0T>B-3IONDmh<>rkU$TUiQ?LdkouTnIoxr>7=3YB1JlJMT2R9Gq>Jgjk%fO+|r z*D{{6=pq*_;J_!?h-Q!7c!=rEUVL|B(t{DrW)mkLJPPUv-se{?Lds(ohfUjqS#LbO G3;#EW>qeUZ literal 0 HcmV?d00001 From 62fa3058348ad7e12f7fb686b9fd0a29dbbbacb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 16:40:15 -0300 Subject: [PATCH 20/39] minor changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/pixel_wise_model.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/terratorch/models/pixel_wise_model.py 
b/terratorch/models/pixel_wise_model.py index 9b75b3cd..bc01f173 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -154,8 +154,6 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: aux_outputs[name] = aux_output mask = self._crop_image_when_necessary(mask, input_size) - print(mask.shape) - print(aux_outputs) return ModelOutput(output=mask, auxiliary_heads=aux_outputs) def _get_head(self, task: str, input_embed_dim: int, head_kwargs): From 1c409e8cc128d72f6419c65022aa2a676268f20a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 17:14:46 -0300 Subject: [PATCH 21/39] more tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- examples/scripts/create_images.py | 2 +- ...ufactured-finetune_prithvi_pixelwise_nondivisible.yaml | 4 ++-- tests/test_finetune.py | 8 ++++++++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/examples/scripts/create_images.py b/examples/scripts/create_images.py index 5c195847..034e85f9 100644 --- a/examples/scripts/create_images.py +++ b/examples/scripts/create_images.py @@ -27,7 +27,7 @@ for c in range(n_copies): - pad = random.randint(1, pad_limit) + pad = 3#random.randint(1, pad_limit) filename = os.path.split(input_file)[-1] output_file = os.path.join(output_dir, filename.replace(".tif", f"_{c}.tif")) print(pad) diff --git a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml index f2705c1f..7fc0b834 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml @@ -28,8 +28,8 @@ trainer: data: class_path: GenericNonGeoPixelwiseRegressionDataModule init_args: - batch_size: 1 - num_workers: 1 + batch_size: 2 + num_workers: 4 train_transform: #- class_path: albumentations.HorizontalFlip # init_args: diff --git a/tests/test_finetune.py b/tests/test_finetune.py index 76d4df15..badccfb1 100644 --- a/tests/test_finetune.py +++ b/tests/test_finetune.py @@ -54,6 +54,14 @@ def test_finetune_pad(case): gc.collect() +@pytest.mark.parametrize("model_name", ["prithvi_eo_v2_300"]) +@pytest.mark.parametrize("case", ["fit", "test", "validate"]) +def test_finetune_pad(case): + command_list = [case, "-c", f"tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml"] + _ = build_lightning_cli(command_list) + + gc.collect() + @pytest.mark.parametrize("model_name", ["prithvi_swin_B"]) def test_finetune_metrics_from_file(model_name): From 42c3d989e5e07ad3d24f3af64305b32bf150015a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 17:40:37 -0300 Subject: [PATCH 22/39] merging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- .../backbones/select_patch_embed_weights.py | 149 +++++++++--------- terratorch/models/clay_model_factory.py | 17 +- 2 files changed, 89 insertions(+), 77 deletions(-) diff --git a/terratorch/models/backbones/select_patch_embed_weights.py b/terratorch/models/backbones/select_patch_embed_weights.py index b175140e..de9e1b90 100644 --- a/terratorch/models/backbones/select_patch_embed_weights.py +++ b/terratorch/models/backbones/select_patch_embed_weights.py @@ 
-10,63 +10,87 @@ from terratorch.datasets import HLSBands, OpticalBands, SARBands import collections -def patch_embed_weights_are_compatible(model_patch_embed: torch.Tensor, checkpoint_patch_embed: torch.Tensor) -> bool: - # check all dimensions are the same except for channel dimension - if len(model_patch_embed.shape) != len(checkpoint_patch_embed.shape): - return False - model_shape = [model_patch_embed.shape[i] for i in range(len(model_patch_embed.shape)) if i != 1] - checkpoint_shape = [checkpoint_patch_embed.shape[i] for i in range(len(checkpoint_patch_embed.shape)) if i != 1] - return model_shape == checkpoint_shape - -def select_patch_embed_weights( - state_dict: dict, model: nn.Module, pretrained_bands: list[HLSBands | int | OpticalBands| SARBands], model_bands: list[HLSBands | int | OpticalBands| SARBands], custom_proj_key: str = None -) -> dict: - """Filter out the patch embedding weights according to the bands being used. - If a band exists in the pretrained_bands, but not in model_bands, drop it. - If a band exists in model_bands, but not pretrained_bands, randomly initialize those weights. - - - Args: - state_dict (dict): State Dict - model (nn.Module): Model to load the weights onto. - pretrained_bands (list[HLSBands | int]): List of bands the model was pretrained on, in the correct order. - model_bands (list[HLSBands | int]): List of bands the model is going to be finetuned on, in the correct order - - Returns: - dict: New state dict - """ - if (type(pretrained_bands) == type(model_bands)) | (type(pretrained_bands) == int) | (type(model_bands) == int): - - if custom_proj_key is None: - _possible_keys_for_proj_weight = { - "patch_embed.proj.weight", - "module.patch_embed.proj.weight", - "patch_embed.projection.weight", - "module.patch_embed.projection.weight", - } - else: - _possible_keys_for_proj_weight = {custom_proj_key} - - patch_embed_proj_weight_key = state_dict.keys() & _possible_keys_for_proj_weight if (type(state_dict) in [collections.OrderedDict, dict]) else state_dict().keys() & _possible_keys_for_proj_weight - if len(patch_embed_proj_weight_key) == 0: - msg = "Could not find key for patch embed weight" - raise Exception(msg) - if len(patch_embed_proj_weight_key) > 1: - msg = "Too many matches for key for patch embed weight" - raise Exception(msg) - + def patch_embed_weights_are_compatible(model_patch_embed: torch.Tensor, checkpoint_patch_embed: torch.Tensor) -> bool: + # check all dimensions are the same except for channel dimension + if len(model_patch_embed.shape) != len(checkpoint_patch_embed.shape): + return False + + model_shape = [model_patch_embed.shape[i] for i in range(len(model_patch_embed.shape)) if i != 1] + checkpoint_shape = [checkpoint_patch_embed.shape[i] for i in range(len(checkpoint_patch_embed.shape)) if i != 1] + return model_shape == checkpoint_shape + + def select_patch_embed_weights( + state_dict: dict, model: nn.Module, pretrained_bands: list[HLSBands | int | OpticalBands| SARBands], model_bands: list[HLSBands | int | OpticalBands| SARBands], custom_proj_key: str = None + ) -> dict: + """Filter out the patch embedding weights according to the bands being used. + If a band exists in the pretrained_bands, but not in model_bands, drop it. + If a band exists in model_bands, but not pretrained_bands, randomly initialize those weights. + + + Args: + state_dict (dict): State Dict + model (nn.Module): Model to load the weights onto. + pretrained_bands (list[HLSBands | int]): List of bands the model was pretrained on, in the correct order. 
+ model_bands (list[HLSBands | int]): List of bands the model is going to be finetuned on, in the correct order + + Returns: + dict: New state dict + """ + if (type(pretrained_bands) == type(model_bands)) | (type(pretrained_bands) == int) | (type(model_bands) == int): + + if custom_proj_key is None: + _possible_keys_for_proj_weight = { + "patch_embed.proj.weight", + "module.patch_embed.proj.weight", + "patch_embed.projection.weight", + "module.patch_embed.projection.weight", + } + else: + _possible_keys_for_proj_weight = {custom_proj_key} + + patch_embed_proj_weight_key = state_dict.keys() & _possible_keys_for_proj_weight if (type(state_dict) in [collections.OrderedDict, dict]) else state_dict().keys() & _possible_keys_for_proj_weight + if len(patch_embed_proj_weight_key) == 0: + msg = "Could not find key for patch embed weight" + raise Exception(msg) + if len(patch_embed_proj_weight_key) > 1: + msg = "Too many matches for key for patch embed weight" + raise Exception(msg) + + # extract the single element from the set + (patch_embed_proj_weight_key,) = patch_embed_proj_weight_key + patch_embed_weight = state_dict[patch_embed_proj_weight_key] + + temp_weight = model.state_dict()[patch_embed_proj_weight_key].clone() + + # only do this if the patch size and tubelet size match. If not, start with random weights + if patch_embed_weights_are_compatible(temp_weight, patch_embed_weight): + torch.nn.init.xavier_uniform_(temp_weight.view([temp_weight.shape[0], -1])) + for index, band in enumerate(model_bands): + if band in pretrained_bands: + logging.debug(f"Loaded weights for {band} in position {index} of patch embed") + temp_weight[:, index] = patch_embed_weight[:, pretrained_bands.index(band)] + else: + warnings.warn( + f"Incompatible shapes between patch embedding of model {temp_weight.shape} and\ + of checkpoint {patch_embed_weight.shape}", + category=UserWarning, + stacklevel=1, + ) + + state_dict[patch_embed_proj_weight_key] = temp_weight + # extract the single element from the set (patch_embed_proj_weight_key,) = patch_embed_proj_weight_key patch_embed_weight = state_dict[patch_embed_proj_weight_key] - - temp_weight = model.state_dict()[patch_embed_proj_weight_key].clone() - + + temp_weight = model.state_dict()[patch_embed_proj_weight_key].clone() + # only do this if the patch size and tubelet size match. If not, start with random weights if patch_embed_weights_are_compatible(temp_weight, patch_embed_weight): torch.nn.init.xavier_uniform_(temp_weight.view([temp_weight.shape[0], -1])) for index, band in enumerate(model_bands): if band in pretrained_bands: - logging.debug(f"Loaded weights for {band} in position {index} of patch embed") + logging.info(f"Loaded weights for {band} in position {index} of patch embed") temp_weight[:, index] = patch_embed_weight[:, pretrained_bands.index(band)] else: warnings.warn( @@ -75,30 +99,7 @@ def select_patch_embed_weights( category=UserWarning, stacklevel=1, ) - + state_dict[patch_embed_proj_weight_key] = temp_weight - - # extract the single element from the set - (patch_embed_proj_weight_key,) = patch_embed_proj_weight_key - patch_embed_weight = state_dict[patch_embed_proj_weight_key] - - temp_weight = model.state_dict()[patch_embed_proj_weight_key].clone() - - # only do this if the patch size and tubelet size match. 
If not, start with random weights - if patch_embed_weights_are_compatible(temp_weight, patch_embed_weight): - torch.nn.init.xavier_uniform_(temp_weight.view([temp_weight.shape[0], -1])) - for index, band in enumerate(model_bands): - if band in pretrained_bands: - logging.info(f"Loaded weights for {band} in position {index} of patch embed") - temp_weight[:, index] = patch_embed_weight[:, pretrained_bands.index(band)] - else: - warnings.warn( - f"Incompatible shapes between patch embedding of model {temp_weight.shape} and\ - of checkpoint {patch_embed_weight.shape}", - category=UserWarning, - stacklevel=1, - ) - - state_dict[patch_embed_proj_weight_key] = temp_weight ->>>>>>> main + return state_dict diff --git a/terratorch/models/clay_model_factory.py b/terratorch/models/clay_model_factory.py index 41aa4e22..391b93f6 100644 --- a/terratorch/models/clay_model_factory.py +++ b/terratorch/models/clay_model_factory.py @@ -132,10 +132,17 @@ def build_model( # sizes, it can still work, but there is no way to fix possible # errors during execution if information about patch size is not # explicitly provided. - logging.getLogger("terratorch").info(f"The argument `patch_size` could not be found. To avoid possible errors related to nondivisible images,\ - it's better to define it in the config file.") patch_size = None + if "img_size" in backbone_kwargs: + img_size = backbone_kwargs["img_size"] + else: + # If the configs for the model are right and images have the proper + # sizes, it can still work, but there is no way to fix possible + # errors during execution if information about img_size is not + # provided in order to perform cropping when necessary. + img_size = None + # Trying to find the model on HuggingFace. try: backbone: nn.Module = timm.create_model( @@ -171,7 +178,7 @@ def build_model( head_kwargs["num_classes"] = num_classes if aux_decoders is None: return _build_appropriate_model( - task, backbone, decoder, head_kwargs, prepare_features_for_image_model, patch_size=patch_size, rescale=rescale + task, backbone, decoder, head_kwargs, prepare_features_for_image_model, patch_size=patch_size, img_size=img_size, rescale=rescale ) to_be_aux_decoders: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] = [] @@ -201,6 +208,7 @@ def build_model( head_kwargs, prepare_features_for_image_model, patch_size=patch_size, + img_size=img_size, rescale=rescale, auxiliary_heads=to_be_aux_decoders, ) @@ -213,6 +221,7 @@ def _build_appropriate_model( head_kwargs: dict, prepare_features_for_image_model: Callable, patch_size:int=None, + img_size:int=None, rescale: bool = True, # noqa: FBT001, FBT002 auxiliary_heads: dict | None = None, ): @@ -223,6 +232,7 @@ def _build_appropriate_model( decoder, head_kwargs, patch_size=patch_size, + img_size=img_size, rescale=rescale, auxiliary_heads=auxiliary_heads, ) @@ -233,6 +243,7 @@ def _build_appropriate_model( decoder, head_kwargs, patch_size=patch_size, + img_size=img_size, auxiliary_heads=auxiliary_heads, ) From fd1599f4c81702f16759b42d3b6b1e9cb0f14ba6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 17:43:33 -0300 Subject: [PATCH 23/39] merging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- .../backbones/select_patch_embed_weights.py | 62 +++++++++---------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/terratorch/models/backbones/select_patch_embed_weights.py 
b/terratorch/models/backbones/select_patch_embed_weights.py index de9e1b90..81573d72 100644 --- a/terratorch/models/backbones/select_patch_embed_weights.py +++ b/terratorch/models/backbones/select_patch_embed_weights.py @@ -10,20 +10,20 @@ from terratorch.datasets import HLSBands, OpticalBands, SARBands import collections - def patch_embed_weights_are_compatible(model_patch_embed: torch.Tensor, checkpoint_patch_embed: torch.Tensor) -> bool: - # check all dimensions are the same except for channel dimension - if len(model_patch_embed.shape) != len(checkpoint_patch_embed.shape): - return False - - model_shape = [model_patch_embed.shape[i] for i in range(len(model_patch_embed.shape)) if i != 1] - checkpoint_shape = [checkpoint_patch_embed.shape[i] for i in range(len(checkpoint_patch_embed.shape)) if i != 1] - return model_shape == checkpoint_shape - - def select_patch_embed_weights( - state_dict: dict, model: nn.Module, pretrained_bands: list[HLSBands | int | OpticalBands| SARBands], model_bands: list[HLSBands | int | OpticalBands| SARBands], custom_proj_key: str = None - ) -> dict: - """Filter out the patch embedding weights according to the bands being used. - If a band exists in the pretrained_bands, but not in model_bands, drop it. +def patch_embed_weights_are_compatible(model_patch_embed: torch.Tensor, checkpoint_patch_embed: torch.Tensor) -> bool: + # check all dimensions are the same except for channel dimension + if len(model_patch_embed.shape) != len(checkpoint_patch_embed.shape): + return False + + model_shape = [model_patch_embed.shape[i] for i in range(len(model_patch_embed.shape)) if i != 1] + checkpoint_shape = [checkpoint_patch_embed.shape[i] for i in range(len(checkpoint_patch_embed.shape)) if i != 1] + return model_shape == checkpoint_shape + +def select_patch_embed_weights( + state_dict: dict, model: nn.Module, pretrained_bands: list[HLSBands | int | OpticalBands| SARBands], model_bands: list[HLSBands | int | OpticalBands| SARBands], custom_proj_key: str = None +) -> dict: + """Filter out the patch embedding weights according to the bands being used. + If a band exists in the pretrained_bands, but not in model_bands, drop it. If a band exists in model_bands, but not pretrained_bands, randomly initialize those weights. 
@@ -35,18 +35,18 @@ def select_patch_embed_weights( Returns: dict: New state dict - """ - if (type(pretrained_bands) == type(model_bands)) | (type(pretrained_bands) == int) | (type(model_bands) == int): - - if custom_proj_key is None: - _possible_keys_for_proj_weight = { - "patch_embed.proj.weight", - "module.patch_embed.proj.weight", - "patch_embed.projection.weight", - "module.patch_embed.projection.weight", - } - else: - _possible_keys_for_proj_weight = {custom_proj_key} + """ + if (type(pretrained_bands) == type(model_bands)) | (type(pretrained_bands) == int) | (type(model_bands) == int): + + if custom_proj_key is None: + _possible_keys_for_proj_weight = { + "patch_embed.proj.weight", + "module.patch_embed.proj.weight", + "patch_embed.projection.weight", + "module.patch_embed.projection.weight", + } + else: + _possible_keys_for_proj_weight = {custom_proj_key} patch_embed_proj_weight_key = state_dict.keys() & _possible_keys_for_proj_weight if (type(state_dict) in [collections.OrderedDict, dict]) else state_dict().keys() & _possible_keys_for_proj_weight if len(patch_embed_proj_weight_key) == 0: @@ -55,13 +55,13 @@ def select_patch_embed_weights( if len(patch_embed_proj_weight_key) > 1: msg = "Too many matches for key for patch embed weight" raise Exception(msg) - + # extract the single element from the set (patch_embed_proj_weight_key,) = patch_embed_proj_weight_key patch_embed_weight = state_dict[patch_embed_proj_weight_key] - + temp_weight = model.state_dict()[patch_embed_proj_weight_key].clone() - + # only do this if the patch size and tubelet size match. If not, start with random weights if patch_embed_weights_are_compatible(temp_weight, patch_embed_weight): torch.nn.init.xavier_uniform_(temp_weight.view([temp_weight.shape[0], -1])) @@ -76,9 +76,9 @@ def select_patch_embed_weights( category=UserWarning, stacklevel=1, ) - + state_dict[patch_embed_proj_weight_key] = temp_weight - + # extract the single element from the set (patch_embed_proj_weight_key,) = patch_embed_proj_weight_key patch_embed_weight = state_dict[patch_embed_proj_weight_key] From 41af8f7bb043044dae1fb522a0e66efffe929df2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 17:50:28 -0300 Subject: [PATCH 24/39] merging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- .../backbones/select_patch_embed_weights.py | 80 +++++++++---------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/terratorch/models/backbones/select_patch_embed_weights.py b/terratorch/models/backbones/select_patch_embed_weights.py index 81573d72..dfafc514 100644 --- a/terratorch/models/backbones/select_patch_embed_weights.py +++ b/terratorch/models/backbones/select_patch_embed_weights.py @@ -13,7 +13,7 @@ def patch_embed_weights_are_compatible(model_patch_embed: torch.Tensor, checkpoint_patch_embed: torch.Tensor) -> bool: # check all dimensions are the same except for channel dimension if len(model_patch_embed.shape) != len(checkpoint_patch_embed.shape): - return False + return False model_shape = [model_patch_embed.shape[i] for i in range(len(model_patch_embed.shape)) if i != 1] checkpoint_shape = [checkpoint_patch_embed.shape[i] for i in range(len(checkpoint_patch_embed.shape)) if i != 1] @@ -24,17 +24,17 @@ def select_patch_embed_weights( ) -> dict: """Filter out the patch embedding weights according to the bands being used. 
If a band exists in the pretrained_bands, but not in model_bands, drop it. - If a band exists in model_bands, but not pretrained_bands, randomly initialize those weights. + If a band exists in model_bands, but not pretrained_bands, randomly initialize those weights. - Args: - state_dict (dict): State Dict - model (nn.Module): Model to load the weights onto. - pretrained_bands (list[HLSBands | int]): List of bands the model was pretrained on, in the correct order. - model_bands (list[HLSBands | int]): List of bands the model is going to be finetuned on, in the correct order + Args: + state_dict (dict): State Dict + model (nn.Module): Model to load the weights onto. + pretrained_bands (list[HLSBands | int]): List of bands the model was pretrained on, in the correct order. + model_bands (list[HLSBands | int]): List of bands the model is going to be finetuned on, in the correct order - Returns: - dict: New state dict + Returns: + dict: New state dict """ if (type(pretrained_bands) == type(model_bands)) | (type(pretrained_bands) == int) | (type(model_bands) == int): @@ -48,37 +48,37 @@ def select_patch_embed_weights( else: _possible_keys_for_proj_weight = {custom_proj_key} - patch_embed_proj_weight_key = state_dict.keys() & _possible_keys_for_proj_weight if (type(state_dict) in [collections.OrderedDict, dict]) else state_dict().keys() & _possible_keys_for_proj_weight - if len(patch_embed_proj_weight_key) == 0: - msg = "Could not find key for patch embed weight" - raise Exception(msg) - if len(patch_embed_proj_weight_key) > 1: - msg = "Too many matches for key for patch embed weight" - raise Exception(msg) - - # extract the single element from the set - (patch_embed_proj_weight_key,) = patch_embed_proj_weight_key - patch_embed_weight = state_dict[patch_embed_proj_weight_key] - - temp_weight = model.state_dict()[patch_embed_proj_weight_key].clone() - - # only do this if the patch size and tubelet size match. If not, start with random weights - if patch_embed_weights_are_compatible(temp_weight, patch_embed_weight): - torch.nn.init.xavier_uniform_(temp_weight.view([temp_weight.shape[0], -1])) - for index, band in enumerate(model_bands): - if band in pretrained_bands: - logging.debug(f"Loaded weights for {band} in position {index} of patch embed") - temp_weight[:, index] = patch_embed_weight[:, pretrained_bands.index(band)] - else: - warnings.warn( - f"Incompatible shapes between patch embedding of model {temp_weight.shape} and\ - of checkpoint {patch_embed_weight.shape}", - category=UserWarning, - stacklevel=1, - ) - - state_dict[patch_embed_proj_weight_key] = temp_weight - + patch_embed_proj_weight_key = state_dict.keys() & _possible_keys_for_proj_weight if (type(state_dict) in [collections.OrderedDict, dict]) else state_dict().keys() & _possible_keys_for_proj_weight + if len(patch_embed_proj_weight_key) == 0: + msg = "Could not find key for patch embed weight" + raise Exception(msg) + if len(patch_embed_proj_weight_key) > 1: + msg = "Too many matches for key for patch embed weight" + raise Exception(msg) + + # extract the single element from the set + (patch_embed_proj_weight_key,) = patch_embed_proj_weight_key + patch_embed_weight = state_dict[patch_embed_proj_weight_key] + + temp_weight = model.state_dict()[patch_embed_proj_weight_key].clone() + + # only do this if the patch size and tubelet size match. 
If not, start with random weights + if patch_embed_weights_are_compatible(temp_weight, patch_embed_weight): + torch.nn.init.xavier_uniform_(temp_weight.view([temp_weight.shape[0], -1])) + for index, band in enumerate(model_bands): + if band in pretrained_bands: + logging.debug(f"Loaded weights for {band} in position {index} of patch embed") + temp_weight[:, index] = patch_embed_weight[:, pretrained_bands.index(band)] + else: + warnings.warn( + f"Incompatible shapes between patch embedding of model {temp_weight.shape} and\ + of checkpoint {patch_embed_weight.shape}", + category=UserWarning, + stacklevel=1, + ) + + state_dict[patch_embed_proj_weight_key] = temp_weight + # extract the single element from the set (patch_embed_proj_weight_key,) = patch_embed_proj_weight_key patch_embed_weight = state_dict[patch_embed_proj_weight_key] From e04e53e107332225fcdae3de5de72c8a7e4eb5e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 6 Jan 2025 18:28:24 -0300 Subject: [PATCH 25/39] argument not used MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/scalar_output_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py index 7e39c983..b1658158 100644 --- a/terratorch/models/scalar_output_model.py +++ b/terratorch/models/scalar_output_model.py @@ -120,7 +120,7 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: features = prepare(features) decoder_output = self.decoder([f.clone() for f in features]) - decoder_output = self._crop_image_when_necessary(decoder_output, pad, input_size) + decoder_output = self._crop_image_when_necessary(decoder_output, input_size) mask = self.head(decoder_output) aux_outputs = {} From ed74fb5fc3645a2be653a2552fbd4ccd11172e55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Thu, 9 Jan 2025 15:09:33 -0300 Subject: [PATCH 26/39] This opration should not be here MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/scalar_output_model.py | 1 - 1 file changed, 1 deletion(-) diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py index b1658158..d6bf6188 100644 --- a/terratorch/models/scalar_output_model.py +++ b/terratorch/models/scalar_output_model.py @@ -120,7 +120,6 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: features = prepare(features) decoder_output = self.decoder([f.clone() for f in features]) - decoder_output = self._crop_image_when_necessary(decoder_output, input_size) mask = self.head(decoder_output) aux_outputs = {} From fce754d5f08c8132e6efe634863885ca86677230 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Fri, 17 Jan 2025 12:15:02 -0300 Subject: [PATCH 27/39] wrong identation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/backbones/select_patch_embed_weights.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/terratorch/models/backbones/select_patch_embed_weights.py b/terratorch/models/backbones/select_patch_embed_weights.py index 394f7670..9749f8c0 100644 --- a/terratorch/models/backbones/select_patch_embed_weights.py +++ 
b/terratorch/models/backbones/select_patch_embed_weights.py @@ -82,4 +82,4 @@ def select_patch_embed_weights( state_dict[patch_embed_proj_weight_key] = temp_weight - return state_dict + return state_dict From f3dc433b059499b6a5d3046872a6372a7e116f33 Mon Sep 17 00:00:00 2001 From: Benedikt Blumenstiel Date: Mon, 20 Jan 2025 18:45:27 +0100 Subject: [PATCH 28/39] Simplified padding code Signed-off-by: Benedikt Blumenstiel --- terratorch/models/backbones/prithvi_vit.py | 4 +- terratorch/models/encoder_decoder_factory.py | 38 +++++------- terratorch/models/pixel_wise_model.py | 62 +++++++------------- terratorch/models/scalar_output_model.py | 57 +++++------------- terratorch/models/utils.py | 37 +++++++++--- 5 files changed, 83 insertions(+), 115 deletions(-) diff --git a/terratorch/models/backbones/prithvi_vit.py b/terratorch/models/backbones/prithvi_vit.py index 586e6241..db471a0a 100644 --- a/terratorch/models/backbones/prithvi_vit.py +++ b/terratorch/models/backbones/prithvi_vit.py @@ -61,8 +61,8 @@ def _cfg(**kwargs): "prithvi_eo_v2_300": _cfg(embed_dim=1024, depth=24, num_heads=16), "prithvi_eo_v2_300_tl": _cfg(embed_dim=1024, depth=24, num_heads=16, coords_encoding=["time", "location"], coords_scale_learn=True), - "prithvi_eo_v2_600": _cfg(embed_dim=1280, depth=32, num_heads=16), - "prithvi_eo_v2_600_tl": _cfg(embed_dim=1280, depth=32, num_heads=16, + "prithvi_eo_v2_600": _cfg(embed_dim=1280, depth=32, num_heads=16, patch_size=[1, 14, 14]), + "prithvi_eo_v2_600_tl": _cfg(embed_dim=1280, depth=32, num_heads=16, patch_size=[1, 14, 14], coords_encoding=["time", "location"], coords_scale_learn=True), } diff --git a/terratorch/models/encoder_decoder_factory.py b/terratorch/models/encoder_decoder_factory.py index 2bad57e4..7f5adcb9 100644 --- a/terratorch/models/encoder_decoder_factory.py +++ b/terratorch/models/encoder_decoder_factory.py @@ -130,25 +130,15 @@ def build_model( backbone_kwargs, kwargs = extract_prefix_keys(kwargs, "backbone_") backbone = _get_backbone(backbone, **backbone_kwargs) - # Getting some necessary parameters - # Patch size - if "patch_size" in backbone_kwargs: - patch_size = backbone_kwargs["patch_size"] - else: - # If the configs for the model are right and images have the proper - # sizes, it can still work, but there is no way to fix possible - # errors during execution if information about patch size is not - # explicitly provided. - patch_size = None - - if "img_size" in backbone_kwargs: - img_size = backbone_kwargs["img_size"] - else: - # If the configs for the model are right and images have the proper - # sizes, it can still work, but there is no way to fix possible - # errors during execution if information about img_size is not - # provided in order to perform cropping when necessary. - img_size = None + # If patch size is not provided in the config or by the model, it might lead to errors due to irregular images. 
+ patch_size = backbone_kwargs.get("patch_size", None) + if patch_size is None: + # Infer patch size from model by checking all backbone modules + for module in backbone.modules(): + if hasattr(module, "patch_size"): + patch_size = module.patch_size + break + padding = backbone_kwargs.get("padding", "reflect") if peft_config is not None: if not backbone_kwargs.get("pretrained", False): @@ -189,7 +179,7 @@ def build_model( decoder, head_kwargs, patch_size=patch_size, - img_size=img_size, + padding=padding, necks=neck_list, decoder_includes_head=decoder_includes_head, rescale=rescale, @@ -216,7 +206,7 @@ def build_model( decoder, head_kwargs, patch_size=patch_size, - img_size=img_size, + padding=padding, necks=neck_list, decoder_includes_head=decoder_includes_head, rescale=rescale, @@ -230,7 +220,7 @@ def _build_appropriate_model( decoder: nn.Module, head_kwargs: dict, patch_size: int, - img_size:int, + padding: str, decoder_includes_head: bool = False, necks: list[Neck] | None = None, rescale: bool = True, # noqa: FBT001, FBT002 @@ -247,7 +237,7 @@ def _build_appropriate_model( decoder, head_kwargs, patch_size=patch_size, - img_size=img_size, + padding=padding, decoder_includes_head=decoder_includes_head, neck=neck_module, rescale=rescale, @@ -260,7 +250,7 @@ def _build_appropriate_model( decoder, head_kwargs, patch_size=patch_size, - img_size=img_size, + padding=padding, decoder_includes_head=decoder_includes_head, neck=neck_module, auxiliary_heads=auxiliary_heads, diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index bc01f173..e3437c6e 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -28,7 +28,7 @@ def __init__( decoder: nn.Module, head_kwargs: dict, patch_size: int = None, - img_size:tuple = None, + padding: str = None, decoder_includes_head: bool = False, auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, neck: nn.Module | None = None, @@ -73,7 +73,7 @@ def __init__( self.neck = neck self.rescale = rescale self.patch_size = patch_size - self.img_size = (img_size, img_size) + self.padding = padding def freeze_encoder(self): freeze_module(self.encoder) @@ -82,32 +82,6 @@ def freeze_decoder(self): freeze_module(self.decoder) freeze_module(self.head) - def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: - - if self.patch_size: - x_shape = x.shape[2:] - if all([i//self.patch_size==0 for i in x_shape]): - return x - else: - x = pad_images(x, self.patch_size, "constant") - - return x - else: - # If patch size is not provided, the user should guarantee the - # dataset is properly configured to work with the model being used. 
-            return x
-
-    def _crop_image_when_necessary(self, x:torch.Tensor, size:tuple) -> torch.Tensor:
-
-        if all(self.img_size):
-
-            x_cropped = transforms.CenterCrop(self.img_size)(x)
-            return x_cropped
-        else:
-            logging.getLogger("terratorch").info("Cropping could be necessary to adjust images, so define `img_size` in your config file \
-                    if you get a shape mismatch.")
-            return x
-
     @staticmethod
     def _check_for_single_channel_and_squeeze(x):
         if x.shape[1] == 1:
@@ -117,21 +91,26 @@ def _check_for_single_channel_and_squeeze(x):
 
     def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput:
         """Sequentially pass `x` through model`s encoder, decoder and heads"""
-        if isinstance(x, torch.Tensor):
-            input_size = x.shape[-2:]
-        elif hasattr(kwargs, 'image_size'):
-            input_size = kwargs['image_size']
-        elif isinstance(x, dict):
-            # Multimodal input in passed as dict
-            input_size = list(x.values())[0].shape[-2:]
-        else:
-            ValueError('Could not infer input shape.')
+        def _get_size(x):
+            if isinstance(x, torch.Tensor):
+                return x.shape[-2:]
+            elif isinstance(x, dict):
+                # Multimodal input is passed as dict (assuming the first modality is an image)
+                return list(x.values())[0].shape[-2:]
+            elif 'image_size' in kwargs:
+                return kwargs['image_size']
+            else:
+                raise ValueError('Could not infer image shape.')
+
+        image_size = _get_size(x)
+        if isinstance(x, torch.Tensor) and self.patch_size:
+            # Only works for single image modalities
+            x = pad_images(x, self.patch_size, self.padding)
+        input_size = _get_size(x)
 
-        # TODO make this verification optional to avoid unnecessary repetition
-        x = self.check_input_shape(x)
         features = self.encoder(x, **kwargs)
 
-        ## only for backwards compatibility with pre-neck times.
+        # only for backwards compatibility with pre-neck times.
         if self.neck:
             prepare = self.neck
         else:
@@ -144,6 +123,7 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput:
         if self.rescale and mask.shape[-2:] != input_size:
             mask = F.interpolate(mask, size=input_size, mode="bilinear")
         mask = self._check_for_single_channel_and_squeeze(mask)
+        mask = mask[..., :image_size[0], :image_size[1]]
 
         aux_outputs = {}
         for name, decoder in self.aux_heads.items():
@@ -151,9 +131,9 @@ def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput:
             if self.rescale and aux_output.shape[-2:] != input_size:
                 aux_output = F.interpolate(aux_output, size=input_size, mode="bilinear")
             aux_output = self._check_for_single_channel_and_squeeze(aux_output)
+            aux_output = aux_output[..., :image_size[0], :image_size[1]]
             aux_outputs[name] = aux_output
 
-        mask = self._crop_image_when_necessary(mask, input_size)
         return ModelOutput(output=mask, auxiliary_heads=aux_outputs)
 
     def _get_head(self, task: str, input_embed_dim: int, head_kwargs):
diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py
index d6bf6188..2e135e22 100644
--- a/terratorch/models/scalar_output_model.py
+++ b/terratorch/models/scalar_output_model.py
@@ -9,6 +9,7 @@
 from terratorch.models.utils import pad_images
 import pdb
 
+
 def freeze_module(module: nn.Module):
     for param in module.parameters():
         param.requires_grad_(False)
@@ -21,16 +22,16 @@ class ScalarOutputModel(Model, SegmentationModel):
     """
 
     def __init__(
-            self,
-            task: str,
-            encoder: 
nn.Module, + decoder: nn.Module, + head_kwargs: dict, + patch_size: int = None, + padding: str = None, + decoder_includes_head: bool = False, + auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, + neck: nn.Module | None = None, ) -> None: """Constructor @@ -67,7 +68,7 @@ def __init__( self.neck = neck self.patch_size = patch_size - self.img_size = (img_size, img_size) + self.padding = padding def freeze_encoder(self): freeze_module(self.encoder) @@ -76,41 +77,15 @@ def freeze_decoder(self): freeze_module(self.decoder) freeze_module(self.head) - def check_input_shape(self, x: torch.Tensor) -> torch.Tensor: # noqa: ARG002 - - if self.patch_size: - x_shape = x.shape[2:] - if all([i//self.patch_size==0 for i in x_shape]): - return x - else: - x = pad_images(x, self.patch_size, "constant") - return x - else: - # If patch size is not provided, the user should guarantee the - # dataset is properly configured to work with the model being used. - return x - - def _crop_image_when_necessary(self, x:torch.Tensor, size:tuple) -> torch.Tensor: - - if self.img_size: - - return transforms.CenterCrop(self.img_size)(x) - else: - raise NameError("Cropping is necessary to adjust images, so define `img_size` in your config file.") - logging.getLogger("terratorch").info("Cropping could be necessary to adjust images, so define `img_size` in your config file \ - if you get a shape mismatch.") - def forward(self, x: torch.Tensor, **kwargs) -> ModelOutput: """Sequentially pass `x` through model`s encoder, decoder and heads""" - x = self.check_input_shape(x) + if isinstance(x, torch.Tensor) and self.patch_size: + # Only works for single image modalities + x = pad_images(x, self.patch_size, self.padding) features = self.encoder(x, **kwargs) - # Collecting information about the size of the input tensor in order to - # use it to possibly crop the image when necessary. - input_size = x.shape[-2:] - - ## only for backwards compatibility with pre-neck times. + # only for backwards compatibility with pre-neck times. 
         if self.neck:
             prepare = self.neck
         else:
diff --git a/terratorch/models/utils.py b/terratorch/models/utils.py
index 5704eb69..ce140d23 100644
--- a/terratorch/models/utils.py
+++ b/terratorch/models/utils.py
@@ -1,3 +1,5 @@
+import logging
+
 from torch import nn, Tensor
 import torch
 
@@ -15,15 +17,36 @@ def extract_prefix_keys(d: dict, prefix: str) -> dict:
 
     return extracted_dict, remaining_dict
 
-def pad_images(imgs: Tensor,patch_size: int, padding:str) -> Tensor:
-    p = patch_size
-    # h, w = imgs.shape[3], imgs.shape[4]
-    t, h, w = imgs.shape[-3:]
-    h_pad, w_pad = (p - h % p) % p, (p - w % p) % p  # Ensure padding is within bounds
-    if h_pad > 0 or w_pad > 0:
+
+def pad_images(imgs: Tensor, patch_size: int | list, padding: str) -> Tensor:
+    p_t = 1
+    if isinstance(patch_size, int):
+        p_h = p_w = patch_size
+    elif len(patch_size) == 1:
+        p_h = p_w = patch_size[0]
+    elif len(patch_size) == 2:
+        p_h, p_w = patch_size
+    elif len(patch_size) == 3:
+        p_t, p_h, p_w = patch_size
+    else:
+        raise ValueError(f'patch size {patch_size} not valid, must be int or list of ints with length 1, 2 or 3.')
+
+    if p_t > 1 and len(imgs.shape) < 5:
+        raise ValueError(f"Multi-temporal padding requested (p_t = {p_t}) "
+                         f"but no multi-temporal data provided (data shape = {imgs.shape}).")
+
+    h, w = imgs.shape[-2:]
+    t = imgs.shape[-3] if len(imgs.shape) > 4 else 1
+    t_pad, h_pad, w_pad = (p_t - t % p_t) % p_t, (p_h - h % p_h) % p_h, (p_w - w % p_w) % p_w
+    if t_pad > 0:
+        # Multi-temporal padding
+        imgs = torch.stack([
+            nn.functional.pad(img, (0, w_pad, 0, h_pad, 0, t_pad), mode=padding)
+            for img in imgs  # Apply per image to avoid NotImplementedError from torch.nn.functional.pad
+        ])
+    elif h_pad > 0 or w_pad > 0:
         imgs = torch.stack([
             nn.functional.pad(img, (0, w_pad, 0, h_pad), mode=padding)
             for img in imgs  # Apply per image to avoid NotImplementedError from torch.nn.functional.pad
         ])
     return imgs
-
From 4e0fcf2587858f1d231cb8be8dd6302bfcf5a727 Mon Sep 17 00:00:00 2001
From: Benedikt Blumenstiel
Date: Mon, 20 Jan 2025 18:52:21 +0100
Subject: [PATCH 29/39] Fix clay padding

Signed-off-by: Benedikt Blumenstiel
---
 terratorch/models/clay_model_factory.py      | 45 ++++++++------------
 terratorch/models/encoder_decoder_factory.py |  2 +-
 2 files changed, 18 insertions(+), 29 deletions(-)

diff --git a/terratorch/models/clay_model_factory.py b/terratorch/models/clay_model_factory.py
index 391b93f6..03c72352 100644
--- a/terratorch/models/clay_model_factory.py
+++ b/terratorch/models/clay_model_factory.py
@@ -109,6 +109,17 @@ def build_model(
         # Path for accessing the model source code.
         self.syspath_kwarg = "model_sys_path"
+        backbone_kwargs, kwargs = extract_prefix_keys(kwargs, "backbone_")
+
+        # If patch size is not provided in the config or by the model, it might lead to errors due to irregular images.
+        patch_size = backbone_kwargs.get("patch_size", None)
+        if patch_size is None:
+            # Infer patch size from model by checking all backbone modules
+            for module in backbone.modules():
+                if hasattr(module, "patch_size"):
+                    patch_size = module.patch_size
+                    break
+        padding = backbone_kwargs.get("padding", "reflect")
 
         # TODO: support auxiliary heads
         if not isinstance(backbone, nn.Module):
             if not "clay" in backbone:
@@ -121,28 +132,6 @@ def build_model(
             msg = f"Task {task} not supported. 
Please choose one of {SUPPORTED_TASKS}" raise NotImplementedError(msg) - backbone_kwargs, kwargs = extract_prefix_keys(kwargs, "backbone_") - - # Getting some necessary parameters - # Patch size - if "patch_size" in backbone_kwargs: - patch_size = backbone_kwargs["patch_size"] - else: - # If the configs for the model are right and images have the proper - # sizes, it can still work, but there is no way to fix possible - # errors during execution if information about patch size is not - # explicitly provided. - patch_size = None - - if "img_size" in backbone_kwargs: - img_size = backbone_kwargs["img_size"] - else: - # If the configs for the model are right and images have the proper - # sizes, it can still work, but there is no way to fix possible - # errors during execution if information about img_size is not - # provided in order to perform cropping when necessary. - img_size = None - # Trying to find the model on HuggingFace. try: backbone: nn.Module = timm.create_model( @@ -178,7 +167,7 @@ def build_model( head_kwargs["num_classes"] = num_classes if aux_decoders is None: return _build_appropriate_model( - task, backbone, decoder, head_kwargs, prepare_features_for_image_model, patch_size=patch_size, img_size=img_size, rescale=rescale + task, backbone, decoder, head_kwargs, prepare_features_for_image_model, patch_size=patch_size, padding=padding, rescale=rescale ) to_be_aux_decoders: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] = [] @@ -208,7 +197,7 @@ def build_model( head_kwargs, prepare_features_for_image_model, patch_size=patch_size, - img_size=img_size, + padding=padding, rescale=rescale, auxiliary_heads=to_be_aux_decoders, ) @@ -220,8 +209,8 @@ def _build_appropriate_model( decoder: nn.Module, head_kwargs: dict, prepare_features_for_image_model: Callable, - patch_size:int=None, - img_size:int=None, + patch_size: int | list | None, + padding: str, rescale: bool = True, # noqa: FBT001, FBT002 auxiliary_heads: dict | None = None, ): @@ -232,7 +221,7 @@ def _build_appropriate_model( decoder, head_kwargs, patch_size=patch_size, - img_size=img_size, + padding=padding, rescale=rescale, auxiliary_heads=auxiliary_heads, ) @@ -243,7 +232,7 @@ def _build_appropriate_model( decoder, head_kwargs, patch_size=patch_size, - img_size=img_size, + padding=padding, auxiliary_heads=auxiliary_heads, ) diff --git a/terratorch/models/encoder_decoder_factory.py b/terratorch/models/encoder_decoder_factory.py index 7f5adcb9..8209b047 100644 --- a/terratorch/models/encoder_decoder_factory.py +++ b/terratorch/models/encoder_decoder_factory.py @@ -219,7 +219,7 @@ def _build_appropriate_model( backbone: nn.Module, decoder: nn.Module, head_kwargs: dict, - patch_size: int, + patch_size: int | list | None, padding: str, decoder_includes_head: bool = False, necks: list[Neck] | None = None, From 2bb57b20a37512372110817604d3d97431efb91f Mon Sep 17 00:00:00 2001 From: Benedikt Blumenstiel Date: Mon, 20 Jan 2025 18:54:00 +0100 Subject: [PATCH 30/39] Remove padding from prithvi Signed-off-by: Benedikt Blumenstiel --- terratorch/models/backbones/prithvi_vit.py | 23 ---------------------- 1 file changed, 23 deletions(-) diff --git a/terratorch/models/backbones/prithvi_vit.py b/terratorch/models/backbones/prithvi_vit.py index db471a0a..4bdaa19e 100644 --- a/terratorch/models/backbones/prithvi_vit.py +++ b/terratorch/models/backbones/prithvi_vit.py @@ -10,7 +10,6 @@ from terratorch.models.backbones.select_patch_embed_weights import select_patch_embed_weights from terratorch.datasets.utils import 
generate_bands_intervals from terratorch.models.backbones.prithvi_mae import PrithviViT, PrithviMAE -from terratorch.models.utils import pad_images logger = logging.getLogger(__name__) @@ -236,28 +235,6 @@ def forward_filter_indices(*args, **kwargs): model.model_bands = model_bands model.pretrained_bands = pretrained_bands - padding = kwargs.get("padding", "none") - patch_size = kwargs.get("patch_size", 16) - if isinstance(patch_size, list): - patch_size = patch_size[-1] - - if padding != "none": - original_forward = model.forward - original_forward_features = model.forward_features - - def pad_and_forward(forward_fn, patch_size, padding, *args, **kwargs): - inputs = pad_images(args[0], patch_size, padding) - return forward_fn(inputs, **kwargs) - - def forward_pad_images(*args, **kwargs): - return pad_and_forward(original_forward, patch_size, padding, *args, **kwargs) - - def forward_features_pad_images(*args, **kwargs): - return pad_and_forward(original_forward_features, patch_size, padding, *args, **kwargs) - - model.forward = forward_pad_images - model.forward_features = forward_features_pad_images - return model From fac50f0ab25f6fd743fcf045685f1f68aa5b3e81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 20 Jan 2025 16:48:55 -0300 Subject: [PATCH 31/39] Moving this search MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/clay_model_factory.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/terratorch/models/clay_model_factory.py b/terratorch/models/clay_model_factory.py index 03c72352..6540b44a 100644 --- a/terratorch/models/clay_model_factory.py +++ b/terratorch/models/clay_model_factory.py @@ -111,16 +111,6 @@ def build_model( self.syspath_kwarg = "model_sys_path" backbone_kwargs, kwargs = extract_prefix_keys(kwargs, "backbone_") - # If patch size is not provided in the config or by the model, it might lead to errors due to irregular images. - patch_size = backbone_kwargs.get("patch_size", None) - if patch_size is None: - # Infer patch size from model by checking all backbone modules - for module in backbone.modules(): - if hasattr(module, "patch_size"): - patch_size = module.patch_size - break - padding = backbone_kwargs.get("padding", "reflect") - # TODO: support auxiliary heads if not isinstance(backbone, nn.Module): if not "clay" in backbone: @@ -153,6 +143,16 @@ def build_model( backbone: nn.Module = Embedder(ckpt_path=checkpoint_path, **backbone_kwargs) print("Model Clay was successfully restored.") + # If patch size is not provided in the config or by the model, it might lead to errors due to irregular images. 
+ patch_size = backbone_kwargs.get("patch_size", None) + if patch_size is None: + # Infer patch size from model by checking all backbone modules + for module in backbone.modules(): + if hasattr(module, "patch_size"): + patch_size = module.patch_size + break + padding = backbone_kwargs.get("padding", "reflect") + # allow decoder to be a module passed directly decoder_cls = _get_decoder(decoder) decoder_kwargs, kwargs = extract_prefix_keys(kwargs, "decoder_") From 0d64030238ee6cc7797efe88025267f3ba960005 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Mon, 20 Jan 2025 19:18:40 -0300 Subject: [PATCH 32/39] Limiting version for jsonargparse MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- requirements/required.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements/required.txt b/requirements/required.txt index c4aec6ea..1f185734 100644 --- a/requirements/required.txt +++ b/requirements/required.txt @@ -15,6 +15,7 @@ lightning==2.4.0 git+https://github.com/qubvel-org/segmentation_models.pytorch.git@3952e1f8e9684a385a81e30381b8fb5b1ac086cf timm==1.0.11 numpy==1.26.4 +jsonargparse<=4.36.0 # These dependencies are optional # and must be installed just in case From 23b59245a3807fbe588e1ef2e5aa833271d02931 Mon Sep 17 00:00:00 2001 From: Joao Lucas de Sousa Almeida Date: Mon, 20 Jan 2025 19:45:07 -0300 Subject: [PATCH 33/39] 4.35.0 Signed-off-by: Joao Lucas de Sousa Almeida --- requirements/required.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/required.txt b/requirements/required.txt index 1f185734..986de635 100644 --- a/requirements/required.txt +++ b/requirements/required.txt @@ -15,7 +15,7 @@ lightning==2.4.0 git+https://github.com/qubvel-org/segmentation_models.pytorch.git@3952e1f8e9684a385a81e30381b8fb5b1ac086cf timm==1.0.11 numpy==1.26.4 -jsonargparse<=4.36.0 +jsonargparse<=4.35.0 # These dependencies are optional # and must be installed just in case From 67866125c61a56c0239e99cf0516e0edbcd72caa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Tue, 21 Jan 2025 10:26:09 -0300 Subject: [PATCH 34/39] Cropping the image when necessary MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/encoder_decoder_factory.py | 10 +++++++++- terratorch/models/pixel_wise_model.py | 10 ++++++++++ .../manufactured-finetune_prithvi_pixelwise_pad.yaml | 3 +++ tests/test_finetune.py | 2 +- 4 files changed, 23 insertions(+), 2 deletions(-) diff --git a/terratorch/models/encoder_decoder_factory.py b/terratorch/models/encoder_decoder_factory.py index 8209b047..512d44c1 100644 --- a/terratorch/models/encoder_decoder_factory.py +++ b/terratorch/models/encoder_decoder_factory.py @@ -1,6 +1,6 @@ # Copyright contributors to the Terratorch project - +from typing import List import warnings import logging from torch import nn @@ -130,8 +130,12 @@ def build_model( backbone_kwargs, kwargs = extract_prefix_keys(kwargs, "backbone_") backbone = _get_backbone(backbone, **backbone_kwargs) + # The image can be optionally cropped to a final format when necessary + output_size = backbone_kwargs.get("output_size", None) + # If patch size is not provided in the config or by the model, it might lead to errors due to irregular images. 
patch_size = backbone_kwargs.get("patch_size", None) + if patch_size is None: # Infer patch size from model by checking all backbone modules for module in backbone.modules(): @@ -180,6 +184,7 @@ def build_model( head_kwargs, patch_size=patch_size, padding=padding, + output_size=output_size, necks=neck_list, decoder_includes_head=decoder_includes_head, rescale=rescale, @@ -207,6 +212,7 @@ def build_model( head_kwargs, patch_size=patch_size, padding=padding, + output_size=output_size, necks=neck_list, decoder_includes_head=decoder_includes_head, rescale=rescale, @@ -221,6 +227,7 @@ def _build_appropriate_model( head_kwargs: dict, patch_size: int | list | None, padding: str, + output_size: List[int] | None = None, decoder_includes_head: bool = False, necks: list[Neck] | None = None, rescale: bool = True, # noqa: FBT001, FBT002 @@ -238,6 +245,7 @@ def _build_appropriate_model( head_kwargs, patch_size=patch_size, padding=padding, + output_size=output_size, decoder_includes_head=decoder_includes_head, neck=neck_module, rescale=rescale, diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index e3437c6e..12d6b83d 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -1,4 +1,5 @@ # Copyright contributors to the Terratorch project +from typing import List import logging import torch import torch.nn.functional as F # noqa: N812 @@ -30,6 +31,7 @@ def __init__( patch_size: int = None, padding: str = None, decoder_includes_head: bool = False, + output_size: List[int] | None = None, auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, neck: nn.Module | None = None, rescale: bool = True, # noqa: FBT002, FBT001 @@ -42,6 +44,7 @@ def __init__( decoder (nn.Module): Decoder to be used head_kwargs (dict): Arguments to be passed at instantiation of the head. decoder_includes_head (bool): Whether the decoder already incldes a head. If true, a head will not be added. Defaults to False. + output_size (List[int]): The size of the epxected output/target tensor. It is used to crop the output before returning it. auxiliary_heads (list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None, optional): List of AuxiliaryHeads with heads to be instantiated. Defaults to None. neck (nn.Module | None): Module applied between backbone and decoder. 
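Note on the fallback above: when the config does not set backbone_patch_size, the factory walks the backbone's submodules and takes the first patch_size attribute it finds. A minimal, self-contained sketch of that idea follows; ToyPatchEmbed and ToyBackbone are made-up stand-ins for illustration only, not terratorch classes.

from torch import nn

def infer_patch_size(backbone: nn.Module):
    # Walk all submodules and return the first declared patch_size,
    # mirroring the fallback used when the config does not provide one.
    for module in backbone.modules():
        if hasattr(module, "patch_size"):
            return module.patch_size
    return None

class ToyPatchEmbed(nn.Module):
    # Hypothetical patch embedding: the only submodule declaring a patch_size.
    def __init__(self, patch_size=16):
        super().__init__()
        self.patch_size = patch_size
        self.proj = nn.Conv2d(6, 32, kernel_size=patch_size, stride=patch_size)

class ToyBackbone(nn.Module):
    def __init__(self):
        super().__init__()
        self.patch_embed = ToyPatchEmbed(16)

print(infer_patch_size(ToyBackbone()))  # -> 16

If no submodule exposes a patch_size, the lookup returns None and the patch size has to be given explicitly in the config, as the surrounding comment points out.
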
@@ -74,6 +77,9 @@ def __init__( self.rescale = rescale self.patch_size = patch_size self.padding = padding + self.output_size = output_size + self.reference_top = 0 + self.reference_left = 0 def freeze_encoder(self): freeze_module(self.encoder) @@ -134,6 +140,10 @@ def _get_size(x): aux_output = aux_output[..., :image_size[0], :image_size[1]] aux_outputs[name] = aux_output + # Cropping when necessary + if self.output_size: + mask = transforms.functional.crop(mask, self.reference_left, self.reference_left, *image_size) + return ModelOutput(output=mask, auxiliary_heads=aux_outputs) def _get_head(self, task: str, input_embed_dim: int, head_kwargs): diff --git a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml index 7e8ef8b7..20984e9b 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml @@ -102,6 +102,9 @@ model: backbone_drop_path_rate: 0.3 # backbone_window_size: 8 backbone_patch_size: 13 + backbone_output_size: + - 224 + - 224 decoder_channels: 64 num_frames: 1 in_channels: 6 diff --git a/tests/test_finetune.py b/tests/test_finetune.py index badccfb1..4174c581 100644 --- a/tests/test_finetune.py +++ b/tests/test_finetune.py @@ -56,7 +56,7 @@ def test_finetune_pad(case): @pytest.mark.parametrize("model_name", ["prithvi_eo_v2_300"]) @pytest.mark.parametrize("case", ["fit", "test", "validate"]) -def test_finetune_pad(case): +def test_finetune_pad_nondivisible(case): command_list = [case, "-c", f"tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml"] _ = build_lightning_cli(command_list) From bc68b8748c6b56e54210bf982e31674251a098a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Tue, 21 Jan 2025 10:57:34 -0300 Subject: [PATCH 35/39] tests no more required MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- tests/test_finetune.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/test_finetune.py b/tests/test_finetune.py index 4174c581..76d4df15 100644 --- a/tests/test_finetune.py +++ b/tests/test_finetune.py @@ -54,14 +54,6 @@ def test_finetune_pad(case): gc.collect() -@pytest.mark.parametrize("model_name", ["prithvi_eo_v2_300"]) -@pytest.mark.parametrize("case", ["fit", "test", "validate"]) -def test_finetune_pad_nondivisible(case): - command_list = [case, "-c", f"tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml"] - _ = build_lightning_cli(command_list) - - gc.collect() - @pytest.mark.parametrize("model_name", ["prithvi_swin_B"]) def test_finetune_metrics_from_file(model_name): From 17a608e5872c2c079d7fafc9881a64c922d12b06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Fri, 24 Jan 2025 10:04:36 -0300 Subject: [PATCH 36/39] Updating model name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- .../configs/manufactured-finetune_prithvi_eo_v1_100.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/resources/configs/manufactured-finetune_prithvi_eo_v1_100.yaml b/tests/resources/configs/manufactured-finetune_prithvi_eo_v1_100.yaml index 3a696132..4fde7c17 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_eo_v1_100.yaml +++ 
b/tests/resources/configs/manufactured-finetune_prithvi_eo_v1_100.yaml @@ -96,7 +96,7 @@ model: model_args: decoder: UperNetDecoder pretrained: false - backbone: prithvi_vit_100 + backbone: prithvi_eo_v1_100 #backbone_pretrained_cfg_overlay: #file: tests/all_ecos_random/version_0/checkpoints/epoch=0_state_dict.ckpt #tests/prithvi_vit_100.pt backbone_drop_path_rate: 0.3 From 1ff89380ac358402c002b56aefd5be37350f7541 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Fri, 24 Jan 2025 10:51:50 -0300 Subject: [PATCH 37/39] Fixing indent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/scalar_output_model.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/terratorch/models/scalar_output_model.py b/terratorch/models/scalar_output_model.py index 2e135e22..4b73eea6 100644 --- a/terratorch/models/scalar_output_model.py +++ b/terratorch/models/scalar_output_model.py @@ -22,16 +22,16 @@ class ScalarOutputModel(Model, SegmentationModel): """ def __init__( - self, - task: str, - encoder: nn.Module, - decoder: nn.Module, - head_kwargs: dict, - patch_size: int = None, - padding: str = None, - decoder_includes_head: bool = False, - auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, - neck: nn.Module | None = None, + self, + task: str, + encoder: nn.Module, + decoder: nn.Module, + head_kwargs: dict, + patch_size: int = None, + padding: str = None, + decoder_includes_head: bool = False, + auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, + neck: nn.Module | None = None, ) -> None: """Constructor From b0a47802e7a358516df582e87a2fbf1a00a47781 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Lucas=20de=20Sousa=20Almeida?= Date: Fri, 24 Jan 2025 13:21:41 -0300 Subject: [PATCH 38/39] Removing output_size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: João Lucas de Sousa Almeida --- terratorch/models/encoder_decoder_factory.py | 7 ------- terratorch/models/pixel_wise_model.py | 8 -------- ...ured-finetune_prithvi_pixelwise_nondivisible.yaml | 12 ++++++------ .../manufactured-finetune_prithvi_pixelwise_pad.yaml | 3 --- 4 files changed, 6 insertions(+), 24 deletions(-) diff --git a/terratorch/models/encoder_decoder_factory.py b/terratorch/models/encoder_decoder_factory.py index 512d44c1..efd018c3 100644 --- a/terratorch/models/encoder_decoder_factory.py +++ b/terratorch/models/encoder_decoder_factory.py @@ -130,9 +130,6 @@ def build_model( backbone_kwargs, kwargs = extract_prefix_keys(kwargs, "backbone_") backbone = _get_backbone(backbone, **backbone_kwargs) - # The image can be optionally cropped to a final format when necessary - output_size = backbone_kwargs.get("output_size", None) - # If patch size is not provided in the config or by the model, it might lead to errors due to irregular images. 
patch_size = backbone_kwargs.get("patch_size", None) @@ -184,7 +181,6 @@ def build_model( head_kwargs, patch_size=patch_size, padding=padding, - output_size=output_size, necks=neck_list, decoder_includes_head=decoder_includes_head, rescale=rescale, @@ -212,7 +208,6 @@ def build_model( head_kwargs, patch_size=patch_size, padding=padding, - output_size=output_size, necks=neck_list, decoder_includes_head=decoder_includes_head, rescale=rescale, @@ -227,7 +222,6 @@ def _build_appropriate_model( head_kwargs: dict, patch_size: int | list | None, padding: str, - output_size: List[int] | None = None, decoder_includes_head: bool = False, necks: list[Neck] | None = None, rescale: bool = True, # noqa: FBT001, FBT002 @@ -245,7 +239,6 @@ def _build_appropriate_model( head_kwargs, patch_size=patch_size, padding=padding, - output_size=output_size, decoder_includes_head=decoder_includes_head, neck=neck_module, rescale=rescale, diff --git a/terratorch/models/pixel_wise_model.py b/terratorch/models/pixel_wise_model.py index 12d6b83d..2ee84ce7 100644 --- a/terratorch/models/pixel_wise_model.py +++ b/terratorch/models/pixel_wise_model.py @@ -31,7 +31,6 @@ def __init__( patch_size: int = None, padding: str = None, decoder_includes_head: bool = False, - output_size: List[int] | None = None, auxiliary_heads: list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None = None, neck: nn.Module | None = None, rescale: bool = True, # noqa: FBT002, FBT001 @@ -44,7 +43,6 @@ def __init__( decoder (nn.Module): Decoder to be used head_kwargs (dict): Arguments to be passed at instantiation of the head. decoder_includes_head (bool): Whether the decoder already incldes a head. If true, a head will not be added. Defaults to False. - output_size (List[int]): The size of the epxected output/target tensor. It is used to crop the output before returning it. auxiliary_heads (list[AuxiliaryHeadWithDecoderWithoutInstantiatedHead] | None, optional): List of AuxiliaryHeads with heads to be instantiated. Defaults to None. neck (nn.Module | None): Module applied between backbone and decoder. 
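With the explicit output_size argument removed, irregular inputs are handled by padding height and width up to the next multiple of the patch size before the backbone and slicing the prediction back to the original spatial size afterwards. A rough, self-contained sketch of that round trip is below; the channel mean only stands in for a real encoder/decoder and is not terratorch code.

import torch
import torch.nn.functional as F

def pad_to_multiple(x: torch.Tensor, patch_size: int, mode: str = "reflect") -> torch.Tensor:
    # Pad the last two (H, W) dimensions up to the next multiple of patch_size.
    h, w = x.shape[-2:]
    h_pad = (patch_size - h % patch_size) % patch_size
    w_pad = (patch_size - w % patch_size) % patch_size
    return F.pad(x, (0, w_pad, 0, h_pad), mode=mode)

x = torch.randn(2, 6, 220, 230)                 # H, W not divisible by 16
padded = pad_to_multiple(x, 16)                 # -> (2, 6, 224, 240)
prediction = padded.mean(dim=1, keepdim=True)   # placeholder for the model output at padded size
prediction = prediction[..., :x.shape[-2], :x.shape[-1]]  # slice back to 220 x 230
print(padded.shape, prediction.shape)

Because the output is sliced back to the input size, no separate crop target needs to be configured, which is why backbone_output_size is dropped from the test config as well.
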
@@ -77,9 +75,6 @@ def __init__( self.rescale = rescale self.patch_size = patch_size self.padding = padding - self.output_size = output_size - self.reference_top = 0 - self.reference_left = 0 def freeze_encoder(self): freeze_module(self.encoder) @@ -140,9 +135,6 @@ def _get_size(x): aux_output = aux_output[..., :image_size[0], :image_size[1]] aux_outputs[name] = aux_output - # Cropping when necessary - if self.output_size: - mask = transforms.functional.crop(mask, self.reference_left, self.reference_left, *image_size) return ModelOutput(output=mask, auxiliary_heads=aux_outputs) diff --git a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml index 7fc0b834..a4765288 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_nondivisible.yaml @@ -65,12 +65,12 @@ data: - 2 - 1 - 0 - train_data_root: tests/resources/inputs_extra - train_label_data_root: tests/resources/inputs_extra - val_data_root: tests/resources/inputs_extra - val_label_data_root: tests/resources/inputs_extra - test_data_root: tests/resources/inputs_extra - test_label_data_root: tests/resources/inputs_extra + train_data_root: tests/resources/inputs + train_label_data_root: tests/resources/inputs + val_data_root: tests/resources/inputs + val_label_data_root: tests/resources/inputs + test_data_root: tests/resources/inputs + test_label_data_root: tests/resources/inputs img_grep: "regression*input*.tif" label_grep: "regression*label*.tif" means: diff --git a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml index 20984e9b..7e8ef8b7 100644 --- a/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml +++ b/tests/resources/configs/manufactured-finetune_prithvi_pixelwise_pad.yaml @@ -102,9 +102,6 @@ model: backbone_drop_path_rate: 0.3 # backbone_window_size: 8 backbone_patch_size: 13 - backbone_output_size: - - 224 - - 224 decoder_channels: 64 num_frames: 1 in_channels: 6 From 0dc1e953c377bd9b4298731140bc5a9d051bd1d4 Mon Sep 17 00:00:00 2001 From: Joao Lucas de Sousa Almeida Date: Fri, 24 Jan 2025 19:38:54 -0300 Subject: [PATCH 39/39] indent Signed-off-by: Joao Lucas de Sousa Almeida --- terratorch/models/backbones/select_patch_embed_weights.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/terratorch/models/backbones/select_patch_embed_weights.py b/terratorch/models/backbones/select_patch_embed_weights.py index aa373ecc..fda7a060 100644 --- a/terratorch/models/backbones/select_patch_embed_weights.py +++ b/terratorch/models/backbones/select_patch_embed_weights.py @@ -12,7 +12,7 @@ def patch_embed_weights_are_compatible(model_patch_embed: torch.Tensor, checkpoint_patch_embed: torch.Tensor) -> bool: # check all dimensions are the same except for channel dimension if len(model_patch_embed.shape) != len(checkpoint_patch_embed.shape): - return False + return False model_shape = [model_patch_embed.shape[i] for i in range(len(model_patch_embed.shape)) if i != 1] checkpoint_shape = [checkpoint_patch_embed.shape[i] for i in range(len(checkpoint_patch_embed.shape)) if i != 1]
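For reference, the rule enforced by patch_embed_weights_are_compatible is that every dimension of the patch-embedding weight except the channel axis (index 1) must match, so a checkpoint trained with a different number of bands can still be loaded while a different patch size cannot. A standalone illustration with made-up weight shapes:

import torch

def patch_embed_weights_are_compatible(model_w: torch.Tensor, ckpt_w: torch.Tensor) -> bool:
    # Same rule as in the diff above: compare all dimensions except the channel dim (index 1).
    if model_w.ndim != ckpt_w.ndim:
        return False
    model_shape = [s for i, s in enumerate(model_w.shape) if i != 1]
    ckpt_shape = [s for i, s in enumerate(ckpt_w.shape) if i != 1]
    return model_shape == ckpt_shape

model_weight = torch.zeros(768, 6, 16, 16)   # 6-band model
ckpt_weight = torch.zeros(768, 3, 16, 16)    # 3-band checkpoint: still compatible
other_weight = torch.zeros(768, 6, 14, 14)   # different patch size: not compatible
print(patch_embed_weights_are_compatible(model_weight, ckpt_weight))   # True
print(patch_embed_weights_are_compatible(model_weight, other_weight))  # False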