-
Notifications
You must be signed in to change notification settings - Fork 446
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Upgrade OV, NNCF and MAPI #3335
Conversation
Codecov Report — Attention: Patch coverage is
Additional details and impacted files@@ Coverage Diff @@
## develop #3335 +/- ##
===========================================
+ Coverage 74.63% 77.30% +2.67%
===========================================
Files 212 250 +38
Lines 19514 22828 +3314
===========================================
+ Hits 14564 17647 +3083
- Misses 4950 5181 +231
Flags with carried forward coverage won't be shown. Click here to find out more. ☔ View full report in Codecov by Sentry. |
Daily: https://github.com/openvinotoolkit/training_extensions/actions/runs/8796785266 |
Could you check |
That's because of my attempt to fix e2e, I'll revert this |
I double checked, ssd300 model doesn't exist anymore, so I picked up another. We have a new issue now, after label info refactoring: |
Is it possible to distinguish whether the given IR comes from OMZ or not? |
OMZ models have model_info containing at least model_type, that allows us to create MAPI model |
If so, is it possible to create a branch for OMZ models in |
Right, and you're already checking |
I couldn't remember exactly what I did before, so I looked back at what I did. training_extensions/src/otx/core/model/base.py Lines 1004 to 1012 in e402a84
This test failure log shows that it can successfully parse label information from OMZ model such as _________________________ test_otx_ov_test[detection] __________________________
ov_recipe = '/home/vinnamki/otx/training_extensions/src/otx/recipe/detection/openvino_model.yaml'
tmp_path = PosixPath('/data/tmp/pytest-of-vinnamki/pytest-757/test_otx_ov_test_detection_0')
fxt_target_dataset_per_task = {'action_classification': 'tests/assets/action_classification_dataset/', 'action_detection': 'tests/assets/action_dete..., 'anomaly_classification': 'tests/assets/anomaly_hazelnut', 'anomaly_detection': 'tests/assets/anomaly_hazelnut', ...}
fxt_open_subprocess = False
@pytest.mark.parametrize(
"ov_recipe",
pytest.RECIPE_OV_LIST,
ids=lambda path: Path(path).parent.name,
)
def test_otx_ov_test(
ov_recipe: str,
tmp_path: Path,
fxt_target_dataset_per_task: dict,
fxt_open_subprocess: bool,
) -> None:
"""
Test OTX CLI e2e commands.
- 'otx test' with OV model
Args:
recipe (str): The OV recipe to use for testing. (eg. 'classification/openvino_model.yaml')
tmp_path (Path): The temporary path for storing the testing outputs.
Returns:
None
"""
task = ov_recipe.split("/")[-2]
model_name = ov_recipe.split("/")[-1].split(".")[0]
if task in [
"multi_label_cls",
"instance_segmentation",
"h_label_cls",
"visual_prompting",
"zero_shot_visual_prompting",
"anomaly_classification",
"anomaly_detection",
"anomaly_segmentation",
"action_classification",
]:
# OMZ doesn't have proper model for Pytorch MaskRCNN interface
# TODO(Kirill): Need to change this test when export enabled
pytest.skip("OMZ doesn't have proper model for these types of tasks.")
# otx test
tmp_path_test = tmp_path / f"otx_test_{task}_{model_name}"
command_cfg = [
"otx",
"test",
"--config",
ov_recipe,
"--data_root",
fxt_target_dataset_per_task[task],
"--work_dir",
str(tmp_path_test / "outputs"),
"--engine.device",
"cpu",
"--disable-infer-num-classes",
]
> run_main(command_cfg=command_cfg, open_subprocess=fxt_open_subprocess)
tests/integration/cli/test_cli.py:394:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/utils.py:18: in run_main
_run_main(command_cfg)
tests/utils.py:37: in _run_main
main()
src/otx/cli/__init__.py:17: in main
OTXCLI()
src/otx/cli/cli.py:60: in __init__
self.run()
src/otx/cli/cli.py:530: in run
fn(**fn_kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <otx.engine.engine.Engine object at 0x7c8c3e50cbd0>, checkpoint = None
datamodule = <otx.core.data.module.OTXDataModule object at 0x7c8c7a73bb10>
metric = None
kwargs = {'accumulate_grad_batches': 1, 'barebones': False, 'callbacks': [<lightning.pytorch.callbacks.progress.rich_progress.R...ng.pytorch.callbacks.rich_model_summary.RichModelSummary object at 0x7c8c3e27cb10>], 'check_val_every_n_epoch': 1, ...}
model = OVDetectionModel(), is_ir_ckpt = False
msg = "To launch a test pipeline, the label information should be same between the training and testing datasets. Please che...onitor']]), datamodule.label_info=LabelInfo(label_names=['car', 'tree', 'bug'], label_groups=[['car', 'tree', 'bug']])"
def test(
self,
checkpoint: PathLike | None = None,
datamodule: EVAL_DATALOADERS | OTXDataModule | None = None,
metric: MetricCallable | None = None,
**kwargs,
) -> dict:
"""Run the testing phase of the engine.
Args:
datamodule (EVAL_DATALOADERS | OTXDataModule | None, optional): The data module containing the test data.
checkpoint (PathLike | None, optional): Path to the checkpoint file to load the model from.
Defaults to None.
metric (MetricCallable | None): If not None, it will override `OTXModel.metric_callable` with the given
metric callable. It will temporarilly change the evaluation metric for the validation and test.
**kwargs: Additional keyword arguments for pl.Trainer configuration.
Returns:
dict: Dictionary containing the callback metrics from the trainer.
Example:
>>> engine.test(
... datamodule=OTXDataModule(),
... checkpoint=<checkpoint/path>,
... )
CLI Usage:
1. you can pick a model.
```python
otx test
--model <CONFIG | CLASS_PATH_OR_NAME> --data_root <DATASET_PATH, str>
--checkpoint <CKPT_PATH, str>
```
2. If you have a ready configuration file, run it like this.
```python
otx test --config <CONFIG_PATH, str> --checkpoint <CKPT_PATH, str>
```
"""
model = self.model
checkpoint = checkpoint if checkpoint is not None else self.checkpoint
datamodule = datamodule if datamodule is not None else self.datamodule
is_ir_ckpt = Path(str(checkpoint)).suffix in [".xml", ".onnx"]
if is_ir_ckpt and not isinstance(model, OVModel):
model = self._auto_configurator.get_ov_model(model_name=str(checkpoint), label_info=datamodule.label_info)
if self.device.accelerator != "cpu":
msg = "IR model supports inference only on CPU device. The device is changed automatic."
warn(msg, stacklevel=1)
self.device = DeviceType.cpu # type: ignore[assignment]
# NOTE: Re-initiate datamodule without tiling as model API supports its own tiling mechanism
if isinstance(model, OVModel):
datamodule = self._auto_configurator.update_ov_subset_pipeline(datamodule=datamodule, subset="test")
# NOTE, trainer.test takes only lightning based checkpoint.
# So, it can't take the OTX1.x checkpoint.
if checkpoint is not None and not is_ir_ckpt:
model_cls = self.model.__class__
model = model_cls.load_from_checkpoint(checkpoint_path=checkpoint)
if model.label_info != self.datamodule.label_info:
msg = (
"To launch a test pipeline, the label information should be same "
"between the training and testing datasets. "
"Please check whether you use the same dataset: "
f"model.label_info={model.label_info}, "
f"datamodule.label_info={self.datamodule.label_info}"
)
> raise ValueError(msg)
E ValueError: To launch a test pipeline, the label information should be same between the training and testing datasets. Please check whether you use the same dataset: model.label_info=LabelInfo(label_names=['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'], label_groups=[['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']]), datamodule.label_info=LabelInfo(label_names=['car', 'tree', 'bug'], label_groups=[['car', 'tree', 'bug']]) |
Summary
Also MAPI-related imports were refactored.