Resolve "Errors Encountered While Following Tutorial #38"
piotr-bazan-nv committed Oct 15, 2024
1 parent e415c0a commit cd18055
Showing 7 changed files with 48 additions and 67 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -21,6 +21,7 @@ limitations under the License.
 - change: Remove PyTorch dependencies from `nav.profile`
 - new: Introducing custom_args in TensorConfig for custom runners to handle -
   enables dynamic shapes user setup for TorchTensorRT compilation
+- fix: Exception is raised with Python >=3.11 due to wrong dataclass initialization

 ## 0.12.0
12 changes: 9 additions & 3 deletions model_navigator/reporting/optimize/base_report.py
@@ -21,7 +21,10 @@
 from rich.console import Console
 from rich.table import Table
 
-from model_navigator.reporting.optimize.events import OptimizeEvent, default_event_emitter
+from model_navigator.reporting.optimize.events import (
+    OptimizeEvent,
+    default_event_emitter,
+)
 
 
 class BaseReport(ABC):
@@ -43,15 +46,18 @@ def __init__(self, event_emitter: Optional[EventEmitter] = None) -> None:
         self.inplace_started = False
         self.has_optimization_started = False
         self.is_first_pipeline_command = False
-        self.console = Console(record=True)
+        self.console = Console(record=True, width=256)  # specify width to prevent auto-width detection
         self.listen_for_events()
 
     def listen_for_events(self):
         """Register listener on events."""
         self.emitter.on(OptimizeEvent.MODULE_REGISTERED, self.on_module_registered)
         self.emitter.on(OptimizeEvent.MODULE_REGISTRY_CLEARED, self.on_registry_cleared)
         self.emitter.on(OptimizeEvent.WORKSPACE_INITIALIZED, self.on_workspace_initialized)
-        self.emitter.on(OptimizeEvent.MODULE_PICKED_FOR_OPTIMIZATION, self.on_module_picked_for_optimization)
+        self.emitter.on(
+            OptimizeEvent.MODULE_PICKED_FOR_OPTIMIZATION,
+            self.on_module_picked_for_optimization,
+        )
         self.emitter.on(OptimizeEvent.OPTIMIZATION_STARTED, self.on_optimization_started)
         self.emitter.on(OptimizeEvent.OPTIMIZATION_FINISHED, self.on_optimization_finished)
         self.emitter.on(OptimizeEvent.PIPELINE_STARTED, self.on_pipeline_started)
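Note on the width=256 change: when no width is given, Rich's Console auto-detects the
terminal width, and falls back to a default (typically 80 columns) when output is not
attached to a terminal, e.g. under pytest's output capture. Report tables then wrap and
the recorded text depends on the environment. A minimal sketch of the effect
(illustrative only, not part of this commit; the module name and path are invented):

# Sketch: how Console width changes recorded table output.
from rich.console import Console
from rich.table import Table


def render(width: int) -> str:
    table = Table()
    table.add_column("Module name")
    table.add_column("Path")
    table.add_row("model_a", "/home/dev/.cache/model_navigator/model_a/0/torchscript-trace/model.pt")
    console = Console(record=True, width=width)
    console.print(table)
    return console.export_text()  # available because record=True


print(render(80))   # the long path is wrapped/truncated to fit 80 columns
print(render(256))  # each row stays on one line, matching the updated fixtures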
8 changes: 3 additions & 5 deletions model_navigator/reporting/profile/report.py
@@ -38,17 +38,15 @@ class Row:
     """Represents a row in the status table."""
 
     runtime: str = ""
-    status: Text = Text()
+    status: Text = field(default_factory=Text)
     results: List[ProfilingResult] = field(default_factory=list)
     is_separator: bool = False
 
 
 class SimpleReport:
     """Profile reports."""
 
-    def __init__(
-        self, show_results: bool = True, event_emitter: Optional[EventEmitter] = None, width: Optional[int] = None
-    ) -> None:
+    def __init__(self, show_results: bool = True, event_emitter: Optional[EventEmitter] = None) -> None:
         """Initialized object.
 
         Args:
@@ -68,7 +66,7 @@ def __init__(
 
         self.table_data: List[Row] = []
 
-        self.console = Console(record=True, width=width)
+        self.console = Console(record=True, width=256)  # specify width to prevent auto-width detection
         self.listen_for_events()
 
     def listen_for_events(self):
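The Row change above is the dataclass fix noted in the CHANGELOG: rich.text.Text defines
__eq__ without __hash__, so its instances are unhashable, and starting with Python 3.11
dataclasses rejects such class-instance defaults as mutable. A minimal sketch of the
failure and the fix (illustrative, independent of the repository code):

from dataclasses import dataclass, field

from rich.text import Text

# On Python >= 3.11 this version raises at class-definition time:
#   ValueError: mutable default <class 'rich.text.Text'> for field status
#   is not allowed: use default_factory
#
# @dataclass
# class Row:
#     status: Text = Text()


@dataclass
class Row:
    # default_factory builds a fresh Text per instance: nothing is shared
    # between rows and the 3.11 mutability check is satisfied
    status: Text = field(default_factory=Text)


row = Row()
print(row.status.plain)  # "" - an empty, per-instance Text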
19 changes: 8 additions & 11 deletions tests/assets/reports/optimize/scenario_inplace_expected.txt
@@ -182,14 +182,11 @@ model_c: Verifying model trt-fp32 on TensorRT backend OK
 model_c: Verifying model trt-fp16 on TensorRT backend ...
 model_c: Verifying model trt-fp16 on TensorRT backend OK
 Optimization finished for all modules.
-Optimization result for max throughput and min latency strategy
-┏━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
-┃ Module name ┃ Optimized backend              ┃ Path                          ┃
-┡━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
-│ model_a     │ torchscript-trace on           │ /home/dev/.cache/model_navig… │
-│             │ TorchScriptCUDA backend        │                               │
-│ model_b     │ torchscript-trace on           │ /home/dev/.cache/model_navig… │
-│             │ TorchScriptCUDA backend        │                               │
-│ model_c     │ torchscript-trace on           │ /home/dev/.cache/model_navig… │
-│             │ TorchScriptCUDA backend        │                               │
-└─────────────┴────────────────────────────────┴───────────────────────────────┘
+Optimization result for max throughput and min latency strategy
+┏━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
+┃ Module name ┃ Optimized backend                            ┃ Path                                                                  ┃
+┡━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
+│ model_a     │ torchscript-trace on TorchScriptCUDA backend │ /home/dev/.cache/model_navigator/model_a/0/torchscript-trace/model.pt │
+│ model_b     │ torchscript-trace on TorchScriptCUDA backend │ /home/dev/.cache/model_navigator/model_b/0/torchscript-trace/model.pt │
+│ model_c     │ torchscript-trace on TorchScriptCUDA backend │ /home/dev/.cache/model_navigator/model_c/0/torchscript-trace/model.pt │
+└─────────────┴──────────────────────────────────────────────┴───────────────────────────────────────────────────────────────────────┘
55 changes: 20 additions & 35 deletions tests/assets/reports/optimize/scenario_jax_expected.txt
@@ -33,15 +33,11 @@ Model: Building TensorRT engine trt-fp32 from ONNX model ...
 Model: Building TensorRT engine trt-fp32 from ONNX model SKIPPED
 Model: Building TensorRT engine trt-fp16 from ONNX model ...
 Model: Building TensorRT engine trt-fp16 from ONNX model SKIPPED
-Model: Validating model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA
-backend ...
-Model: Validating model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA
-backend OK
-Model: Validating model tf-savedmodel-jit on TensorFlowSavedModelCUDA backend
-...
+Model: Validating model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA backend ...
+Model: Validating model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA backend OK
+Model: Validating model tf-savedmodel-jit on TensorFlowSavedModelCUDA backend ...
 Model: Validating model tf-savedmodel-jit on TensorFlowSavedModelCUDA backend OK
-Model: Validating model tf-savedmodel-xla on TensorFlowSavedModelCUDA backend
-...
+Model: Validating model tf-savedmodel-xla on TensorFlowSavedModelCUDA backend ...
 Model: Validating model tf-savedmodel-xla on TensorFlowSavedModelCUDA backend OK
 Model: Validating model tf-savedmodel on TensorFlowSavedModelCUDA backend ...
 Model: Validating model tf-savedmodel on TensorFlowSavedModelCUDA backend OK
@@ -77,18 +73,12 @@ Model: Validating model trt-fp32 on TensorRT backend ...
 Model: Validating model trt-fp32 on TensorRT backend FAIL
 Model: Validating model trt-fp16 on TensorRT backend ...
 Model: Validating model trt-fp16 on TensorRT backend SKIPPED
-Model: Benchmarking model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA
-backend ...
-Model: Benchmarking model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA
-backend OK
-Model: Benchmarking model tf-savedmodel-jit on TensorFlowSavedModelCUDA backend
-...
-Model: Benchmarking model tf-savedmodel-jit on TensorFlowSavedModelCUDA backend
-OK
-Model: Benchmarking model tf-savedmodel-xla on TensorFlowSavedModelCUDA backend
-...
-Model: Benchmarking model tf-savedmodel-xla on TensorFlowSavedModelCUDA backend
-OK
+Model: Benchmarking model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA backend ...
+Model: Benchmarking model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA backend OK
+Model: Benchmarking model tf-savedmodel-jit on TensorFlowSavedModelCUDA backend ...
+Model: Benchmarking model tf-savedmodel-jit on TensorFlowSavedModelCUDA backend OK
+Model: Benchmarking model tf-savedmodel-xla on TensorFlowSavedModelCUDA backend ...
+Model: Benchmarking model tf-savedmodel-xla on TensorFlowSavedModelCUDA backend OK
 Model: Benchmarking model tf-savedmodel on TensorFlowSavedModelCUDA backend ...
 Model: Benchmarking model tf-savedmodel on TensorFlowSavedModelCUDA backend OK
 Model: Benchmarking model onnx-jit-xla on OnnxCUDA backend ...
@@ -123,16 +113,12 @@ Model: Benchmarking model trt-fp32 on TensorRT backend ...
 Model: Benchmarking model trt-fp32 on TensorRT backend FAIL
 Model: Benchmarking model trt-fp16 on TensorRT backend ...
 Model: Benchmarking model trt-fp16 on TensorRT backend SKIPPED
-Model: Verifying model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA backend
-...
-Model: Verifying model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA backend
-SKIPPED
+Model: Verifying model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA backend ...
+Model: Verifying model tf-savedmodel-jit-xla on TensorFlowSavedModelCUDA backend SKIPPED
 Model: Verifying model tf-savedmodel-jit on TensorFlowSavedModelCUDA backend ...
-Model: Verifying model tf-savedmodel-jit on TensorFlowSavedModelCUDA backend
-SKIPPED
+Model: Verifying model tf-savedmodel-jit on TensorFlowSavedModelCUDA backend SKIPPED
 Model: Verifying model tf-savedmodel-xla on TensorFlowSavedModelCUDA backend ...
-Model: Verifying model tf-savedmodel-xla on TensorFlowSavedModelCUDA backend
-SKIPPED
+Model: Verifying model tf-savedmodel-xla on TensorFlowSavedModelCUDA backend SKIPPED
 Model: Verifying model tf-savedmodel on TensorFlowSavedModelCUDA backend ...
 Model: Verifying model tf-savedmodel on TensorFlowSavedModelCUDA backend SKIPPED
 Model: Verifying model onnx-jit-xla on OnnxCUDA backend ...
@@ -168,10 +154,9 @@ Model: Verifying model trt-fp32 on TensorRT backend SKIPPED
 Model: Verifying model trt-fp16 on TensorRT backend ...
 Model: Verifying model trt-fp16 on TensorRT backend SKIPPED
 Optimization finished for the model.
-Optimization result for max throughput and min latency strategy
-┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
-┃ Optimized backend                     ┃ Path                                 ┃
-┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
-│ tf-savedmodel on                      │ /workspace/navigator_workspace/tf-s… │
-│ TensorFlowSavedModelCUDA backend      │                                      │
-└───────────────────────────────────────┴──────────────────────────────────────┘
+Optimization result for max throughput and min latency strategy
+┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
+┃ Optimized backend                                 ┃ Path                                                          ┃
+┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
+│ tf-savedmodel on TensorFlowSavedModelCUDA backend │ /workspace/navigator_workspace/tf-savedmodel/model.savedmodel │
+└───────────────────────────────────────────────────┴───────────────────────────────────────────────────────────────┘
18 changes: 6 additions & 12 deletions tests/assets/reports/optimize/scenario_torch_expected.txt
@@ -49,10 +49,8 @@ Model: Validating model torchscript-script on TorchScriptCUDA backend ...
 Model: Validating model torchscript-script on TorchScriptCUDA backend OK
 Model: Validating model torchscript-trace on TorchScriptCUDA backend ...
 Model: Validating model torchscript-trace on TorchScriptCUDA backend OK
-Model: Validating model torch-exportedprogram on TorchExportedProgramCUDA
-backend ...
-Model: Validating model torch-exportedprogram on TorchExportedProgramCUDA
-backend FAIL
+Model: Validating model torch-exportedprogram on TorchExportedProgramCUDA backend ...
+Model: Validating model torch-exportedprogram on TorchExportedProgramCUDA backend FAIL
 Model: Validating model onnx-dynamo on OnnxCUDA backend ...
 Model: Validating model onnx-dynamo on OnnxCUDA backend SKIPPED
 Model: Validating model onnx-dynamo on OnnxTensorRT backend ...
@@ -89,10 +87,8 @@ Model: Benchmarking model torchscript-script on TorchScriptCUDA backend ...
 Model: Benchmarking model torchscript-script on TorchScriptCUDA backend OK
 Model: Benchmarking model torchscript-trace on TorchScriptCUDA backend ...
 Model: Benchmarking model torchscript-trace on TorchScriptCUDA backend OK
-Model: Benchmarking model torch-exportedprogram on TorchExportedProgramCUDA
-backend ...
-Model: Benchmarking model torch-exportedprogram on TorchExportedProgramCUDA
-backend FAIL
+Model: Benchmarking model torch-exportedprogram on TorchExportedProgramCUDA backend ...
+Model: Benchmarking model torch-exportedprogram on TorchExportedProgramCUDA backend FAIL
 Model: Benchmarking model onnx-dynamo on OnnxCUDA backend ...
 Model: Benchmarking model onnx-dynamo on OnnxCUDA backend SKIPPED
 Model: Benchmarking model onnx-dynamo on OnnxTensorRT backend ...
@@ -137,10 +133,8 @@ Model: Verifying model torchscript-script on TorchScriptCUDA backend ...
 Model: Verifying model torchscript-script on TorchScriptCUDA backend OK
 Model: Verifying model torchscript-trace on TorchScriptCUDA backend ...
 Model: Verifying model torchscript-trace on TorchScriptCUDA backend OK
-Model: Verifying model torch-exportedprogram on TorchExportedProgramCUDA backend
-...
-Model: Verifying model torch-exportedprogram on TorchExportedProgramCUDA backend
-SKIPPED
+Model: Verifying model torch-exportedprogram on TorchExportedProgramCUDA backend ...
+Model: Verifying model torch-exportedprogram on TorchExportedProgramCUDA backend SKIPPED
 Model: Verifying model onnx-dynamo on OnnxCUDA backend ...
 Model: Verifying model onnx-dynamo on OnnxCUDA backend SKIPPED
 Model: Verifying model onnx-dynamo on OnnxTensorRT backend ...
2 changes: 1 addition & 1 deletion tests/unit/base/test_reporting_profile.py
@@ -40,7 +40,7 @@ def emit_events(event_emitter, filename):
 @pytest.mark.parametrize("scenario_name", ["profile_with_results"])
 def test_simple_report_with_results(scenario_name, mock_event_emitter):  # noqa: F811
     # given
-    report = SimpleReport(event_emitter=mock_event_emitter, width=200)
+    report = SimpleReport(event_emitter=mock_event_emitter)
     # when
    emit_events(mock_event_emitter, f"scenario_{scenario_name}.txt")
     # then
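With the width now pinned inside the report classes, the recorded output is deterministic
across environments, so the test no longer needs to pass a width. A hedged sketch of the
record-and-export mechanism the fixture comparison relies on (not the repository's exact
assertion):

from rich.console import Console

console = Console(record=True, width=256)
console.print("Optimization finished for all modules.")

# record=True lets a test export everything printed so far and compare it
# verbatim against a fixture file such as those under tests/assets/reports
captured = console.export_text()
assert "Optimization finished for all modules." in captured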
