From 5f879065e5864cbc9b0662759b1cea969af9df2c Mon Sep 17 00:00:00 2001
From: nadijagraca
Date: Fri, 2 Aug 2024 14:07:29 +0200
Subject: [PATCH 01/18] adding unit tests for vizro-ai dashboard

---
 .../vizro-ai/dashboard/_graph/conftest.py     |  31 ++++++
 .../_graph/test_dashboard_creation.py         |  37 +++++++
 .../dashboard/_response_models/conftest.py    | 103 ++++++++++++++++++
 .../_response_models/test_components.py       |  27 +++++
 .../_response_models/test_controls.py         |  49 +++++++++
 .../_response_models/test_dashboard.py        |  18 +++
 .../_response_models/test_df_info.py          |   7 ++
 .../dashboard/_response_models/test_layout.py |  43 ++++++++
 .../dashboard/_response_models/test_page.py   |  55 ++++++++++
 .../tests/unit/vizro-ai/dashboard/conftest.py |  46 ++++++++
 .../dashboard/test_pydantic_output.py         |  28 +++++
 11 files changed, 444 insertions(+)
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/_graph/conftest.py
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/_graph/test_dashboard_creation.py
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py
 create mode 100644 vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py

diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/conftest.py
new file mode 100644
index 000000000..28b14fa89
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/conftest.py
@@ -0,0 +1,31 @@
+import pandas as pd
+import pytest
+from langchain_core.messages import HumanMessage
+from vizro_ai.dashboard._graph.dashboard_creation import GraphState
+from vizro_ai.dashboard.utils import AllDfMetadata, DfMetadata
+
+
+@pytest.fixture
+def dataframes():
+    return [pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]})]
+
+
+@pytest.fixture
+def df_metadata():
+    df_metadata = AllDfMetadata({})
+    df_metadata.all_df_metadata["gdp_chart"] = DfMetadata(
+        df_schema={"a": "int64", "b": "int64"},
+        df=pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]}),
+        df_sample=pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]}),
+    )
+    return df_metadata
+
+
+@pytest.fixture
+def graph_state(dataframes, df_metadata):
+    return GraphState(
+        messages=[HumanMessage(content="contents of the message")],
+        dfs=dataframes,
+        all_df_metadata=df_metadata,
+        pages=[],
+    )
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/test_dashboard_creation.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/test_dashboard_creation.py
new file mode 100644
index 000000000..e5bd38bd5
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/test_dashboard_creation.py
@@ -0,0 +1,37 @@
+import pandas as pd
+import pytest
+
+try:
+    from pydantic.v1 import ValidationError
+except ImportError:  # pragma: no cov
+    from pydantic import ValidationError
+
+from langchain_core.messages import HumanMessage
+from vizro_ai.dashboard._graph.dashboard_creation import GraphState
+
+
+class TestConfig:
+    """Test GraphState config creation."""
+
+    def test_graph_state_instantiation(self, graph_state, dataframes):
+        assert isinstance(graph_state, GraphState)
+        assert graph_state.messages[0].content == "contents of the message"
+        assert graph_state.dfs == dataframes
+        assert "gdp_chart" in graph_state.all_df_metadata.all_df_metadata
+        assert graph_state.pages == []
+
+    @pytest.mark.parametrize(
+        "dataframes, output_error",
+        [
+            (pd.DataFrame(), "value is not a valid list"),
+            ([pd.DataFrame(), {}], "instance of DataFrame expected"),
+        ],
+    )
+    def test_check_dataframes(self, dataframes, output_error, df_metadata):
+        with pytest.raises(ValidationError, match=output_error):
+            GraphState(
+                messages=[HumanMessage(content="contents of the message")],
+                dfs=dataframes,
+                all_df_metadata=df_metadata,
+                pages=[],
+            )
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
new file mode 100644
index 000000000..7533749c7
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
@@ -0,0 +1,103 @@
+from typing import Any, List
+
+import pandas as pd
+import pytest
+from langchain.output_parsers import PydanticOutputParser
+from langchain_community.llms.fake import FakeListLLM
+from vizro_ai.dashboard._response_models.components import ComponentPlan
+from vizro_ai.dashboard._response_models.page import PagePlan
+from vizro_ai.dashboard.utils import AllDfMetadata, DfMetadata
+
+
+class FakeListLLM(FakeListLLM):
+    def bind_tools(self, tools: List[Any]):
+        return super().bind(tools=tools)
+
+    def with_structured_output(self, schema):
+        llm = self
+        output_parser = PydanticOutputParser(pydantic_object=schema)
+        return llm | output_parser
+
+
+@pytest.fixture
+def fake_llm_card():
+    response = ['{"text":"this is a card","href":""}']
+    return FakeListLLM(responses=response)
+
+
+@pytest.fixture
+def fake_llm_layout():
+    response = ['{"grid":[[0,1]]}']
+    return FakeListLLM(responses=response)
+
+
+@pytest.fixture
+def fake_llm_filter():
+    response = ['{"column": "a", "targets": ["gdp_chart"]}']
+    return FakeListLLM(responses=response)
+
+
+@pytest.fixture
+def df_cols():
+    return ["continent", "country", "population", "gdp"]
+
+
+@pytest.fixture
+def controllable_components():
+    return ["gdp_chart"]
+
+
+@pytest.fixture
+def layout_description():
+    return "The layout of this page should use `grid=[[0,1]]`"
+
+
+@pytest.fixture
+def df():
+    return pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]})
+
+
+@pytest.fixture
+def df_sample():
+    return pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]})
+
+
+@pytest.fixture
+def df_schema():
+    return {"a": "int64", "b": "int64"}
+
+
+@pytest.fixture
+def df_metadata(df, df_schema, df_sample):
+    df_metadata = AllDfMetadata({})
+    df_metadata.all_df_metadata["gdp_chart"] = DfMetadata(
+        df_schema=df_schema,
+        df=df,
+        df_sample=df_sample,
+    )
+    return df_metadata
+
+
+@pytest.fixture
+def component_card():
+    return ComponentPlan(
+        component_type="Card",
+        component_description="This is a card",
+        component_id="card_1",
+        df_name="N/A",
+    )
+
+
+@pytest.fixture
+def component_card_2():
+    return ComponentPlan(
+        component_type="Card",
+        component_description="This is a second card",
+        component_id="card_2",
+        df_name="N/A",
+    )
+
+
+@pytest.fixture
+def page_plan(component_card):
+    return PagePlan(title="Test Page", components_plan=[component_card], controls_plan=[], layout_plan=None)
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
new file mode 100644
index 000000000..4f7cdc4be
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
@@ -0,0 +1,27 @@
+import pytest
+from vizro_ai.dashboard._response_models.components import ComponentPlan
+
+
+class TestComponentCreate:
+    """Tests component creation."""
+
+    def test_component_plan_instantiation(self):
+        component = ComponentPlan(
+            component_id="card_1",
+            component_type="Card",
+            component_description="This is a card",
+            df_name="N/A",
+        )
+        assert component.component_id == "card_1"
+        assert component.component_type == "Card"
+        assert component.component_description == "This is a card"
+        assert component.df_name == "N/A"
+
+    @pytest.mark.xfail(raises=ValueError, reason="Known issue: real model is required for .plot")
+    def test_card_create(self, component_card, fake_llm_card):
+        if component_card.component_type == "Card":
+            actual = component_card.create(
+                model=fake_llm_card,
+                all_df_metadata=None,
+            )
+            assert actual.type == "card"
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
new file mode 100644
index 000000000..fe08e75ed
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
@@ -0,0 +1,49 @@
+import pytest
+from vizro_ai.dashboard._response_models.controls import ControlPlan, _create_filter_proxy
+
+try:
+    from pydantic.v1 import ValidationError
+except ImportError:  # pragma: no cov
+    from pydantic import ValidationError
+
+
+class TestControlCreate:
+    """Tests control creation."""
+
+    def test_create_filter_proxy_validate_targets(self, df_cols, df_schema, controllable_components):
+        actual = _create_filter_proxy(df_cols, df_schema, controllable_components)
+        with pytest.raises(ValidationError, match="targets must be one of"):
+            actual(targets=["population_chart"], column="gdp")
+
+    def test_create_filter_proxy_validate_targets_not_empty(self, df_cols, df_schema, controllable_components):
+        actual = _create_filter_proxy(df_cols=df_cols, df_schema=df_schema, controllable_components=[])
+        with pytest.raises(ValidationError):
+            actual(targets=[], column="gdp")
+
+    def test_create_filter_proxy_validate_columns(self, df_cols, df_schema, controllable_components):
+        actual = _create_filter_proxy(df_cols, df_schema, controllable_components)
+        with pytest.raises(ValidationError, match="column must be one of"):
+            actual(targets=["gdp_chart"], column="x")
+
+
+class TestControlPlan:
+    """Test control plan."""
+
+    def test_control_plan_invalid_df_name(self, fake_llm_filter, df_metadata):
+        control_plan = ControlPlan(
+            control_type="Filter",
+            control_description="Create a filter that filters the data based on the column 'a'.",
+            df_name="population_chart",
+        )
+        default_control = control_plan.create(
+            model=fake_llm_filter, controllable_components=["gdp_chart"], all_df_metadata=df_metadata
+        )
+        assert default_control is None
+
+    def test_control_plan_invalid_type(self, fake_llm_filter, df_metadata):
+        with pytest.raises(ValidationError):
+            ControlPlan(
+                control_type="parameter",
+                control_description="Create a parameter that targets the data based on the column 'a'.",
+                df_name="gdp_chart",
+            )
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py
new file mode 100644
index 000000000..25b56eb9a
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py
@@ -0,0 +1,18 @@
+from vizro_ai.dashboard._response_models.dashboard import DashboardPlan
+
+
+class TestDashboardPlanner:
+    """Tests dashboard planner."""
+
+    def test_dashboard_planner(self, page_plan):
+        dashboard_plan = DashboardPlan(
+            title="Test Dashboard",
+            pages=[page_plan],
+        )
+        assert dashboard_plan.pages[0].title == "Test Page"
+        assert dashboard_plan.pages[0].components_plan[0].component_id == "card_1"
+        assert dashboard_plan.pages[0].components_plan[0].component_type == "Card"
+        assert dashboard_plan.pages[0].components_plan[0].component_description == "This is a card"
+        assert dashboard_plan.pages[0].components_plan[0].df_name == "N/A"
+        assert dashboard_plan.pages[0].layout_plan is None
+        assert dashboard_plan.pages[0].controls_plan == []
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py
new file mode 100644
index 000000000..e6eb82a4f
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py
@@ -0,0 +1,7 @@
+from vizro_ai.dashboard._response_models.df_info import _get_df_info
+
+
+def test_get_df_info(df, df_schema):
+    actual_df_schema, _ = _get_df_info(df=df)
+
+    assert actual_df_schema == df_schema
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
new file mode 100644
index 000000000..823887451
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
@@ -0,0 +1,43 @@
+import pytest
+import vizro.models as vm
+from vizro_ai.dashboard._pydantic_output import _get_pydantic_model
+from vizro_ai.dashboard._response_models.layout import LayoutPlan, _convert_to_grid
+
+
+class TestLayoutPlan:
+    """Test layout creation."""
+
+    def test_structured_output_layout_create(self, fake_llm_layout, layout_description):
+        structured_output = _get_pydantic_model(
+            query=layout_description, llm_model=fake_llm_layout, response_model=vm.Layout, df_info=None
+        )
+        assert structured_output.dict(exclude={"id": True}) == vm.Layout(grid=[[0, 1]]).dict(exclude={"id": True})
+
+    def test_layout_plan(self):
+        layout_plan = LayoutPlan(
+            layout_grid_template_areas=["graph card"],
+        )
+        layout = layout_plan.create(["graph", "card"])
+
+        assert layout.dict(exclude={"id": True}) == vm.Layout(grid=[[0, 1]]).dict(exclude={"id": True})
+
+
+@pytest.mark.parametrize(
+    "layout_grid_template_areas, component_ids, grid",
+    [
+        (
+            ["card_1 scatter_plot scatter_plot", "card_2 scatter_plot scatter_plot"],
+            ["card_1", "scatter_plot", "card_2"],
+            [[0, 1, 1], [2, 1, 1]],
+        ),
+        (
+            ["card_1 scatter_plot scatter_plot", "card_2 scatter_plot scatter_plot"],
+            ["card_1", "scatter_plot"],
+            [],
+        ),
+    ],
+)
+def test_convert_to_grid(layout_grid_template_areas, component_ids, grid):
+    actual_grid = _convert_to_grid(layout_grid_template_areas=layout_grid_template_areas, component_ids=component_ids)
+
+    assert actual_grid == grid
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py
new file mode 100644
index 000000000..e67dc821b
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py
@@ -0,0 +1,55 @@
+import pytest
+from vizro_ai.dashboard._response_models.page import PagePlan
+
+try:
+    from pydantic.v1 import ValidationError
+except ImportError:  # pragma: no cov
+    from pydantic import ValidationError
+
+
+class TestPagePlan:
+    """Test for page plan."""
+
+    def test_dashboard_plan(self, component_card):
+        page_plan = PagePlan(
+            title="Test Page",
+            components_plan=[component_card],
+            controls_plan=[],
+            layout_plan=None,
+        )
+        assert page_plan.title == "Test Page"
+        assert page_plan.components_plan[0].component_id == "card_1"
+        assert page_plan.components_plan[0].component_type == "Card"
+        assert page_plan.components_plan[0].component_description == "This is a card"
+        assert page_plan.layout_plan is None
+        assert page_plan.controls_plan == []
+        assert page_plan.unsupported_specs == []
+
+    def test_page_plan_invalid_components(self):
+        with pytest.raises(ValidationError, match="A page must contain at least one component."):
+            PagePlan(
+                title="Test Page",
+                components_plan=[],
+                controls_plan=[],
+                layout_plan=None,
+            )
+
+    def test_page_plan_unsupported_specs(self, component_card):
+        page_plan = PagePlan(
+            title="Test Page",
+            components_plan=[component_card],
+            controls_plan=[],
+            layout_plan=None,
+            unsupported_specs=["Unknown"],
+        )
+
+        assert page_plan.unsupported_specs == []
+
+    def test_page_plan_duplicate_components(self, component_card):
+        with pytest.raises(ValidationError):
+            PagePlan(
+                title="Test Page",
+                components_plan=[component_card, component_card],
+                controls_plan=[],
+                layout_plan=None,
+            )
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py
new file mode 100644
index 000000000..1ca1a2e1c
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py
@@ -0,0 +1,46 @@
+from typing import Any, List
+
+import pytest
+from langchain.output_parsers import PydanticOutputParser
+from langchain_community.llms.fake import FakeListLLM
+from langchain_core.messages import HumanMessage
+
+
+class FakeListLLM(FakeListLLM):
+    def bind_tools(self, tools: List[Any]):
+        return super().bind(tools=tools)
+
+    def with_structured_output(self, schema):
+        llm = self
+        output_parser = PydanticOutputParser(pydantic_object=schema)
+        return llm | output_parser
+
+
+@pytest.fixture
+def fake_llm():
+    response = ['{"text":"this is a card","href":""}']
+    return FakeListLLM(responses=response)
+
+
+@pytest.fixture
+def component_description():
+    return "This is a card"
+
+
+@pytest.fixture
+def query():
+    return "I need a page with one card saying: Simple card."
+
+
+@pytest.fixture
+def message_output_valid():
+    return {"message": [HumanMessage(content="I need a page with one card saying: Simple card.")], "df_info": None}
+
+
+@pytest.fixture
+def message_output_error():
+    return {
+        "message": [HumanMessage(content="I need a page with one card saying: Simple card.")],
+        "df_info": None,
+        "validation_error": "ValidationError",
+    }
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py
new file mode 100644
index 000000000..ee34210fb
--- /dev/null
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py
@@ -0,0 +1,28 @@
+import vizro.models as vm
+from vizro_ai.dashboard._pydantic_output import _create_message_content, _create_prompt_template, _get_pydantic_model
+
+
+def test_get_pydantic_output(component_description, fake_llm):
+    pydantic_output = _get_pydantic_model(
+        query=component_description, llm_model=fake_llm, response_model=vm.Card, df_info=None
+    )
+    assert pydantic_output.dict(exclude={"id": True}) == vm.Card(text="this is a card", href="").dict(
+        exclude={"id": True}
+    )
+
+
+def test_create_message_content_valid(query, message_output_valid):
+    message_content = _create_message_content(query=query, df_info=None)
+
+    assert message_content == message_output_valid
+
+
+def test_create_message_content_error(query, message_output_error):
+    message_content = _create_message_content(query=query, df_info=None, validation_error="ValidationError", retry=True)
+    assert message_content == message_output_error
+
+
+def test_create_prompt_template():
+    additional_info = "Pay special attention to the following error: {validation_error}"
+    model = _create_prompt_template(additional_info)
+    assert additional_info in model.messages[0].prompt.template

From a781ad0db4a35ef5dd59299924390050a3577efc Mon Sep 17 00:00:00 2001
From: nadijagraca
Date: Fri, 2 Aug 2024 14:46:07 +0200
Subject: [PATCH 02/18] adding changelog

---
 ...58_nadija_ratkusic_graca_add_unit_tests.md | 48 +++++++++++++++++++
 1 file changed, 48 insertions(+)
 create mode 100644 vizro-ai/changelog.d/20240802_144558_nadija_ratkusic_graca_add_unit_tests.md

diff --git a/vizro-ai/changelog.d/20240802_144558_nadija_ratkusic_graca_add_unit_tests.md b/vizro-ai/changelog.d/20240802_144558_nadija_ratkusic_graca_add_unit_tests.md
new file mode 100644
index 000000000..f1f65e73c
--- /dev/null
+++ b/vizro-ai/changelog.d/20240802_144558_nadija_ratkusic_graca_add_unit_tests.md
@@ -0,0 +1,48 @@
+
+
+
+
+
+
+
+
+

From 97e7b357e97c59d9741dd7fb9ad3609738f9f6f3 Mon Sep 17 00:00:00 2001
From: nadijagraca
Date: Mon, 5 Aug 2024 17:25:17 +0200
Subject: [PATCH 03/18] addressing pr comments

---
 .../dashboard/_response_models/conftest.py    | 30 +++++++++++++---
 .../_response_models/test_controls.py         | 34 +++++++++++++++++--
 .../_response_models/test_df_info.py          |  6 ++--
 .../dashboard/_response_models/test_layout.py |  5 +++
 .../tests/unit/vizro-ai/dashboard/conftest.py |  4 +--
 .../dashboard/test_pydantic_output.py         |  2 +-
 6 files changed, 68 insertions(+), 13 deletions(-)

diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
index 7533749c7..2ac65ee70 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
@@ -9,7 +9,7 @@ from vizro_ai.dashboard.utils import AllDfMetadata, DfMetadata
 
 
-class FakeListLLM(FakeListLLM):
+class MockStructuredOutputLLM(FakeListLLM):
     def bind_tools(self, tools: List[Any]):
         return super().bind(tools=tools)
 
@@ -22,19 +22,25 @@ def with_structured_output(self, schema):
 @pytest.fixture
 def fake_llm_card():
     response = ['{"text":"this is a card","href":""}']
-    return FakeListLLM(responses=response)
+    return MockStructuredOutputLLM(responses=response)
 
 
 @pytest.fixture
 def fake_llm_layout():
     response = ['{"grid":[[0,1]]}']
-    return FakeListLLM(responses=response)
+    return MockStructuredOutputLLM(responses=response)
 
 
 @pytest.fixture
 def fake_llm_filter():
     response = ['{"column": "a", "targets": ["gdp_chart"]}']
-    return FakeListLLM(responses=response)
+    return MockStructuredOutputLLM(responses=response)
+
+
+@pytest.fixture
+def fake_llm_filter_1():
+    response = ['{"column": "country", "targets": ["gdp_chart"]}']
+    return MockStructuredOutputLLM(responses=response)
 
 
 @pytest.fixture
 def df_cols():
     return ["continent", "country", "population", "gdp"]
 
 
+@pytest.fixture
+def df_schema_1():
+    return {"continent": "object", "country": "object", "population": "int64", "gdp": "int64"}
+
+
 @pytest.fixture
 def controllable_components():
     return ["gdp_chart"]
@@ -59,7 +70,8 @@ def df():
 
 @pytest.fixture
 def df_sample():
-    return pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]})
+    df = pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]})
+    return df.sample(5, replace=True, random_state=19)
 
 
 @pytest.fixture
@@ -101,3 +113,11 @@ def page_plan(component_card):
     return PagePlan(title="Test Page", components_plan=[component_card], controls_plan=[], layout_plan=None)
+
+
+@pytest.fixture
+def filter_prompt():
+    return """
+    Create a filter from the following instructions: Filter the gdp chart by country.
+    Do not make up things that are optional and DO NOT configure actions, action triggers or action chains.
+    If no options are specified, leave them out."""
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
index fe08e75ed..fc8a9e008 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
@@ -1,14 +1,20 @@
 import pytest
-from vizro_ai.dashboard._response_models.controls import ControlPlan, _create_filter_proxy
+import vizro.models as vm
+from vizro.managers import model_manager
+from vizro.models import VizroBaseModel
+from vizro_ai.dashboard._response_models.controls import ControlPlan, _create_filter, _create_filter_proxy
 
 try:
     from pydantic.v1 import ValidationError
 except ImportError:  # pragma: no cov
     from pydantic import ValidationError
 
+# Needed for testing control creation.
+model_manager.__setitem__("gdp_chart", VizroBaseModel)
 
-class TestControlCreate:
-    """Tests control creation."""
+
+class TestFilterProxyCreate:
+    """Tests filter proxy creation."""
 
     def test_create_filter_proxy_validate_targets(self, df_cols, df_schema, controllable_components):
         actual = _create_filter_proxy(df_cols, df_schema, controllable_components)
         with pytest.raises(ValidationError, match="targets must be one of"):
             actual(targets=["population_chart"], column="gdp")
@@ -25,6 +31,14 @@ def test_create_filter_proxy_validate_columns(self, df_cols, df_schema, controll
         with pytest.raises(ValidationError, match="column must be one of"):
             actual(targets=["gdp_chart"], column="x")
 
+    def test_create_filter_proxy(self, df_cols, df_schema, controllable_components):
+        actual = _create_filter_proxy(df_cols, df_schema, controllable_components)
+        actual_filter = actual(targets=["gdp_chart"], column="gdp")
+
+        assert actual_filter.dict(exclude={"id": True}) == vm.Filter(targets=["gdp_chart"], column="gdp").dict(
+            exclude={"id": True}
+        )
+
 
 class TestControlPlan:
     """Test control plan."""
 
     def test_control_plan_invalid_df_name(self, fake_llm_filter, df_metadata):
         control_plan = ControlPlan(
             control_type="Filter",
             control_description="Create a filter that filters the data based on the column 'a'.",
             df_name="population_chart",
         )
         default_control = control_plan.create(
             model=fake_llm_filter, controllable_components=["gdp_chart"], all_df_metadata=df_metadata
         )
         assert default_control is None
@@ -47,3 +61,17 @@ def test_control_plan_invalid_type(self, fake_llm_filter, df_metadata):
             control_description="Create a parameter that targets the data based on the column 'a'.",
             df_name="gdp_chart",
         )
+
+
+def test_create_filter(filter_prompt, fake_llm_filter_1, df_cols, df_schema_1, controllable_components):
+
+    actual = _create_filter(
+        filter_prompt=filter_prompt,
+        model=fake_llm_filter_1,
+        df_cols=df_cols,
+        df_schema=df_schema_1,
+        controllable_components=controllable_components,
+    )
+    assert actual.dict(exclude={"id": True}) == vm.Filter(targets=["gdp_chart"], column="country").dict(
+        exclude={"id": True}
+    )
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py
index e6eb82a4f..1483a270c 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py
@@ -1,7 +1,9 @@
+from pandas.testing import assert_frame_equal
 from vizro_ai.dashboard._response_models.df_info import _get_df_info
 
 
-def test_get_df_info(df, df_schema):
-    actual_df_schema, _ = _get_df_info(df=df)
+def test_get_df_info(df, df_schema, df_sample):
+    actual_df_schema, actual_df_sample = _get_df_info(df=df)
 
     assert actual_df_schema == df_schema
+    assert_frame_equal(actual_df_sample, df_sample)
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
index 823887451..4156ebc06 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
@@ -35,6 +35,11 @@ def test_layout_plan(self):
             ["card_1", "scatter_plot"],
             [],
         ),
+        (
+            ["card_1 scatter_plot scatter_plot", ". scatter_plot scatter_plot"],
scatter_plot scatter_plot"], + ["card_1", "scatter_plot"], + [[0, 1, 1], [-1, 1, 1]], + ), ], ) def test_convert_to_grid(layout_grid_template_areas, component_ids, grid): diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py index 1ca1a2e1c..49344675d 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py @@ -6,7 +6,7 @@ from langchain_core.messages import HumanMessage -class FakeListLLM(FakeListLLM): +class MockStructuredOutputLLM(FakeListLLM): def bind_tools(self, tools: List[Any]): return super().bind(tools=tools) @@ -19,7 +19,7 @@ def with_structured_output(self, schema): @pytest.fixture def fake_llm(): response = ['{"text":"this is a card","href":""}'] - return FakeListLLM(responses=response) + return MockStructuredOutputLLM(responses=response) @pytest.fixture diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py index ee34210fb..008036ded 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py @@ -2,7 +2,7 @@ from vizro_ai.dashboard._pydantic_output import _create_message_content, _create_prompt_template, _get_pydantic_model -def test_get_pydantic_output(component_description, fake_llm): +def test_get_pydantic_model(component_description, fake_llm): pydantic_output = _get_pydantic_model( query=component_description, llm_model=fake_llm, response_model=vm.Card, df_info=None ) From 48789b9286f547050cddc66faf98ec4847a82825 Mon Sep 17 00:00:00 2001 From: nadijagraca Date: Mon, 5 Aug 2024 17:42:26 +0200 Subject: [PATCH 04/18] consolidate and remove unused fixtures --- .../dashboard/_response_models/conftest.py | 32 ++++++----------- .../_response_models/test_controls.py | 35 +++++++++---------- 2 files changed, 27 insertions(+), 40 deletions(-) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py index 2ac65ee70..90a1b613a 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py @@ -33,29 +33,13 @@ def fake_llm_layout(): @pytest.fixture def fake_llm_filter(): - response = ['{"column": "a", "targets": ["gdp_chart"]}'] + response = ['{"column": "a", "targets": ["bar_chart"]}'] return MockStructuredOutputLLM(responses=response) -@pytest.fixture -def fake_llm_filter_1(): - response = ['{"column": "country", "targets": ["gdp_chart"]}'] - return MockStructuredOutputLLM(responses=response) - - -@pytest.fixture -def df_cols(): - return ["continent", "country", "population", "gdp"] - - -@pytest.fixture -def df_schema_1(): - return {"continent": "object", "country": "object", "population": "int64", "gdp": "int64"} - - @pytest.fixture def controllable_components(): - return ["gdp_chart"] + return ["bar_chart"] @pytest.fixture @@ -69,8 +53,12 @@ def df(): @pytest.fixture -def df_sample(): - df = pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]}) +def df_cols(): + return ["a", "b"] + + +@pytest.fixture +def df_sample(df): return df.sample(5, replace=True, random_state=19) @@ -82,7 +70,7 @@ def df_schema(): @pytest.fixture def df_metadata(df, df_schema, df_sample): df_metadata = AllDfMetadata({}) - df_metadata.all_df_metadata["gdp_chart"] = DfMetadata( + df_metadata.all_df_metadata["bar_chart"] = 
         df_schema=df_schema,
         df=df,
         df_sample=df_sample,
@@ -118,6 +106,6 @@ def page_plan(component_card):
 @pytest.fixture
 def filter_prompt():
     return """
-    Create a filter from the following instructions: Filter the gdp chart by country.
+    Create a filter from the following instructions: Filter the bar chart by column `a`.
     Do not make up things that are optional and DO NOT configure actions, action triggers or action chains.
     If no options are specified, leave them out."""
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
index fc8a9e008..60bf7c5c4 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
@@ -10,32 +10,32 @@ from pydantic import ValidationError
 
 # Needed for testing control creation.
-model_manager.__setitem__("gdp_chart", VizroBaseModel)
+model_manager.__setitem__("bar_chart", VizroBaseModel)
 
 
 class TestFilterProxyCreate:
     """Tests filter proxy creation."""
 
     def test_create_filter_proxy_validate_targets(self, df_cols, df_schema, controllable_components):
-        actual = _create_filter_proxy(df_cols, df_schema, controllable_components)
+        filter_proxy = _create_filter_proxy(df_cols, df_schema, controllable_components)
         with pytest.raises(ValidationError, match="targets must be one of"):
-            actual(targets=["population_chart"], column="gdp")
+            filter_proxy(targets=["population_chart"], column="a")
 
     def test_create_filter_proxy_validate_targets_not_empty(self, df_cols, df_schema, controllable_components):
-        actual = _create_filter_proxy(df_cols=df_cols, df_schema=df_schema, controllable_components=[])
+        filter_proxy = _create_filter_proxy(df_cols=df_cols, df_schema=df_schema, controllable_components=[])
         with pytest.raises(ValidationError):
-            actual(targets=[], column="gdp")
+            filter_proxy(targets=[], column="a")
 
     def test_create_filter_proxy_validate_columns(self, df_cols, df_schema, controllable_components):
-        actual = _create_filter_proxy(df_cols, df_schema, controllable_components)
+        filter_proxy = _create_filter_proxy(df_cols, df_schema, controllable_components)
         with pytest.raises(ValidationError, match="column must be one of"):
-            actual(targets=["gdp_chart"], column="x")
+            filter_proxy(targets=["bar_chart"], column="x")
 
-    def test_create_filter_proxy(self, df_cols, df_schema, controllable_components):
-        actual = _create_filter_proxy(df_cols, df_schema, controllable_components)
-        actual_filter = actual(targets=["gdp_chart"], column="gdp")
+    def test_create_filter_proxy(self, df_cols, df_schema, controllable_components):
+        filter_proxy = _create_filter_proxy(df_cols, df_schema, controllable_components)
+        actual_filter = filter_proxy(targets=["bar_chart"], column="a")
 
-        assert actual_filter.dict(exclude={"id": True}) == vm.Filter(targets=["gdp_chart"], column="gdp").dict(
+        assert actual_filter.dict(exclude={"id": True}) == vm.Filter(targets=["bar_chart"], column="a").dict(
             exclude={"id": True}
         )
 
 
 class TestControlPlan:
     """Test control plan."""
 
     def test_control_plan_invalid_df_name(self, fake_llm_filter, df_metadata):
         control_plan = ControlPlan(
             control_type="Filter",
             control_description="Create a filter that filters the data based on the column 'a'.",
             df_name="population_chart",
         )
         default_control = control_plan.create(
-            model=fake_llm_filter, controllable_components=["gdp_chart"], all_df_metadata=df_metadata
+            model=fake_llm_filter, controllable_components=["bar_chart"], all_df_metadata=df_metadata
         )
         assert default_control is None
 
     def test_control_plan_invalid_type(self, fake_llm_filter, df_metadata):
         with pytest.raises(ValidationError):
             ControlPlan(
                 control_type="parameter",
                 control_description="Create a parameter that targets the data based on the column 'a'.",
-                df_name="gdp_chart",
+                df_name="bar_chart",
             )
 
 
-def test_create_filter(filter_prompt, fake_llm_filter_1, df_cols, df_schema_1, controllable_components):
-
-    actual = _create_filter(
-        filter_prompt=filter_prompt,
-        model=fake_llm_filter_1,
-        df_cols=df_cols,
-        df_schema=df_schema_1,
-        controllable_components=controllable_components,
-    )
-    assert actual.dict(exclude={"id": True}) == vm.Filter(targets=["gdp_chart"], column="country").dict(
-        exclude={"id": True}
-    )
+def test_create_filter(filter_prompt, fake_llm_filter, df_cols, df_schema, controllable_components):
+    actual_filter = _create_filter(
+        filter_prompt=filter_prompt,
+        model=fake_llm_filter,
+        df_cols=df_cols,
+        df_schema=df_schema,
+        controllable_components=controllable_components,
+    )
+    assert actual_filter.dict(exclude={"id": True}) == vm.Filter(targets=["bar_chart"], column="a").dict(
+        exclude={"id": True}
+    )

From 85714d345239f11cd406367f41fe53f0e64ecb2e Mon Sep 17 00:00:00 2001
From: nadijagraca
Date: Thu, 8 Aug 2024 10:50:31 +0200
Subject: [PATCH 05/18] addressing pr comments

---
 .../tests/unit/vizro-ai/dashboard/conftest.py  |  6 ++++++
 .../vizro-ai/dashboard/test_pydantic_output.py | 15 ++++++++++++---
 2 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py
index 49344675d..27d063803 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py
@@ -44,3 +44,9 @@ def message_output_error():
         "df_info": None,
         "validation_error": "ValidationError",
     }
+
+
+@pytest.fixture
+def fake_llm_invalid():
+    response = ['{"text":"this is a card", "href": "", "icon": "summary"}']
+    return MockStructuredOutputLLM(responses=response)
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py
index 008036ded..36c50a0f8 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py
@@ -1,16 +1,25 @@
+import pytest
 import vizro.models as vm
+from tests_utils.asserts import assert_component_equal
 from vizro_ai.dashboard._pydantic_output import _create_message_content, _create_prompt_template, _get_pydantic_model
 
 
-def test_get_pydantic_model(component_description, fake_llm):
+def test_get_pydantic_model_valid(component_description, fake_llm):
     pydantic_output = _get_pydantic_model(
         query=component_description, llm_model=fake_llm, response_model=vm.Card, df_info=None
     )
-    assert pydantic_output.dict(exclude={"id": True}) == vm.Card(text="this is a card", href="").dict(
-        exclude={"id": True}
+    assert_component_equal(
+        pydantic_output.build(), vm.Card(text="this is a card", href="").build(), keys_to_strip={"id"}
     )
 
 
+def test_get_pydantic_model_invalid(component_description, fake_llm_invalid):
+    with pytest.raises(ValueError, match="1 validation error for Card"):
+        _get_pydantic_model(
+            query=component_description, llm_model=fake_llm_invalid, response_model=vm.Card, df_info=None
+        )
+
+
 def test_create_message_content_valid(query, message_output_valid):
     message_content = _create_message_content(query=query, df_info=None)

From 91d22bb7abca16d1f6e38b420c2efc993a8e1ecd Mon Sep 17 00:00:00 2001
From: nadijagraca
Date: Thu, 8 Aug 2024 14:49:39 +0200
Subject: [PATCH 06/18] addressing pr comments

---
 .../dashboard/_response_models/conftest.py        |  6 ++++++
 .../dashboard/_response_models/test_components.py | 15 +++++++++++++++
 .../dashboard/_response_models/test_layout.py     | 11 ++++++-----
 3 files changed, 27 insertions(+), 5 deletions(-)
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
index 90a1b613a..09aa0db66 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
@@ -2,6 +2,7 @@
 
 import pandas as pd
 import pytest
+import vizro.models as vm
 from langchain.output_parsers import PydanticOutputParser
 from langchain_community.llms.fake import FakeListLLM
 from vizro_ai.dashboard._response_models.components import ComponentPlan
 from vizro_ai.dashboard._response_models.page import PagePlan
 from vizro_ai.dashboard.utils import AllDfMetadata, DfMetadata
@@ -109,3 +110,8 @@ def filter_prompt():
     Create a filter from the following instructions: Filter the bar chart by column `a`.
     Do not make up things that are optional and DO NOT configure actions, action triggers or action chains.
     If no options are specified, leave them out."""
+
+
+@pytest.fixture
+def layout():
+    return vm.Layout(grid=[[0, 1]]).build()
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
index 4f7cdc4be..a12311afe 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
@@ -1,4 +1,6 @@
 import pytest
+import vizro.models as vm
+from tests_utils.asserts import assert_component_equal
 from vizro_ai.dashboard._response_models.components import ComponentPlan
 
 
@@ -25,3 +27,16 @@ def test_card_create(self, component_card, fake_llm_card):
                 all_df_metadata=None,
             )
             assert actual.type == "card"
+
+
+def test_card_create_valid(mocker, fake_llm_card, component_card, df):
+    mock_vizro_ai_call = mocker.patch("vizro_ai.VizroAI")
+    # Define the mock return value
+    mock_vizro_ai_call.return_value = vm.Card(text="This is card.")
+
+    result = component_card.create(
+        model=fake_llm_card,
+        all_df_metadata=None,
+    )
+
+    assert_component_equal(result.build(), vm.Card(text="this is a card").build(), keys_to_strip={"id"})
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
index 4156ebc06..8d84d629e 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
@@ -1,5 +1,6 @@
 import pytest
 import vizro.models as vm
+from tests_utils.asserts import assert_component_equal
 from vizro_ai.dashboard._pydantic_output import _get_pydantic_model
 from vizro_ai.dashboard._response_models.layout import LayoutPlan, _convert_to_grid
 
@@ -7,19 +8,19 @@
 class TestLayoutPlan:
     """Test layout creation."""
 
-    def test_structured_output_layout_create(self, fake_llm_layout, layout_description):
+    def test_structured_output_layout_create(self, fake_llm_layout, layout_description, layout):
         structured_output = _get_pydantic_model(
             query=layout_description, llm_model=fake_llm_layout, response_model=vm.Layout, df_info=None
         )
-        assert structured_output.dict(exclude={"id": True}) == vm.Layout(grid=[[0, 1]]).dict(exclude={"id": True})
+        assert_component_equal(structured_output.build(), layout, keys_to_strip={"id"})
 
-    def test_layout_plan(self):
+    def test_layout_plan(self, layout):
         layout_plan = LayoutPlan(
             layout_grid_template_areas=["graph card"],
         )
-        layout = layout_plan.create(["graph", "card"])
+        actual_layout = layout_plan.create(["graph", "card"])
 
-        assert layout.dict(exclude={"id": True}) == vm.Layout(grid=[[0, 1]]).dict(exclude={"id": True})
+        assert_component_equal(actual_layout.build(), layout, keys_to_strip={"id"})
 
 
 @pytest.mark.parametrize(

From dac7a8462fa0167a6e504ee1e9e9ae4278742d1f Mon Sep 17 00:00:00 2001
From: nadijagraca
Date: Thu, 8 Aug 2024 16:40:40 +0200
Subject: [PATCH 07/18] reverting the use of assert_component_equal

---
 .../unit/vizro-ai/dashboard/_response_models/conftest.py    | 2 +-
 .../vizro-ai/dashboard/_response_models/test_components.py  | 3 +--
 .../unit/vizro-ai/dashboard/_response_models/test_layout.py | 5 ++---
 3 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
index 09aa0db66..2bfc37e81 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
@@ -114,4 +114,4 @@ def filter_prompt():
 
 @pytest.fixture
 def layout():
-    return vm.Layout(grid=[[0, 1]]).build()
+    return vm.Layout(grid=[[0, 1]])
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
index a12311afe..99f70d548 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
@@ -1,6 +1,5 @@
 import pytest
 import vizro.models as vm
-from tests_utils.asserts import assert_component_equal
 from vizro_ai.dashboard._response_models.components import ComponentPlan
 
 
@@ -39,4 +38,4 @@ def test_card_create_valid(mocker, fake_llm_card, component_card, df):
         all_df_metadata=None,
     )
 
-    assert_component_equal(result.build(), vm.Card(text="this is a card").build(), keys_to_strip={"id"})
+    assert result.dict(exclude={"id": True}) == vm.Card(text="this is a card").dict(exclude={"id": True})
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
index 8d84d629e..cf254e9b9 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
@@ -1,6 +1,5 @@
 import pytest
 import vizro.models as vm
-from tests_utils.asserts import assert_component_equal
 from vizro_ai.dashboard._pydantic_output import _get_pydantic_model
 from vizro_ai.dashboard._response_models.layout import LayoutPlan, _convert_to_grid
 
@@ -12,7 +11,7 @@ def test_structured_output_layout_create(self, fake_llm_layout, layout_descripti
         structured_output = _get_pydantic_model(
             query=layout_description, llm_model=fake_llm_layout, response_model=vm.Layout, df_info=None
         )
-        assert_component_equal(structured_output.build(), layout, keys_to_strip={"id"})
+        assert structured_output.dict(exclude={"id": True}) == layout.dict(exclude={"id": True})
 
     def test_layout_plan(self, layout):
         layout_plan = LayoutPlan(
             layout_grid_template_areas=["graph card"],
         )
         actual_layout = layout_plan.create(["graph", "card"])
 
-        assert_component_equal(actual_layout.build(), layout, keys_to_strip={"id"})
+        assert actual_layout.dict(exclude={"id": True}) == layout.dict(exclude={"id": True})

From 768bead0a46dde7bfc23c0a9215e470ef77446d4 Mon Sep 17 00:00:00 2001
From: nadijagraca
Date: Thu, 8 Aug 2024 16:47:06 +0200
Subject: [PATCH 08/18] fixing tests

---
 .../tests/unit/vizro-ai/dashboard/test_pydantic_output.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py
index 36c50a0f8..1f8444176 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py
@@ -1,6 +1,5 @@
 import pytest
 import vizro.models as vm
-from tests_utils.asserts import assert_component_equal
 from vizro_ai.dashboard._pydantic_output import _create_message_content, _create_prompt_template, _get_pydantic_model
 
 
@@ -8,8 +7,8 @@ def test_get_pydantic_model_valid(component_description, fake_llm):
     pydantic_output = _get_pydantic_model(
         query=component_description, llm_model=fake_llm, response_model=vm.Card, df_info=None
     )
-    assert_component_equal(
-        pydantic_output.build(), vm.Card(text="this is a card", href="").build(), keys_to_strip={"id"}
+    assert pydantic_output.dict(exclude={"id": True}) == vm.Card(text="this is a card", href="").dict(
+        exclude={"id": True}
     )

From 017f1f1fba88ea3c6254ad10f0ffd3a3bf996976 Mon Sep 17 00:00:00 2001
From: nadijagraca
Date: Thu, 8 Aug 2024 16:55:05 +0200
Subject: [PATCH 09/18] adding pytest mock

---
 vizro-ai/hatch.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/vizro-ai/hatch.toml b/vizro-ai/hatch.toml
index 258e6c479..d3fa9feba 100644
--- a/vizro-ai/hatch.toml
+++ b/vizro-ai/hatch.toml
@@ -13,6 +13,7 @@ dependencies = [
   "devtools[pygments]",
   "coverage[toml]>=6.5",
   "pytest",
+  "pytest-mock",
   "toml",
   "nbformat>=4.2.0",
   "pyhamcrest",

From 0853de451ad699f46bd287166cc963953f932b20 Mon Sep 17 00:00:00 2001
From: Maximilian Schulz
Date: Wed, 14 Aug 2024 11:56:06 +0200
Subject: [PATCH 10/18] Mocking example

---
 .../dashboard/_response_models/conftest.py    | 21 +++++++++++++++
 .../_response_models/test_components.py       | 16 ++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
index 2bfc37e81..f95a1f6d5 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
@@ -8,6 +8,7 @@
 from vizro_ai.dashboard._response_models.components import ComponentPlan
 from vizro_ai.dashboard._response_models.page import PagePlan
 from vizro_ai.dashboard.utils import AllDfMetadata, DfMetadata
+import vizro.plotly.express as px
 
 
 class MockStructuredOutputLLM(FakeListLLM):
@@ -89,6 +90,26 @@ def component_card():
     )
 
 
+
+@pytest.fixture
+def component_plan_graph():
+    return ComponentPlan(
+        component_type="Graph",
+        component_description="Scatter chart with x-axis as 'a' and y-axis as 'b'",
+        component_id="graph_1",
+        df_name="bar_chart",
+    )
+
+@pytest.fixture
+def mock_vizro_ai_return(df):
+    return px.scatter(
+        data_frame=df,
+        x="a",
+        y="b",
+    )
+
+
+
 @pytest.fixture
 def component_card_2():
     return ComponentPlan(
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
index 99f70d548..9cd75b5c3 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
@@ -26,6 +26,22 @@ def test_card_create(self, component_card, fake_llm_card):
                 all_df_metadata=None,
             )
             assert actual.type == "card"
+
+    def test_graph_create_valid(self, mocker,component_plan_graph, mock_vizro_ai_return,fake_llm,df_metadata, df):
+        mock_vizro_ai_object = mocker.patch("vizro_ai.VizroAI.__init__")
+        mock_vizro_ai_object.return_value = None
+        mock_vizro_ai_call = mocker.patch("vizro_ai.VizroAI.plot")
+        mock_vizro_ai_call.return_value = mock_vizro_ai_return
+        result = component_plan_graph.create(
+            model=None,
+            all_df_metadata=df_metadata,
+        )
+        expected = vm.Graph(
+            id = "mock_id",
+            figure = mock_vizro_ai_return
+        )
+        assert result.dict(exclude={"id": True}) == expected.dict(exclude={"id": True}) #TODO: this should really be assert_components_equal
+
 
 def test_card_create_valid(mocker, fake_llm_card, component_card, df):

From 7ff9138e12c0bce227f2004e68c298a7ae5624f1 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 14 Aug 2024 10:00:40 +0000
Subject: [PATCH 11/18] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../dashboard/_response_models/conftest.py        |  5 ++---
 .../dashboard/_response_models/test_components.py | 14 ++++++--------
 2 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
index f95a1f6d5..00dfd9d84 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
@@ -3,12 +3,12 @@
 import pandas as pd
 import pytest
 import vizro.models as vm
+import vizro.plotly.express as px
 from langchain.output_parsers import PydanticOutputParser
 from langchain_community.llms.fake import FakeListLLM
 from vizro_ai.dashboard._response_models.components import ComponentPlan
 from vizro_ai.dashboard._response_models.page import PagePlan
 from vizro_ai.dashboard.utils import AllDfMetadata, DfMetadata
-import vizro.plotly.express as px
 
 
 class MockStructuredOutputLLM(FakeListLLM):
@@ -90,7 +90,6 @@ def component_card():
     )
 
 
-
 @pytest.fixture
 def component_plan_graph():
     return ComponentPlan(
@@ -100,6 +99,7 @@ def component_plan_graph():
         df_name="bar_chart",
     )
 
+
 @pytest.fixture
 def mock_vizro_ai_return(df):
     return px.scatter(
@@ -109,7 +109,6 @@ def mock_vizro_ai_return(df):
     )
 
 
-
 @pytest.fixture
 def component_card_2():
     return ComponentPlan(
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
index 9cd75b5c3..b4397e572 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
@@ -26,8 +26,8 @@ def test_card_create(self, component_card, fake_llm_card):
                 all_df_metadata=None,
             )
             assert actual.type == "card"
-
-    def test_graph_create_valid(self, mocker,component_plan_graph, mock_vizro_ai_return,fake_llm,df_metadata, df):
+
+    def test_graph_create_valid(self, mocker, component_plan_graph, mock_vizro_ai_return, fake_llm, df_metadata, df):
         mock_vizro_ai_object = mocker.patch("vizro_ai.VizroAI.__init__")
         mock_vizro_ai_object.return_value = None
         mock_vizro_ai_call = mocker.patch("vizro_ai.VizroAI.plot")
@@ -36,12 +36,10 @@ def test_graph_create_valid(self, mocker,component_plan_graph, mock_vizro_ai_ret
             model=None,
             all_df_metadata=df_metadata,
         )
-        expected = vm.Graph(
-            id = "mock_id",
-            figure = mock_vizro_ai_return
-        )
-        assert result.dict(exclude={"id": True}) == expected.dict(exclude={"id": True}) #TODO: this should really be assert_components_equal
-
+        expected = vm.Graph(id="mock_id", figure=mock_vizro_ai_return)
+        assert result.dict(exclude={"id": True}) == expected.dict(
+            exclude={"id": True}
+        )  # TODO: this should really be assert_components_equal
 
 def test_card_create_valid(mocker, fake_llm_card, component_card, df):

From 405fa05edb20729642f41f918bcc6b3159eaeffb Mon Sep 17 00:00:00 2001
From: nadijagraca
Date: Thu, 15 Aug 2024 14:54:53 +0200
Subject: [PATCH 12/18] addressing pr comments

---
 vizro-ai/pyproject.toml                       |  2 +
 vizro-ai/tests/conftest.py                    |  5 +++
 .../dashboard/_response_models/conftest.py    | 27 +++++++++--
 .../_response_models/test_components.py       | 45 ++++++++++++-------
 .../_response_models/test_controls.py         |  8 ++--
 .../dashboard/_response_models/test_layout.py | 10 ++---
 .../dashboard/_response_models/test_page.py   | 12 ++---
 .../tests/unit/vizro-ai/dashboard/conftest.py |  6 ++-
 .../dashboard/test_pydantic_output.py         | 10 ++---
 9 files changed, 84 insertions(+), 41 deletions(-)
 create mode 100644 vizro-ai/tests/conftest.py

diff --git a/vizro-ai/pyproject.toml b/vizro-ai/pyproject.toml
index d6c027b84..047cae64f 100644
--- a/vizro-ai/pyproject.toml
+++ b/vizro-ai/pyproject.toml
@@ -63,3 +63,5 @@ filterwarnings = [
   # Ignore LLMchian deprecation warning:
   "ignore:.*The class `LLMChain` was deprecated in LangChain 0.1.17"
 ]
+norecursedirs = ["tests/tests_utils"]
+pythonpath = ["tests/tests_utils"]
diff --git a/vizro-ai/tests/conftest.py b/vizro-ai/tests/conftest.py
new file mode 100644
index 000000000..35ee95331
--- /dev/null
+++ b/vizro-ai/tests/conftest.py
@@ -0,0 +1,5 @@
+import pytest
+
+# Allow our custom assert functions in tests_utils/asserts.py to do introspection nicely still.
+# See https://pytest.org/en/7.4.x/how-to/assert.html#assertion-introspection-details
+pytest.register_assert_rewrite("asserts")
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
index 00dfd9d84..d9983b014 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
@@ -6,6 +6,7 @@
 import vizro.plotly.express as px
 from langchain.output_parsers import PydanticOutputParser
 from langchain_community.llms.fake import FakeListLLM
+from vizro.tables import dash_ag_grid
 from vizro_ai.dashboard._response_models.components import ComponentPlan
 from vizro_ai.dashboard._response_models.page import PagePlan
 from vizro_ai.dashboard.utils import AllDfMetadata, DfMetadata
@@ -81,7 +82,7 @@ def df_metadata(df, df_schema, df_sample):
 
 
 @pytest.fixture
-def component_card():
+def component_plan_card():
     return ComponentPlan(
         component_type="Card",
         component_description="This is a card",
@@ -100,6 +101,16 @@ def component_plan_graph():
     )
 
 
+@pytest.fixture
+def component_plan_ag_grid():
+    return ComponentPlan(
+        component_type="AgGrid",
+        component_description="Ag grid showing columns 'a' and 'b' of dataframe",
+        component_id="ag_grid_1",
+        df_name="bar_chart",
+    )
+
+
 @pytest.fixture
 def mock_vizro_ai_return(df):
     return px.scatter(
@@ -109,6 +120,11 @@ def mock_vizro_ai_return(df):
     )
 
 
+@pytest.fixture
+def mock_vizro_ai_return_ag_grid(df):
+    return dash_ag_grid(data_frame=df)
+
+
 @pytest.fixture
 def component_card_2():
     return ComponentPlan(
@@ -120,8 +136,8 @@ def component_card_2():
 
 
 @pytest.fixture
-def page_plan(component_card):
-    return PagePlan(title="Test Page", components_plan=[component_card], controls_plan=[], layout_plan=None)
+def page_plan(component_plan_card):
+    return PagePlan(title="Test Page", components_plan=[component_plan_card], controls_plan=[], layout_plan=None)
 
 
 @pytest.fixture
@@ -135,3 +151,8 @@ def filter_prompt():
 @pytest.fixture
 def layout():
     return vm.Layout(grid=[[0, 1]])
+
+
+@pytest.fixture
+def expected_filter():
+    return vm.Filter(targets=["bar_chart"], column="a")
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
index b4397e572..b44d37a0d 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
@@ -1,5 +1,6 @@
 import pytest
 import vizro.models as vm
+from tests_utils.asserts import assert_component_equal
 from vizro_ai.dashboard._response_models.components import ComponentPlan
 
 
@@ -19,15 +20,15 @@ def test_component_plan_instantiation(self):
         assert component.df_name == "N/A"
 
     @pytest.mark.xfail(raises=ValueError, reason="Known issue: real model is required for .plot")
-    def test_card_create(self, component_card, fake_llm_card):
-        if component_card.component_type == "Card":
-            actual = component_card.create(
+    def test_card_create(self, component_plan_card, fake_llm_card):
+        if component_plan_card.component_type == "Card":
+            actual = component_plan_card.create(
                 model=fake_llm_card,
                 all_df_metadata=None,
             )
             assert actual.type == "card"
 
-    def test_graph_create_valid(self, mocker, component_plan_graph, mock_vizro_ai_return, fake_llm, df_metadata, df):
+    def test_graph_create_valid(self, mocker, component_plan_graph, mock_vizro_ai_return, df_metadata):
         mock_vizro_ai_object = mocker.patch("vizro_ai.VizroAI.__init__")
         mock_vizro_ai_object.return_value = None
         mock_vizro_ai_call = mocker.patch("vizro_ai.VizroAI.plot")
@@ -37,19 +38,31 @@ def test_graph_create_valid(self, mocker, component_plan_graph, mock_vizro_ai_re
             all_df_metadata=df_metadata,
         )
         expected = vm.Graph(id="mock_id", figure=mock_vizro_ai_return)
-        assert result.dict(exclude={"id": True}) == expected.dict(
-            exclude={"id": True}
-        )  # TODO: this should really be assert_components_equal
+        assert_component_equal(result.build(), expected.build(), keys_to_strip={"id"})
 
 
-def test_card_create_valid(mocker, fake_llm_card, component_card, df):
-    mock_vizro_ai_call = mocker.patch("vizro_ai.VizroAI")
-    # Define the mock return value
-    mock_vizro_ai_call.return_value = vm.Card(text="This is card.")
+    def test_card_create_valid(self, mocker, fake_llm_card, component_plan_card, expected_card):
+        mock_vizro_ai_call = mocker.patch("vizro_ai.VizroAI")
+        # Define the mock return value
+        mock_vizro_ai_call.return_value = vm.Card(text="This is card.")
 
-    result = component_card.create(
-        model=fake_llm_card,
-        all_df_metadata=None,
-    )
+        result = component_plan_card.create(
+            model=fake_llm_card,
+            all_df_metadata=None,
+        )
+
+        assert_component_equal(result.build(), expected_card.build(), keys_to_strip={"id"})
+
+    def test_ag_grid_create_valid(
+        self,
+        component_plan_ag_grid,
+        mock_vizro_ai_return_ag_grid,
+        df_metadata,
+    ):
+        result = component_plan_ag_grid.create(
+            model=None,
+            all_df_metadata=df_metadata,
+        )
+        expected = vm.AgGrid(id="mock", figure=mock_vizro_ai_return_ag_grid)
 
-    assert result.dict(exclude={"id": True}) == vm.Card(text="this is a card").dict(exclude={"id": True})
+        assert result.dict(exclude={"id": True, "figure": True}) == expected.dict(exclude={"id": True, "figure": True})
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py index 60bf7c5c4..46ee38de2 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py @@ -31,13 +31,11 @@ def test_create_filter_proxy_validate_columns(self, df_cols, df_schema, controll with pytest.raises(ValidationError, match="column must be one of"): filter_proxy(targets=["bar_chart"], column="x") - def test_create_filter_proxy(self, df_cols, df_schema, controllable_components): + def test_create_filter_proxy(self, df_cols, df_schema, controllable_components, expected_filter): filter_proxy = _create_filter_proxy(df_cols, df_schema, controllable_components) - actual_filter = filter_proxy(targets=["bar_chart"], column="a") + result = filter_proxy(targets=["bar_chart"], column="a") - assert actual_filter.dict(exclude={"id": True}) == vm.Filter(targets=["bar_chart"], column="a").dict( - exclude={"id": True} - ) + assert result.dict(exclude={"id": True}) == expected_filter.dict(exclude={"id": True}) class TestControlPlan: diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py index cf254e9b9..ac8556ed2 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py @@ -1,5 +1,6 @@ import pytest import vizro.models as vm +from tests_utils.asserts import assert_component_equal from vizro_ai.dashboard._pydantic_output import _get_pydantic_model from vizro_ai.dashboard._response_models.layout import LayoutPlan, _convert_to_grid @@ -8,18 +9,17 @@ class TestLayoutPlan: """Test layout creation.""" def test_structured_output_layout_create(self, fake_llm_layout, layout_description, layout): - structured_output = _get_pydantic_model( + result = _get_pydantic_model( query=layout_description, llm_model=fake_llm_layout, response_model=vm.Layout, df_info=None ) - assert structured_output.dict(exclude={"id": True}) == layout.dict(exclude={"id": True}) + assert_component_equal(result.build(), layout.build(), keys_to_strip={"id"}) def test_layout_plan(self, layout): layout_plan = LayoutPlan( layout_grid_template_areas=["graph card"], ) - actual_layout = layout_plan.create(["graph", "card"]) - - assert actual_layout.dict(exclude={"id": True}) == layout.dict(exclude={"id": True}) + result = layout_plan.create(["graph", "card"]) + assert_component_equal(result.build(), layout.build(), keys_to_strip={"id"}) @pytest.mark.parametrize( diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py index e67dc821b..242dc88d0 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py @@ -10,10 +10,10 @@ class TestPagePlan: """Test for page plan.""" - def test_dashboard_plan(self, component_card): + def test_dashboard_plan(self, component_plan_card): page_plan = PagePlan( title="Test Page", - components_plan=[component_card], + components_plan=[component_plan_card], controls_plan=[], layout_plan=None, ) @@ -34,10 +34,10 @@ def test_page_plan_invalid_components(self): layout_plan=None, ) - def test_page_plan_unsupported_specs(self, component_card): + def 
test_page_plan_unsupported_specs(self, component_plan_card): page_plan = PagePlan( title="Test Page", - components_plan=[component_card], + components_plan=[component_plan_card], controls_plan=[], layout_plan=None, unsupported_specs=["Unknown"], @@ -45,11 +45,11 @@ def test_page_plan_unsupported_specs(self, component_card): assert page_plan.unsupported_specs == [] - def test_page_plan_duplicate_components(self, component_card): + def test_page_plan_duplicate_components(self, component_plan_card): with pytest.raises(ValidationError): PagePlan( title="Test Page", - components_plan=[component_card, component_card], + components_plan=[component_plan_card, component_plan_card], controls_plan=[], layout_plan=None, ) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py index 27d063803..5f6ab21d8 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py @@ -1,5 +1,5 @@ from typing import Any, List - +import vizro.models as vm import pytest from langchain.output_parsers import PydanticOutputParser from langchain_community.llms.fake import FakeListLLM @@ -26,6 +26,10 @@ def fake_llm(): def component_description(): return "This is a card" +@pytest.fixture +def expected_card(): + return vm.Card(text="this is a card") + @pytest.fixture def query(): diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py index 1f8444176..870d463f8 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py @@ -1,15 +1,15 @@ import pytest import vizro.models as vm +from tests_utils.asserts import assert_component_equal from vizro_ai.dashboard._pydantic_output import _create_message_content, _create_prompt_template, _get_pydantic_model -def test_get_pydantic_model_valid(component_description, fake_llm): - pydantic_output = _get_pydantic_model( +def test_get_pydantic_model_valid(component_description, fake_llm, expected_card): + result = _get_pydantic_model( query=component_description, llm_model=fake_llm, response_model=vm.Card, df_info=None ) - assert pydantic_output.dict(exclude={"id": True}) == vm.Card(text="this is a card", href="").dict( - exclude={"id": True} - ) + + assert_component_equal(result.build(), expected_card.build(), keys_to_strip={"id"}) def test_get_pydantic_model_invalid(component_description, fake_llm_invalid): From 10022bc53b9668d9805ea5b3f6c3a9abcef94664 Mon Sep 17 00:00:00 2001 From: nadijagraca Date: Thu, 15 Aug 2024 14:56:16 +0200 Subject: [PATCH 13/18] linting --- vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py | 4 +++- .../tests/unit/vizro-ai/dashboard/test_pydantic_output.py | 4 +--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py index 5f6ab21d8..d370c1830 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py @@ -1,6 +1,7 @@ from typing import Any, List -import vizro.models as vm + import pytest +import vizro.models as vm from langchain.output_parsers import PydanticOutputParser from langchain_community.llms.fake import FakeListLLM from langchain_core.messages import HumanMessage @@ -26,6 +27,7 @@ def fake_llm(): def component_description(): return "This is a card" + @pytest.fixture def expected_card(): return 
vm.Card(text="this is a card") diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py index 870d463f8..ae0e44ec7 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py @@ -5,9 +5,7 @@ def test_get_pydantic_model_valid(component_description, fake_llm, expected_card): - result = _get_pydantic_model( - query=component_description, llm_model=fake_llm, response_model=vm.Card, df_info=None - ) + result = _get_pydantic_model(query=component_description, llm_model=fake_llm, response_model=vm.Card, df_info=None) assert_component_equal(result.build(), expected_card.build(), keys_to_strip={"id"}) From 32236dc8f9591daa6b5d3542be08e537ee91a3a3 Mon Sep 17 00:00:00 2001 From: nadijagraca Date: Fri, 16 Aug 2024 11:27:47 +0200 Subject: [PATCH 14/18] reverting use of asserts_component_equal --- vizro-ai/pyproject.toml | 2 -- vizro-ai/tests/conftest.py | 5 ----- .../dashboard/_response_models/test_components.py | 9 ++++++--- .../vizro-ai/dashboard/_response_models/test_layout.py | 5 ++--- .../unit/vizro-ai/dashboard/test_pydantic_output.py | 5 +++-- 5 files changed, 11 insertions(+), 15 deletions(-) delete mode 100644 vizro-ai/tests/conftest.py diff --git a/vizro-ai/pyproject.toml b/vizro-ai/pyproject.toml index 047cae64f..d6c027b84 100644 --- a/vizro-ai/pyproject.toml +++ b/vizro-ai/pyproject.toml @@ -63,5 +63,3 @@ filterwarnings = [ # Ignore LLMchian deprecation warning: "ignore:.*The class `LLMChain` was deprecated in LangChain 0.1.17" ] -norecursedirs = ["tests/tests_utils"] -pythonpath = ["tests/tests_utils"] diff --git a/vizro-ai/tests/conftest.py b/vizro-ai/tests/conftest.py deleted file mode 100644 index 35ee95331..000000000 --- a/vizro-ai/tests/conftest.py +++ /dev/null @@ -1,5 +0,0 @@ -import pytest - -# Allow our custom assert functions in tests_utils/asserts.py to do introspection nicely still. 
-# See https://pytest.org/en/7.4.x/how-to/assert.html#assertion-introspection-details -pytest.register_assert_rewrite("asserts") diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py index b44d37a0d..322108dbc 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py @@ -1,6 +1,5 @@ import pytest import vizro.models as vm -from tests_utils.asserts import assert_component_equal from vizro_ai.dashboard._response_models.components import ComponentPlan @@ -39,7 +38,9 @@ def test_graph_create_valid(self, mocker, component_plan_graph, mock_vizro_ai_re ) expected = vm.Graph(id="mock_id", figure=mock_vizro_ai_return) - assert_component_equal(result.build(), expected.build(), keys_to_strip={"id"}) + assert result.dict(exclude={"id": True}) == expected.dict( + exclude={"id": True} + ) def test_card_create_valid(self, mocker, fake_llm_card, component_plan_card, expected_card): mock_vizro_ai_call = mocker.patch("vizro_ai.VizroAI") @@ -51,7 +52,9 @@ def test_card_create_valid(self, mocker, fake_llm_card, component_plan_card, exp all_df_metadata=None, ) - assert_component_equal(result.build(), expected_card.build(), keys_to_strip={"id"}) + assert result.dict(exclude={"id": True}) == expected_card.dict( + exclude={"id": True} + ) def test_ag_grid_create_valid( self, diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py index ac8556ed2..435d70ac8 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py @@ -1,6 +1,5 @@ import pytest import vizro.models as vm -from tests_utils.asserts import assert_component_equal from vizro_ai.dashboard._pydantic_output import _get_pydantic_model from vizro_ai.dashboard._response_models.layout import LayoutPlan, _convert_to_grid @@ -12,14 +11,14 @@ def test_structured_output_layout_create(self, fake_llm_layout, layout_descripti result = _get_pydantic_model( query=layout_description, llm_model=fake_llm_layout, response_model=vm.Layout, df_info=None ) - assert_component_equal(result.build(), layout.build(), keys_to_strip={"id"}) + assert result.dict(exclude={"id": True}) == layout.dict(exclude={"id": True}) def test_layout_plan(self, layout): layout_plan = LayoutPlan( layout_grid_template_areas=["graph card"], ) result = layout_plan.create(["graph", "card"]) - assert_component_equal(result.build(), layout.build(), keys_to_strip={"id"}) + assert result.dict(exclude={"id": True}) == layout.dict(exclude={"id": True}) @pytest.mark.parametrize( diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py index ae0e44ec7..df9b14e0b 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py @@ -1,13 +1,14 @@ import pytest import vizro.models as vm -from tests_utils.asserts import assert_component_equal from vizro_ai.dashboard._pydantic_output import _create_message_content, _create_prompt_template, _get_pydantic_model def test_get_pydantic_model_valid(component_description, fake_llm, expected_card): result = _get_pydantic_model(query=component_description, llm_model=fake_llm, 
response_model=vm.Card, df_info=None)
 
-    assert_component_equal(result.build(), expected_card.build(), keys_to_strip={"id"})
+    assert result.dict(exclude={"id": True}) == expected_card.dict(
+        exclude={"id": True}
+    )
 
 
 def test_get_pydantic_model_invalid(component_description, fake_llm_invalid):

From f87ae0904610a61852ab38aa07476b368e490667 Mon Sep 17 00:00:00 2001
From: nadijagraca
Date: Fri, 16 Aug 2024 12:15:07 +0200
Subject: [PATCH 15/18] updating tests

---
 .../dashboard/_response_models/components.py  |  4 +-
 .../dashboard/_response_models/conftest.py    |  7 +++-
 .../_response_models/test_components.py       | 39 +++++++------------
 .../dashboard/test_pydantic_output.py         |  4 +-
 4 files changed, 22 insertions(+), 32 deletions(-)

diff --git a/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py b/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py
index feb0dfde8..c26fe84f1 100644
--- a/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py
+++ b/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py
@@ -45,10 +45,10 @@ def create(self, model, all_df_metadata) -> Union[vm.Card, vm.AgGrid, vm.Figure]
         """Create the component."""
         from vizro_ai import VizroAI
 
-        vizro_ai = VizroAI(model=model)
-
         try:
             if self.component_type == "Graph":
+                vizro_ai = VizroAI(model=model)
+
                 return vm.Graph(
                     id=self.component_id,
                     figure=vizro_ai.plot(
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
index d9983b014..7d08b86dc 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py
@@ -121,10 +121,15 @@ def mock_vizro_ai_return(df):
 
 
 @pytest.fixture
-def mock_vizro_ai_return_ag_grid(df):
+def mock_dash_ag_grid(df):
     return dash_ag_grid(data_frame=df)
 
 
+@pytest.fixture
+def mock_return_ag_grid(mock_dash_ag_grid):
+    return vm.AgGrid(figure=mock_dash_ag_grid, id="aggrid")
+
+
 @pytest.fixture
 def component_card_2():
     return ComponentPlan(
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
index 322108dbc..7ef1b1411 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
@@ -1,4 +1,3 @@
-import pytest
 import vizro.models as vm
 from vizro_ai.dashboard._response_models.components import ComponentPlan
 
@@ -18,16 +17,7 @@ def test_component_plan_instantiation(self):
         assert component.component_description == "This is a card"
         assert component.df_name == "N/A"
 
-    @pytest.mark.xfail(raises=ValueError, reason="Known issue: real model is required for .plot")
-    def test_card_create(self, component_plan_card, fake_llm_card):
-        if component_plan_card.component_type == "Card":
-            actual = component_plan_card.create(
-                model=fake_llm_card,
-                all_df_metadata=None,
-            )
-            assert actual.type == "card"
-
-    def test_graph_create_valid(self, mocker, component_plan_graph, mock_vizro_ai_return, df_metadata):
+    def test_create_graph(self, mocker, component_plan_graph, mock_vizro_ai_return, df_metadata):
         mock_vizro_ai_object = mocker.patch("vizro_ai.VizroAI.__init__")
         mock_vizro_ai_object.return_value = None
         mock_vizro_ai_call = mocker.patch("vizro_ai.VizroAI.plot")
@@ -38,34 +28,31 @@ def test_graph_create_valid(self, mocker, component_plan_graph, mock_vizro_ai_re
         )
         expected = vm.Graph(id="mock_id", 
figure=mock_vizro_ai_return) - assert result.dict(exclude={"id": True}) == expected.dict( - exclude={"id": True} - ) - - def test_card_create_valid(self, mocker, fake_llm_card, component_plan_card, expected_card): - mock_vizro_ai_call = mocker.patch("vizro_ai.VizroAI") - # Define the mock return value - mock_vizro_ai_call.return_value = vm.Card(text="This is card.") + assert result.dict(exclude={"id": True}) == expected.dict(exclude={"id": True}) + def test_create_card(self, fake_llm_card, component_plan_card, expected_card): result = component_plan_card.create( model=fake_llm_card, all_df_metadata=None, ) - assert result.dict(exclude={"id": True}) == expected_card.dict( - exclude={"id": True} - ) + assert result.dict(exclude={"id": True}) == expected_card.dict(exclude={"id": True}) - def test_ag_grid_create_valid( + def test_create_ag_grid( self, + mocker, component_plan_ag_grid, - mock_vizro_ai_return_ag_grid, + mock_dash_ag_grid, + mock_return_ag_grid, df_metadata, ): + mocker.patch( + "vizro_ai.dashboard._response_models.components.ComponentPlan.create", return_value=mock_return_ag_grid + ) result = component_plan_ag_grid.create( model=None, all_df_metadata=df_metadata, ) - expected = vm.AgGrid(id="mock", figure=mock_vizro_ai_return_ag_grid) + expected = vm.AgGrid(id="ag_grid", figure=mock_dash_ag_grid) - assert result.dict(exclude={"id": True, "figure": True}) == expected.dict(exclude={"id": True, "figure": True}) + assert result.dict(exclude={"id": True}) == expected.dict(exclude={"id": True}) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py index df9b14e0b..e349b4bd3 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py @@ -6,9 +6,7 @@ def test_get_pydantic_model_valid(component_description, fake_llm, expected_card): result = _get_pydantic_model(query=component_description, llm_model=fake_llm, response_model=vm.Card, df_info=None) - assert result.dict(exclude={"id": True}) == expected_card.dict( - exclude={"id": True} - ) + assert result.dict(exclude={"id": True}) == expected_card.dict(exclude={"id": True}) def test_get_pydantic_model_invalid(component_description, fake_llm_invalid): From 39ff55f0d983c71d7a542fdc12ef07aaa19bbcd3 Mon Sep 17 00:00:00 2001 From: nadijagraca Date: Mon, 19 Aug 2024 12:42:56 +0200 Subject: [PATCH 16/18] addressing pr comments --- .../dashboard/_response_models/conftest.py | 11 --- .../_response_models/test_components.py | 19 ---- .../_response_models/test_controls.py | 66 +++++++++----- .../_response_models/test_dashboard.py | 6 +- .../dashboard/_response_models/test_layout.py | 91 +++++++++++-------- .../dashboard/_response_models/test_page.py | 4 +- 6 files changed, 100 insertions(+), 97 deletions(-) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py index 7d08b86dc..18344b9d5 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py @@ -6,7 +6,6 @@ import vizro.plotly.express as px from langchain.output_parsers import PydanticOutputParser from langchain_community.llms.fake import FakeListLLM -from vizro.tables import dash_ag_grid from vizro_ai.dashboard._response_models.components import ComponentPlan from vizro_ai.dashboard._response_models.page import PagePlan from 
vizro_ai.dashboard.utils import AllDfMetadata, DfMetadata
 
@@ -120,16 +119,6 @@ def mock_vizro_ai_return(df):
     )
 
 
-@pytest.fixture
-def mock_dash_ag_grid(df):
-    return dash_ag_grid(data_frame=df)
-
-
-@pytest.fixture
-def mock_return_ag_grid(mock_dash_ag_grid):
-    return vm.AgGrid(figure=mock_dash_ag_grid, id="aggrid")
-
-
 @pytest.fixture
 def component_card_2():
     return ComponentPlan(
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
index 7ef1b1411..f4c7e9aa4 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py
@@ -37,22 +37,3 @@ def test_create_card(self, fake_llm_card, component_plan_card, expected_card):
         )
 
         assert result.dict(exclude={"id": True}) == expected_card.dict(exclude={"id": True})
-
-    def test_create_ag_grid(
-        self,
-        mocker,
-        component_plan_ag_grid,
-        mock_dash_ag_grid,
-        mock_return_ag_grid,
-        df_metadata,
-    ):
-        mocker.patch(
-            "vizro_ai.dashboard._response_models.components.ComponentPlan.create", return_value=mock_return_ag_grid
-        )
-        result = component_plan_ag_grid.create(
-            model=None,
-            all_df_metadata=df_metadata,
-        )
-        expected = vm.AgGrid(id="ag_grid", figure=mock_dash_ag_grid)
-
-        assert result.dict(exclude={"id": True}) == expected.dict(exclude={"id": True})
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
index 46ee38de2..d356d40fb 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py
@@ -1,8 +1,10 @@
+import logging
+
 import pytest
 import vizro.models as vm
 from vizro.managers import model_manager
 from vizro.models import VizroBaseModel
-from vizro_ai.dashboard._response_models.controls import ControlPlan, _create_filter, _create_filter_proxy
+from vizro_ai.dashboard._response_models.controls import ControlPlan, _create_filter_proxy
 
 try:
     from pydantic.v1 import ValidationError
@@ -38,37 +40,45 @@ def test_create_filter_proxy(self, df_cols, df_schema, controllable_components, expected_filter):
         assert result.dict(exclude={"id": True}) == expected_filter.dict(exclude={"id": True})
 
 
-class TestControlPlan:
-    """Test control plan."""
+class TestControlCreate:
+    """Test control creation."""
 
-    def test_control_plan_invalid_df_name(self, fake_llm_filter, df_metadata):
+    def test_control_create_valid(self, fake_llm_filter, controllable_components, df_metadata):
         control_plan = ControlPlan(
             control_type="Filter",
-            control_description="Create a filter that filters the data based on the column 'a'.",
-            df_name="population_chart",
+            control_description="Create a parameter that targets the data based on the column 'a'.",
+            df_name="bar_chart",
+        )
+        result = control_plan.create(
+            model=fake_llm_filter, controllable_components=controllable_components, all_df_metadata=df_metadata
         )
-        default_control = control_plan.create(
-            model=fake_llm_filter, controllable_components=["bar_chart"], all_df_metadata=df_metadata
+        assert result.dict(exclude={"id": True}) == vm.Filter(targets=["bar_chart"], column="a").dict(
+            exclude={"id": True}
         )
-        assert default_control is None
 
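A note on the comparison idiom used in the test above and throughout this patch: every `VizroBaseModel` carries an auto-generated `id`, so two otherwise identical models never compare equal directly, which is why the assertions compare `.dict(exclude={"id": True})` dumps. A minimal helper capturing this idiom, as a hypothetical sketch for illustration rather than code from this patch:

    import vizro.models as vm

    def assert_equal_ignoring_id(left: vm.VizroBaseModel, right: vm.VizroBaseModel) -> None:
        # Compare the pydantic dumps with the auto-generated id stripped out.
        assert left.dict(exclude={"id": True}) == right.dict(exclude={"id": True})
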
-    def test_control_plan_invalid_type(self, fake_llm_filter, df_metadata):
-        with pytest.raises(ValidationError):
-            ControlPlan(
-                control_type="parameter",
+    def test_control_create_invalid_df_name(
+        self, fake_llm_filter, df_metadata, caplog
+    ):  # testing the fallback when an invalid dataframe name is provided to ControlPlan.
+        with caplog.at_level(logging.WARNING):
+            control_plan = ControlPlan(
+                control_type="Filter",
                 control_description="Create a parameter that targets the data based on the column 'a'.",
-                df_name="bar_chart",
+                df_name="line_chart",
             )
+            result = control_plan.create(
+                model=fake_llm_filter, controllable_components=["bar_chart"], all_df_metadata=df_metadata
+            )
+
+        assert result is None
+        assert "Dataframe line_chart not found in metadata, returning default values." in caplog.text
 
+    def test_control_create_invalid_control_type(self, fake_llm_filter, df_metadata):
+        # ControlPlan only accepts control_type="Filter", so an unsupported control
+        # type fails validation when the plan itself is instantiated.
+        with pytest.raises(ValidationError):
+            ControlPlan(
+                control_type="Parameter",
+                control_description="Create a parameter that targets the data based on the column 'a'.",
+                df_name="bar_chart",
+            )
 
-def test_create_filter(filter_prompt, fake_llm_filter, df_cols, df_schema, controllable_components):
-    actual_filter = _create_filter(
-        filter_prompt=filter_prompt,
-        model=fake_llm_filter,
-        df_cols=df_cols,
-        df_schema=df_schema,
-        controllable_components=controllable_components,
-    )
-    assert actual_filter.dict(exclude={"id": True}) == vm.Filter(targets=["bar_chart"], column="a").dict(
-        exclude={"id": True}
-    )
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py
index 25b56eb9a..b06a567ac 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py
@@ -1,10 +1,10 @@
 from vizro_ai.dashboard._response_models.dashboard import DashboardPlan
 
 
-class TestDashboardPlanner:
-    """Tests dashboard planner."""
+class TestDashboardCreate:
+    """Tests dashboard plan creation."""
 
-    def test_dashboard_planner(self, page_plan):
+    def test_dashboard_plan_instantiation(self, page_plan):
         dashboard_plan = DashboardPlan(
             title="Test Dashboard",
             pages=[page_plan],
diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
index 435d70ac8..44af9e2ef 100644
--- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
+++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py
@@ -1,47 +1,64 @@
+import logging
+
 import pytest
 import vizro.models as vm
-from vizro_ai.dashboard._pydantic_output import _get_pydantic_model
-from vizro_ai.dashboard._response_models.layout import LayoutPlan, _convert_to_grid
+from vizro_ai.dashboard._response_models.layout import LayoutPlan
+
+try:
+    from pydantic.v1 import ValidationError
+except ImportError:  # pragma: no cov
+    pass
 
 
-class TestLayoutPlan:
+class TestLayoutCreate:
     """Test layout creation."""
 
-    def test_structured_output_layout_create(self, fake_llm_layout, layout_description, layout):
-        result = _get_pydantic_model(
-            query=layout_description, llm_model=fake_llm_layout, response_model=vm.Layout, df_info=None
+    
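For orientation before the parametrized cases below: `layout_grid_template_areas` follows CSS grid-template-areas semantics, where each row lists component ids separated by whitespace and "." marks an empty cell, and `LayoutPlan.create` converts those rows into the integer grid that `vm.Layout` expects (the component's index per cell, with -1 for an empty cell). A rough sketch of that conversion, assuming well-formed rows (illustrative only, not the library implementation):

    def areas_to_grid(areas: list[str], component_ids: list[str]) -> list[list[int]]:
        # Replace each named cell with its index in component_ids; "." becomes -1.
        return [[-1 if cell == "." else component_ids.index(cell) for cell in row.split()] for row in areas]

    # areas_to_grid(["card_1 scatter_plot scatter_plot", "card_2 scatter_plot scatter_plot"],
    #               ["card_1", "scatter_plot", "card_2"]) returns [[0, 1, 1], [2, 1, 1]]
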
@pytest.mark.parametrize( + "layout_grid_template_areas, component_ids, grid", + [ + ( + ["card_1 scatter_plot scatter_plot", "card_2 scatter_plot scatter_plot"], + ["card_1", "scatter_plot", "card_2"], + [[0, 1, 1], [2, 1, 1]], + ), + ( + ["card_1 scatter_plot scatter_plot", ". scatter_plot scatter_plot"], + ["card_1", "scatter_plot"], + [[0, 1, 1], [-1, 1, 1]], + ), + ], + ) + def test_layout_create_valid(self, layout_grid_template_areas, component_ids, grid): + layout_plan = LayoutPlan( + layout_grid_template_areas=layout_grid_template_areas, ) - assert result.dict(exclude={"id": True}) == layout.dict(exclude={"id": True}) + result = layout_plan.create(component_ids=component_ids) + expected = vm.Layout(grid=grid) - def test_layout_plan(self, layout): + assert result.dict(exclude={"id": True}) == expected.dict(exclude={"id": True}) + + @pytest.mark.parametrize( + "layout_grid_template_areas, component_ids, error_message", + [ + ( + ["card_1 scatter_plot scatter_plot", "card_2 scatter_plot scatter_plot"], + ["card_1", "scatter_plot"], + "Build failed for `Layout", + ), + ( + ["card_1 scatter_plot scatter_plot", "card_2 card_2 scatter_plot"], + ["card_1", "scatter_plot", "card_2"], + "Calculated grid which caused the error:", + ), + ], + ) + def test_layout_create_invalid(self, layout_grid_template_areas, component_ids, error_message, caplog): layout_plan = LayoutPlan( - layout_grid_template_areas=["graph card"], + layout_grid_template_areas=layout_grid_template_areas, ) - result = layout_plan.create(["graph", "card"]) - assert result.dict(exclude={"id": True}) == layout.dict(exclude={"id": True}) - - -@pytest.mark.parametrize( - "layout_grid_template_areas, component_ids, grid", - [ - ( - ["card_1 scatter_plot scatter_plot", "card_2 scatter_plot scatter_plot"], - ["card_1", "scatter_plot", "card_2"], - [[0, 1, 1], [2, 1, 1]], - ), - ( - ["card_1 scatter_plot scatter_plot", "card_2 scatter_plot scatter_plot"], - ["card_1", "scatter_plot"], - [], - ), - ( - ["card_1 scatter_plot scatter_plot", ". 
scatter_plot scatter_plot"], - ["card_1", "scatter_plot"], - [[0, 1, 1], [-1, 1, 1]], - ), - ], -) -def test_convert_to_grid(layout_grid_template_areas, component_ids, grid): - actual_grid = _convert_to_grid(layout_grid_template_areas=layout_grid_template_areas, component_ids=component_ids) - - assert actual_grid == grid + + with caplog.at_level(logging.WARNING): + result = layout_plan.create(component_ids=component_ids) + + assert error_message in caplog.text + assert result is None diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py index 242dc88d0..877c522f5 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py @@ -7,10 +7,10 @@ from pydantic import ValidationError -class TestPagePlan: +class TestPageCreate: """Test for page plan.""" - def test_dashboard_plan(self, component_plan_card): + def test_page_plan_instantiation(self, component_plan_card): page_plan = PagePlan( title="Test Page", components_plan=[component_plan_card], From 98db7edc8e91cc8f0b3cb779a23d1264248a5b09 Mon Sep 17 00:00:00 2001 From: nadijagraca Date: Mon, 19 Aug 2024 13:17:43 +0200 Subject: [PATCH 17/18] remove unused import --- .../unit/vizro-ai/dashboard/_response_models/test_layout.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py index 44af9e2ef..38da05713 100644 --- a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py @@ -4,11 +4,6 @@ import vizro.models as vm from vizro_ai.dashboard._response_models.layout import LayoutPlan -try: - from pydantic.v1 import ValidationError -except ImportError: # pragma: no cov - pass - class TestLayoutCreate: """Test layout creation.""" From 5477c43f1b230cc90a960cb3afdf0e1d852c0e5b Mon Sep 17 00:00:00 2001 From: nadijagraca Date: Tue, 20 Aug 2024 09:54:47 +0200 Subject: [PATCH 18/18] addressing pr comments --- .../src/vizro_ai/dashboard/_response_models/components.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py b/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py index c26fe84f1..303df2239 100644 --- a/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py +++ b/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py @@ -43,10 +43,10 @@ class ComponentPlan(BaseModel): def create(self, model, all_df_metadata) -> Union[vm.Card, vm.AgGrid, vm.Figure]: """Create the component.""" - from vizro_ai import VizroAI - try: if self.component_type == "Graph": + from vizro_ai import VizroAI + vizro_ai = VizroAI(model=model) return vm.Graph(