diff --git a/vizro-ai/changelog.d/20240802_144558_nadija_ratkusic_graca_add_unit_tests.md b/vizro-ai/changelog.d/20240802_144558_nadija_ratkusic_graca_add_unit_tests.md new file mode 100644 index 000000000..f1f65e73c --- /dev/null +++ b/vizro-ai/changelog.d/20240802_144558_nadija_ratkusic_graca_add_unit_tests.md @@ -0,0 +1,48 @@ + + + + + + + + + diff --git a/vizro-ai/hatch.toml b/vizro-ai/hatch.toml index 27d7a20b0..032c0a9af 100644 --- a/vizro-ai/hatch.toml +++ b/vizro-ai/hatch.toml @@ -13,6 +13,7 @@ dependencies = [ "devtools[pygments]", "coverage[toml]>=6.5", "pytest", + "pytest-mock", "pytest-rerunfailures", "toml", "nbformat>=4.2.0", diff --git a/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py b/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py index feb0dfde8..303df2239 100644 --- a/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py +++ b/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py @@ -43,12 +43,12 @@ class ComponentPlan(BaseModel): def create(self, model, all_df_metadata) -> Union[vm.Card, vm.AgGrid, vm.Figure]: """Create the component.""" - from vizro_ai import VizroAI - - vizro_ai = VizroAI(model=model) - try: if self.component_type == "Graph": + from vizro_ai import VizroAI + + vizro_ai = VizroAI(model=model) + return vm.Graph( id=self.component_id, figure=vizro_ai.plot( diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/conftest.py new file mode 100644 index 000000000..28b14fa89 --- /dev/null +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/conftest.py @@ -0,0 +1,31 @@ +import pandas as pd +import pytest +from langchain_core.messages import HumanMessage +from vizro_ai.dashboard._graph.dashboard_creation import GraphState +from vizro_ai.dashboard.utils import AllDfMetadata, DfMetadata + + +@pytest.fixture +def dataframes(): + return [pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]})] + + +@pytest.fixture +def 
df_metadata(): + df_metadata = AllDfMetadata({}) + df_metadata.all_df_metadata["gdp_chart"] = DfMetadata( + df_schema={"a": "int64", "b": "int64"}, + df=pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]}), + df_sample=pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]}), + ) + return df_metadata + + +@pytest.fixture +def graph_state(dataframes, df_metadata): + return GraphState( + messages=[HumanMessage(content="contents of the message")], + dfs=dataframes, + all_df_metadata=df_metadata, + pages=[], + ) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/test_dashboard_creation.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/test_dashboard_creation.py new file mode 100644 index 000000000..e5bd38bd5 --- /dev/null +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_graph/test_dashboard_creation.py @@ -0,0 +1,37 @@ +import pandas as pd +import pytest + +try: + from pydantic.v1 import ValidationError +except ImportError: # pragma: no cov + from pydantic import ValidationError + +from langchain_core.messages import HumanMessage +from vizro_ai.dashboard._graph.dashboard_creation import GraphState + + +class TestConfig: + """Test GraphState config creation.""" + + def test_graph_state_instantiation(self, graph_state, dataframes): + assert isinstance(graph_state, GraphState) + assert graph_state.messages[0].content == "contents of the message" + assert graph_state.dfs == dataframes + assert "gdp_chart" in graph_state.all_df_metadata.all_df_metadata + assert graph_state.pages == [] + + @pytest.mark.parametrize( + "dataframes, output_error", + [ + (pd.DataFrame(), "value is not a valid list"), + ([pd.DataFrame(), {}], "instance of DataFrame expected"), + ], + ) + def test_check_dataframes(self, dataframes, output_error, df_metadata): + with pytest.raises(ValidationError, match=output_error): + GraphState( + messages=[HumanMessage(content="contents of the message")], + dfs=dataframes, + all_df_metadata=df_metadata, + pages=[], + ) diff --git 
a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py new file mode 100644 index 000000000..18344b9d5 --- /dev/null +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/conftest.py @@ -0,0 +1,152 @@ +from typing import Any, List + +import pandas as pd +import pytest +import vizro.models as vm +import vizro.plotly.express as px +from langchain.output_parsers import PydanticOutputParser +from langchain_community.llms.fake import FakeListLLM +from vizro_ai.dashboard._response_models.components import ComponentPlan +from vizro_ai.dashboard._response_models.page import PagePlan +from vizro_ai.dashboard.utils import AllDfMetadata, DfMetadata + + +class MockStructuredOutputLLM(FakeListLLM): + def bind_tools(self, tools: List[Any]): + return super().bind(tools=tools) + + def with_structured_output(self, schema): + llm = self + output_parser = PydanticOutputParser(pydantic_object=schema) + return llm | output_parser + + +@pytest.fixture +def fake_llm_card(): + response = ['{"text":"this is a card","href":""}'] + return MockStructuredOutputLLM(responses=response) + + +@pytest.fixture +def fake_llm_layout(): + response = ['{"grid":[[0,1]]}'] + return MockStructuredOutputLLM(responses=response) + + +@pytest.fixture +def fake_llm_filter(): + response = ['{"column": "a", "targets": ["bar_chart"]}'] + return MockStructuredOutputLLM(responses=response) + + +@pytest.fixture +def controllable_components(): + return ["bar_chart"] + + +@pytest.fixture +def layout_description(): + return "The layout of this page should use `grid=[[0,1]]`" + + +@pytest.fixture +def df(): + return pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [4, 5, 6, 7, 8]}) + + +@pytest.fixture +def df_cols(): + return ["a", "b"] + + +@pytest.fixture +def df_sample(df): + return df.sample(5, replace=True, random_state=19) + + +@pytest.fixture +def df_schema(): + return {"a": "int64", "b": "int64"} + + +@pytest.fixture +def 
df_metadata(df, df_schema, df_sample): + df_metadata = AllDfMetadata({}) + df_metadata.all_df_metadata["bar_chart"] = DfMetadata( + df_schema=df_schema, + df=df, + df_sample=df_sample, + ) + return df_metadata + + +@pytest.fixture +def component_plan_card(): + return ComponentPlan( + component_type="Card", + component_description="This is a card", + component_id="card_1", + df_name="N/A", + ) + + +@pytest.fixture +def component_plan_graph(): + return ComponentPlan( + component_type="Graph", + component_description="Scatter chart with x-axis as 'a' and y-axis as 'b'", + component_id="graph_1", + df_name="bar_chart", + ) + + +@pytest.fixture +def component_plan_ag_grid(): + return ComponentPlan( + component_type="AgGrid", + component_description="Ag grid showing columns 'a' and 'b' of dataframe", + component_id="ag_grid_1", + df_name="bar_chart", + ) + + +@pytest.fixture +def mock_vizro_ai_return(df): + return px.scatter( + data_frame=df, + x="a", + y="b", + ) + + +@pytest.fixture +def component_card_2(): + return ComponentPlan( + component_type="Card", + component_description="This is a second card", + component_id="card_2", + df_name="N/A", + ) + + +@pytest.fixture +def page_plan(component_plan_card): + return PagePlan(title="Test Page", components_plan=[component_plan_card], controls_plan=[], layout_plan=None) + + +@pytest.fixture +def filter_prompt(): + return """ + Create a filter from the following instructions: Filter the bar chart by column `a`. + Do not make up things that are optional and DO NOT configure actions, action triggers or action chains. 
+ If no options are specified, leave them out.""" + + +@pytest.fixture +def layout(): + return vm.Layout(grid=[[0, 1]]) + + +@pytest.fixture +def expected_filter(): + return vm.Filter(targets=["bar_chart"], column="a") diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py new file mode 100644 index 000000000..f4c7e9aa4 --- /dev/null +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_components.py @@ -0,0 +1,39 @@ +import vizro.models as vm +from vizro_ai.dashboard._response_models.components import ComponentPlan + + +class TestComponentCreate: + """Tests component creation.""" + + def test_component_plan_instantiation(self): + component = ComponentPlan( + component_id="card_1", + component_type="Card", + component_description="This is a card", + df_name="N/A", + ) + assert component.component_id == "card_1" + assert component.component_type == "Card" + assert component.component_description == "This is a card" + assert component.df_name == "N/A" + + def test_create_graph(self, mocker, component_plan_graph, mock_vizro_ai_return, df_metadata): + mock_vizro_ai_object = mocker.patch("vizro_ai.VizroAI.__init__") + mock_vizro_ai_object.return_value = None + mock_vizro_ai_call = mocker.patch("vizro_ai.VizroAI.plot") + mock_vizro_ai_call.return_value = mock_vizro_ai_return + result = component_plan_graph.create( + model=None, + all_df_metadata=df_metadata, + ) + expected = vm.Graph(id="mock_id", figure=mock_vizro_ai_return) + + assert result.dict(exclude={"id": True}) == expected.dict(exclude={"id": True}) + + def test_create_card(self, fake_llm_card, component_plan_card, expected_card): + result = component_plan_card.create( + model=fake_llm_card, + all_df_metadata=None, + ) + + assert result.dict(exclude={"id": True}) == expected_card.dict(exclude={"id": True}) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py 
b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py new file mode 100644 index 000000000..d356d40fb --- /dev/null +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_controls.py @@ -0,0 +1,90 @@ +import logging + +import pytest +import vizro.models as vm +from vizro.managers import model_manager +from vizro.models import VizroBaseModel +from vizro_ai.dashboard._response_models.controls import ControlPlan, _create_filter_proxy + +try: + from pydantic.v1 import ValidationError +except ImportError: # pragma: no cov + from pydantic import ValidationError + +# Needed for testing control creation. +model_manager.__setitem__("bar_chart", VizroBaseModel) + + +class TestFilterProxyCreate: + """Tests filter proxy creation.""" + + def test_create_filter_proxy_validate_targets(self, df_cols, df_schema, controllable_components): + filter_proxy = _create_filter_proxy(df_cols, df_schema, controllable_components) + with pytest.raises(ValidationError, match="targets must be one of"): + filter_proxy(targets=["population_chart"], column="a") + + def test_create_filter_proxy_validate_targets_not_empty(self, df_cols, df_schema, controllable_components): + filter_proxy = _create_filter_proxy(df_cols=df_cols, df_schema=df_schema, controllable_components=[]) + with pytest.raises(ValidationError): + filter_proxy(targets=[], column="a") + + def test_create_filter_proxy_validate_columns(self, df_cols, df_schema, controllable_components): + filter_proxy = _create_filter_proxy(df_cols, df_schema, controllable_components) + with pytest.raises(ValidationError, match="column must be one of"): + filter_proxy(targets=["bar_chart"], column="x") + + def test_create_filter_proxy(self, df_cols, df_schema, controllable_components, expected_filter): + filter_proxy = _create_filter_proxy(df_cols, df_schema, controllable_components) + result = filter_proxy(targets=["bar_chart"], column="a") + + assert result.dict(exclude={"id": True}) == 
expected_filter.dict(exclude={"id": True}) + + +class TestControlCreate: + """Test control creation.""" + + def test_control_create_valid(self, fake_llm_filter, controllable_components, df_metadata): + control_plan = ControlPlan( + control_type="Filter", + control_description="Create a parameter that targets the data based on the column 'a'.", + df_name="bar_chart", + ) + result = control_plan.create( + model=fake_llm_filter, controllable_components=controllable_components, all_df_metadata=df_metadata + ) + assert result.dict(exclude={"id": True}) == vm.Filter(targets=["bar_chart"], column="a").dict( + exclude={"id": True} + ) + + def test_control_create_invalid_df_name( + self, fake_llm_filter, df_metadata, caplog + ): # testing the fallback when an invalid dataframe name is provided to ControlPlan. + with caplog.at_level(logging.WARNING): + control_plan = ControlPlan( + control_type="Filter", + control_description="Create a parameter that targets the data based on the column 'a'.", + df_name="line_chart", + ) + result = control_plan.create( + model=fake_llm_filter, controllable_components=["bar_chart"], all_df_metadata=df_metadata + ) + + assert result is None + assert "Dataframe line_chart not found in metadata, returning default values." in caplog.text + + def test_control_create_invalid_control_type( + self, fake_llm_filter, df_metadata, caplog + ): # testing the fallback when an invalid control type is provided to ControlPlan. + with pytest.raises(ValidationError): + with caplog.at_level(logging.WARNING): + control_plan = ControlPlan( + control_type="Parameter", + control_description="Create a parameter that targets the data based on the column 'a'.", + df_name="bar_chart", + ) + result = control_plan.create( + model=fake_llm_filter, controllable_components=["bar_chart"], all_df_metadata=df_metadata + ) + + assert result is None + assert "Build failed for `Control`, returning default values." 
in caplog.text diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py new file mode 100644 index 000000000..b06a567ac --- /dev/null +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_dashboard.py @@ -0,0 +1,18 @@ +from vizro_ai.dashboard._response_models.dashboard import DashboardPlan + + +class TestDashboardCreate: + """Tests dashboard plan creation.""" + + def test_dashboard_plan_instantiation(self, page_plan): + dashboard_plan = DashboardPlan( + title="Test Dashboard", + pages=[page_plan], + ) + assert dashboard_plan.pages[0].title == "Test Page" + assert dashboard_plan.pages[0].components_plan[0].component_id == "card_1" + assert dashboard_plan.pages[0].components_plan[0].component_type == "Card" + assert dashboard_plan.pages[0].components_plan[0].component_description == "This is a card" + assert dashboard_plan.pages[0].components_plan[0].df_name == "N/A" + assert dashboard_plan.pages[0].layout_plan is None + assert dashboard_plan.pages[0].controls_plan == [] diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py new file mode 100644 index 000000000..1483a270c --- /dev/null +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_df_info.py @@ -0,0 +1,9 @@ +from pandas.testing import assert_frame_equal +from vizro_ai.dashboard._response_models.df_info import _get_df_info + + +def test_get_df_info(df, df_schema, df_sample): + actual_df_schema, actual_df_sample = _get_df_info(df=df) + + assert actual_df_schema == df_schema + assert_frame_equal(actual_df_sample, df_sample) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py new file mode 100644 index 000000000..38da05713 --- /dev/null +++ 
b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_layout.py @@ -0,0 +1,59 @@ +import logging + +import pytest +import vizro.models as vm +from vizro_ai.dashboard._response_models.layout import LayoutPlan + + +class TestLayoutCreate: + """Test layout creation.""" + + @pytest.mark.parametrize( + "layout_grid_template_areas, component_ids, grid", + [ + ( + ["card_1 scatter_plot scatter_plot", "card_2 scatter_plot scatter_plot"], + ["card_1", "scatter_plot", "card_2"], + [[0, 1, 1], [2, 1, 1]], + ), + ( + ["card_1 scatter_plot scatter_plot", ". scatter_plot scatter_plot"], + ["card_1", "scatter_plot"], + [[0, 1, 1], [-1, 1, 1]], + ), + ], + ) + def test_layout_create_valid(self, layout_grid_template_areas, component_ids, grid): + layout_plan = LayoutPlan( + layout_grid_template_areas=layout_grid_template_areas, + ) + result = layout_plan.create(component_ids=component_ids) + expected = vm.Layout(grid=grid) + + assert result.dict(exclude={"id": True}) == expected.dict(exclude={"id": True}) + + @pytest.mark.parametrize( + "layout_grid_template_areas, component_ids, error_message", + [ + ( + ["card_1 scatter_plot scatter_plot", "card_2 scatter_plot scatter_plot"], + ["card_1", "scatter_plot"], + "Build failed for `Layout", + ), + ( + ["card_1 scatter_plot scatter_plot", "card_2 card_2 scatter_plot"], + ["card_1", "scatter_plot", "card_2"], + "Calculated grid which caused the error:", + ), + ], + ) + def test_layout_create_invalid(self, layout_grid_template_areas, component_ids, error_message, caplog): + layout_plan = LayoutPlan( + layout_grid_template_areas=layout_grid_template_areas, + ) + + with caplog.at_level(logging.WARNING): + result = layout_plan.create(component_ids=component_ids) + + assert error_message in caplog.text + assert result is None diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py new file mode 100644 index 000000000..877c522f5 --- 
/dev/null +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/_response_models/test_page.py @@ -0,0 +1,55 @@ +import pytest +from vizro_ai.dashboard._response_models.page import PagePlan + +try: + from pydantic.v1 import ValidationError +except ImportError: # pragma: no cov + from pydantic import ValidationError + + +class TestPageCreate: + """Test for page plan.""" + + def test_page_plan_instantiation(self, component_plan_card): + page_plan = PagePlan( + title="Test Page", + components_plan=[component_plan_card], + controls_plan=[], + layout_plan=None, + ) + assert page_plan.title == "Test Page" + assert page_plan.components_plan[0].component_id == "card_1" + assert page_plan.components_plan[0].component_type == "Card" + assert page_plan.components_plan[0].component_description == "This is a card" + assert page_plan.layout_plan is None + assert page_plan.controls_plan == [] + assert page_plan.unsupported_specs == [] + + def test_page_plan_invalid_components(self): + with pytest.raises(ValidationError, match="A page must contain at least one component."): + PagePlan( + title="Test Page", + components_plan=[], + controls_plan=[], + layout_plan=None, + ) + + def test_page_plan_unsupported_specs(self, component_plan_card): + page_plan = PagePlan( + title="Test Page", + components_plan=[component_plan_card], + controls_plan=[], + layout_plan=None, + unsupported_specs=["Unknown"], + ) + + assert page_plan.unsupported_specs == [] + + def test_page_plan_duplicate_components(self, component_plan_card): + with pytest.raises(ValidationError): + PagePlan( + title="Test Page", + components_plan=[component_plan_card, component_plan_card], + controls_plan=[], + layout_plan=None, + ) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py new file mode 100644 index 000000000..d370c1830 --- /dev/null +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/conftest.py @@ -0,0 +1,58 @@ +from typing import Any, List + +import pytest +import 
vizro.models as vm +from langchain.output_parsers import PydanticOutputParser +from langchain_community.llms.fake import FakeListLLM +from langchain_core.messages import HumanMessage + + +class MockStructuredOutputLLM(FakeListLLM): + def bind_tools(self, tools: List[Any]): + return super().bind(tools=tools) + + def with_structured_output(self, schema): + llm = self + output_parser = PydanticOutputParser(pydantic_object=schema) + return llm | output_parser + + +@pytest.fixture +def fake_llm(): + response = ['{"text":"this is a card","href":""}'] + return MockStructuredOutputLLM(responses=response) + + +@pytest.fixture +def component_description(): + return "This is a card" + + +@pytest.fixture +def expected_card(): + return vm.Card(text="this is a card") + + +@pytest.fixture +def query(): + return "I need a page with one card saying: Simple card." + + +@pytest.fixture +def message_output_valid(): + return {"message": [HumanMessage(content="I need a page with one card saying: Simple card.")], "df_info": None} + + +@pytest.fixture +def message_output_error(): + return { + "message": [HumanMessage(content="I need a page with one card saying: Simple card.")], + "df_info": None, + "validation_error": "ValidationError", + } + + +@pytest.fixture +def fake_llm_invalid(): + response = ['{"text":"this is a card", "href": "", "icon": "summary"}'] + return MockStructuredOutputLLM(responses=response) diff --git a/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py new file mode 100644 index 000000000..e349b4bd3 --- /dev/null +++ b/vizro-ai/tests/unit/vizro-ai/dashboard/test_pydantic_output.py @@ -0,0 +1,33 @@ +import pytest +import vizro.models as vm +from vizro_ai.dashboard._pydantic_output import _create_message_content, _create_prompt_template, _get_pydantic_model + + +def test_get_pydantic_model_valid(component_description, fake_llm, expected_card): + result = 
_get_pydantic_model(query=component_description, llm_model=fake_llm, response_model=vm.Card, df_info=None) + + assert result.dict(exclude={"id": True}) == expected_card.dict(exclude={"id": True}) + + +def test_get_pydantic_model_invalid(component_description, fake_llm_invalid): + with pytest.raises(ValueError, match="1 validation error for Card"): + _get_pydantic_model( + query=component_description, llm_model=fake_llm_invalid, response_model=vm.Card, df_info=None + ) + + +def test_create_message_content_valid(query, message_output_valid): + message_content = _create_message_content(query=query, df_info=None) + + assert message_content == message_output_valid + + +def test_create_message_content_error(query, message_output_error): + message_content = _create_message_content(query=query, df_info=None, validation_error="ValidationError", retry=True) + assert message_content == message_output_error + + +def test_create_prompt_template(): + additional_info = "Pay special attention to the following error: {validation_error}" + model = _create_prompt_template(additional_info) + assert additional_info in model.messages[0].prompt.template