From 3ff2a075ba73e1fb61024e6c65d827cadc34546b Mon Sep 17 00:00:00 2001
From: Huong Nguyen <huong.nguyen@mckinsey.com>
Date: Wed, 4 Dec 2024 10:10:06 +0000
Subject: [PATCH] remove test graphql folder

Signed-off-by: Huong Nguyen <huong.nguyen@mckinsey.com>
---
 .../tests/test_api/test_graphql/__init__.py      |   0
 .../tests/test_api/test_graphql/conftest.py      | 196 --------
 .../test_api/test_graphql/test_mutations.py      | 232 ----------
 .../test_api/test_graphql/test_queries.py        | 429 ------------------
 .../test_api/test_graphql/test_serializers.py    |   0
 5 files changed, 857 deletions(-)
 delete mode 100644 package/tests/test_api/test_graphql/__init__.py
 delete mode 100644 package/tests/test_api/test_graphql/conftest.py
 delete mode 100644 package/tests/test_api/test_graphql/test_mutations.py
 delete mode 100644 package/tests/test_api/test_graphql/test_queries.py
 delete mode 100644 package/tests/test_api/test_graphql/test_serializers.py

diff --git a/package/tests/test_api/test_graphql/__init__.py b/package/tests/test_api/test_graphql/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/package/tests/test_api/test_graphql/conftest.py b/package/tests/test_api/test_graphql/conftest.py
deleted file mode 100644
index 78ee0156c0..0000000000
--- a/package/tests/test_api/test_graphql/conftest.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import base64
-import json
-from pathlib import Path
-
-import pytest
-from kedro.io import DataCatalog, Version
-from kedro_datasets import matplotlib, pandas, plotly, tracking
-
-from kedro_viz.api.graphql.types import Run
-from kedro_viz.models.experiment_tracking import RunModel, UserRunDetailsModel
-
-
-@pytest.fixture
-def data_access_manager_with_no_run(data_access_manager, example_db_session, mocker):
-    data_access_manager.set_db_session(example_db_session)
-    mocker.patch(
-        "kedro_viz.api.graphql.schema.data_access_manager", data_access_manager
-    )
-    yield data_access_manager
-
-
-@pytest.fixture
-def data_access_manager_with_runs(
-    data_access_manager, example_db_session_with_runs, mocker
-):
-    data_access_manager.set_db_session(example_db_session_with_runs)
-    mocker.patch(
-        "kedro_viz.api.graphql.schema.data_access_manager", data_access_manager
-    )
-    yield data_access_manager
-
-
-@pytest.fixture
-def save_version(example_run_ids):
-    yield example_run_ids[0]
-
-
-@pytest.fixture
-def example_tracking_catalog(example_run_ids, tmp_path):
-    example_run_id = example_run_ids[0]
-    metrics_dataset = tracking.MetricsDataset(
-        filepath=Path(tmp_path / "test.json").as_posix(),
-        version=Version(None, example_run_id),
-    )
-    metrics_dataset.save({"col1": 1, "col2": 2, "col3": 3})
-
-    csv_dataset = pandas.CSVDataset(
-        filepath=Path(tmp_path / "metrics.csv").as_posix(),
-        version=Version(None, example_run_id),
-    )
-
-    more_metrics = tracking.MetricsDataset(
-        filepath=Path(tmp_path / "metrics.json").as_posix(),
-        version=Version(None, example_run_id),
-    )
-    more_metrics.save({"col4": 4, "col5": 5, "col6": 6})
-
-    json_dataset = tracking.JSONDataset(
-        filepath=Path(tmp_path / "tracking.json").as_posix(),
-        version=Version(None, example_run_id),
-    )
-    json_dataset.save({"col7": "column_seven", "col2": True, "col3": 3})
-
-    plotly_dataset = plotly.JSONDataset(
-        filepath=Path(tmp_path / "plotly.json").as_posix(),
-        version=Version(None, example_run_id),
-    )
-
-    class MockPlotlyData:
-        data = {
-            "data": [
-                {
-                    "x": ["giraffes", "orangutans", "monkeys"],
-                    "y": [20, 14, 23],
-                    "type": "bar",
-                }
-            ]
-        }
-
-        @classmethod
-        def write_json(cls, fs_file, **kwargs):
-            json.dump(cls.data, fs_file, **kwargs)
-
-    plotly_dataset.save(MockPlotlyData)
-
-    matplotlib_dataset = matplotlib.MatplotlibWriter(
-        filepath=Path(tmp_path / "matplotlib.png").as_posix(),
-        version=Version(None, example_run_id),
-    )
-
-    class MockMatplotData:
-        data = base64.b64decode(
-            "iVBORw0KGgoAAAANSUhEUg"
-            "AAAAEAAAABCAQAAAC1HAwCAA"
-            "AAC0lEQVQYV2NgYAAAAAM"
-            "AAWgmWQ0AAAAASUVORK5CYII="
-        )
-
-        @classmethod
-        def savefig(cls, bytes_buffer, **kwargs):
-            bytes_buffer.write(cls.data)
-
-    matplotlib_dataset.save(MockMatplotData)
-
-    catalog = DataCatalog(
-        datasets={
-            "metrics": metrics_dataset,
-            "csv_dataset": csv_dataset,
-            "more_metrics": more_metrics,
-            "json_tracking": json_dataset,
-            "plotly_dataset": plotly_dataset,
-            "matplotlib_dataset": matplotlib_dataset,
-        }
-    )
-
-    yield catalog
-
-
-@pytest.fixture
-def example_multiple_run_tracking_catalog(example_run_ids, tmp_path):
-    new_metrics_dataset = tracking.MetricsDataset(
-        filepath=Path(tmp_path / "test.json").as_posix(),
-        version=Version(None, example_run_ids[1]),
-    )
-    new_metrics_dataset.save({"col1": 1, "col3": 3})
-    new_metrics_dataset = tracking.MetricsDataset(
-        filepath=Path(tmp_path / "test.json").as_posix(),
-        version=Version(None, example_run_ids[0]),
-    )
-    new_data = {"col1": 3, "col2": 3.23}
-    new_metrics_dataset.save(new_data)
-    catalog = DataCatalog(
-        datasets={
-            "new_metrics": new_metrics_dataset,
-        }
-    )
-
-    yield catalog
-
-
-@pytest.fixture
-def example_multiple_run_tracking_catalog_at_least_one_empty_run(
-    example_run_ids, tmp_path
-):
-    new_metrics_dataset = tracking.MetricsDataset(
-        filepath=Path(tmp_path / "test.json").as_posix(),
-        version=Version(None, example_run_ids[1]),
-    )
-    new_metrics_dataset.save({"col1": 1, "col3": 3})
-    new_metrics_dataset = tracking.MetricsDataset(
-        filepath=Path(tmp_path / "test.json").as_posix(),
-        version=Version(None, example_run_ids[0]),
-    )
-    catalog = DataCatalog(
-        datasets={
-            "new_metrics": new_metrics_dataset,
-        }
-    )
-
-    yield catalog
-
-
-@pytest.fixture
-def example_multiple_run_tracking_catalog_all_empty_runs(example_run_ids, tmp_path):
-    new_metrics_dataset = tracking.MetricsDataset(
-        filepath=Path(tmp_path / "test.json").as_posix(),
-        version=Version(None, example_run_ids[1]),
-    )
-    new_metrics_dataset = tracking.MetricsDataset(
-        filepath=Path(tmp_path / "test.json").as_posix(),
-        version=Version(None, example_run_ids[0]),
-    )
-    catalog = DataCatalog(
-        datasets={
-            "new_metrics": new_metrics_dataset,
-        }
-    )
-
-    yield catalog
-
-
-@pytest.fixture
-def example_runs(example_run_ids):
-    yield [
-        Run(
-            id=run_id,
-            bookmark=False,
-            notes="Hello World",
-            title="Hello Kedro",
-            author="",
-            git_branch="",
-            git_sha="",
-            run_command="",
-        )
-        for run_id in example_run_ids
-    ]
diff --git a/package/tests/test_api/test_graphql/test_mutations.py b/package/tests/test_api/test_graphql/test_mutations.py
deleted file mode 100644
index 5ff3285386..0000000000
--- a/package/tests/test_api/test_graphql/test_mutations.py
+++ /dev/null
@@ -1,232 +0,0 @@
-import json
-
-import pytest
-
-from kedro_viz.models.experiment_tracking import RunModel
-
-
-@pytest.mark.usefixtures("data_access_manager_with_runs")
-class TestGraphQLMutation:
-    @pytest.mark.parametrize(
-        "bookmark,notes,title",
-        [
-            (
-                False,
-                "new notes",
-                "new title",
-            ),
-            (True, "new notes", "new title"),
-            (True, "", ""),
-        ],
-    )
-    def test_update_user_details_success(
-        self,
-        bookmark,
-        notes,
-        title,
-        client,
-        example_run_ids,
-    ):
-        example_run_id = example_run_ids[0]
-        query = f"""
-            mutation updateRun {{
-                updateRunDetails(
-                    runId: "{example_run_id}",
-                    runInput: {{bookmark: {str(bookmark).lower()}, notes: "{notes}", title: "{title}"}}
-                ) {{
-                    __typename
-                    ... on UpdateRunDetailsSuccess {{
-                        run {{
-                            id
-                            title
-                            bookmark
-                            notes
-                        }}
-                    }}
-                    ... on UpdateRunDetailsFailure {{
-                        id
-                        errorMessage
-                    }}
-                }}
-            }}
-        """
-        response = client.post("/graphql", json={"query": query})
-        assert response.json() == {
-            "data": {
-                "updateRunDetails": {
-                    "__typename": "UpdateRunDetailsSuccess",
-                    "run": {
-                        "id": example_run_id,
-                        "bookmark": bookmark,
-                        "title": title if title != "" else example_run_id,
-                        "notes": notes,
-                    },
-                }
-            }
-        }
-
-    def test_update_user_details_only_bookmark(
-        self,
-        client,
-        example_run_ids,
-    ):
-        example_run_id = example_run_ids[0]
-        query = f"""
-            mutation updateRun {{
-                updateRunDetails(runId: "{example_run_id}", runInput: {{bookmark: true}}) {{
-                    __typename
-                    ... on UpdateRunDetailsSuccess {{
-                        run {{
-                            id
-                            title
-                            bookmark
-                            notes
-                        }}
-                    }}
-                    ... on UpdateRunDetailsFailure {{
-                        id
-                        errorMessage
-                    }}
-                }}
-            }}
-        """
-
-        response = client.post("/graphql", json={"query": query})
-        assert response.json() == {
-            "data": {
-                "updateRunDetails": {
-                    "__typename": "UpdateRunDetailsSuccess",
-                    "run": {
-                        "id": example_run_id,
-                        "bookmark": True,
-                        "title": example_run_id,
-                        "notes": "",
-                    },
-                }
-            }
-        }
-
-    def test_update_user_details_should_add_when_no_details_exist(
-        self, client, data_access_manager_with_no_run
-    ):
-        # add a new run
-        example_run_id = "test_id"
-        run = RunModel(
-            id=example_run_id,
-            blob=json.dumps(
-                {"session_id": example_run_id, "cli": {"command_path": "kedro run"}}
-            ),
-        )
-        data_access_manager_with_no_run.runs.add_run(run)
-
-        query = f"""
-            mutation updateRun {{
-                updateRunDetails(runId: "{example_run_id}", runInput: {{bookmark: true}}) {{
-                    __typename
-                    ... on UpdateRunDetailsSuccess {{
-                        run {{
-                            id
-                            title
-                            bookmark
-                            notes
-                        }}
-                    }}
-                    ... on UpdateRunDetailsFailure {{
-                        id
-                        errorMessage
-                    }}
-                }}
-            }}
-        """
-
-        response = client.post("/graphql", json={"query": query})
-        assert response.json() == {
-            "data": {
-                "updateRunDetails": {
-                    "__typename": "UpdateRunDetailsSuccess",
-                    "run": {
-                        "id": example_run_id,
-                        "bookmark": True,
-                        "title": example_run_id,
-                        "notes": "",
-                    },
-                }
-            }
-        }
-
-    def test_update_user_details_should_update_when_details_exist(
-        self, client, example_run_ids
-    ):
-        example_run_id = example_run_ids[0]
-        query = f"""
-            mutation updateRun {{
-                updateRunDetails(runId: "{example_run_id}", runInput: {{title:"new title", notes: "new notes"}}) {{
-                    __typename
-                    ... on UpdateRunDetailsSuccess {{
-                        run {{
-                            id
-                            title
-                            bookmark
-                            notes
-                        }}
-                    }}
-                    ... on UpdateRunDetailsFailure {{
-                        id
-                        errorMessage
-                    }}
-                }}
-            }}
-        """
-
-        response = client.post("/graphql", json={"query": query})
-        assert response.json() == {
-            "data": {
-                "updateRunDetails": {
-                    "__typename": "UpdateRunDetailsSuccess",
-                    "run": {
-                        "id": example_run_id,
-                        "bookmark": True,
-                        "title": "new title",
-                        "notes": "new notes",
-                    },
-                }
-            }
-        }
-
-    def test_update_user_details_should_fail_when_run_doesnt_exist(self, client):
-        response = client.post(
-            "/graphql",
-            json={
-                "query": """
-                    mutation {
-                        updateRunDetails(
-                            runId: "I don't exist",
-                            runInput: { bookmark: false, title: "Hello Kedro", notes: "There are notes"}
-                        ) {
-                            __typename
-                            ... on UpdateRunDetailsSuccess {
-                                run {
-                                    id
-                                    title
-                                    notes
-                                    bookmark
-                                }
-                            }
-                            ... on UpdateRunDetailsFailure {
-                                id
-                                errorMessage
-                            }
-                        }
-                    }
-                """
-            },
-        )
-        assert response.json() == {
-            "data": {
-                "updateRunDetails": {
-                    "__typename": "UpdateRunDetailsFailure",
-                    "id": "I don't exist",
-                    "errorMessage": "Given run_id: I don't exist doesn't exist",
-                }
-            }
-        }
diff --git a/package/tests/test_api/test_graphql/test_queries.py b/package/tests/test_api/test_graphql/test_queries.py
deleted file mode 100644
index 05dcf6fcda..0000000000
--- a/package/tests/test_api/test_graphql/test_queries.py
+++ /dev/null
@@ -1,429 +0,0 @@
-import json
-
-import pytest
-from packaging.version import parse
-
-from kedro_viz import __version__
-
-
-class TestQueryNoSessionStore:
-    def test_graphql_run_list_endpoint(self, client):
-        response = client.post("/graphql", json={"query": "{runsList {id bookmark}}"})
-        assert response.json() == {"data": {"runsList": []}}
-
-    def test_graphql_runs_metadata_endpoint(self, client):
-        response = client.post(
-            "/graphql",
-            json={"query": '{runMetadata(runIds: ["id"]) {id bookmark}}'},
-        )
-        assert response.json() == {"data": {"runMetadata": []}}
-
-
-@pytest.mark.usefixtures("data_access_manager_with_no_run")
-class TestQueryNoRun:
-    def test_graphql_run_list_endpoint(self, client):
-        response = client.post("/graphql", json={"query": "{runsList {id bookmark}}"})
-        assert response.json() == {"data": {"runsList": []}}
-
-    def test_graphql_runs_metadata_endpoint(self, client):
-        response = client.post(
-            "/graphql",
-            json={"query": '{runMetadata(runIds: ["invalid run id"]) {id bookmark}}'},
-        )
-        assert response.json() == {"data": {"runMetadata": []}}
-
-
-@pytest.mark.usefixtures("data_access_manager_with_runs")
-class TestQueryWithRuns:
-    def test_run_list_query(
-        self,
-        client,
-        example_run_ids,
-    ):
-        response = client.post("/graphql", json={"query": "{runsList {id bookmark}}"})
-        assert response.json() == {
-            "data": {
-                "runsList": [
-                    {"id": run_id, "bookmark": True} for run_id in example_run_ids
-                ]
-            }
-        }
-
-    def test_graphql_runs_metadata_endpoint(self, example_run_ids, client):
-        response = client.post(
-            "/graphql",
-            json={
-                "query": f"""{{runMetadata(runIds: ["{ example_run_ids[0] }"]) {{id bookmark}}}}"""
-            },
-        )
-        assert response.json() == {
-            "data": {"runMetadata": [{"id": example_run_ids[0], "bookmark": True}]}
-        }
-
-    def test_run_tracking_data_query(
-        self,
-        example_run_ids,
-        client,
-        example_tracking_catalog,
-        data_access_manager_with_runs,
-        example_pipelines,
-    ):
-        data_access_manager_with_runs.add_catalog(
-            example_tracking_catalog, example_pipelines
-        )
-        example_run_id = example_run_ids[0]
-
-        response = client.post(
-            "/graphql",
-            json={
-                "query": f"""
-                {{
-                    metrics: runTrackingData(runIds:["{example_run_id}"],group:METRIC)
-                    {{datasetName, datasetType, data}}
-                    json: runTrackingData(runIds:["{example_run_id}"],group:JSON)
-                    {{datasetName, datasetType, data}}
-                    plots: runTrackingData(runIds:["{example_run_id}"],group:PLOT)
-                    {{datasetName, datasetType, data}}
-                }}
-                """
-            },
-        )
-
-        expected_response = {
-            "data": {
-                "metrics": [
-                    {
-                        "datasetName": "metrics",
-                        "datasetType": "tracking.metrics_dataset.MetricsDataset",
-                        "data": {
-                            "col1": [{"runId": example_run_id, "value": 1.0}],
-                            "col2": [{"runId": example_run_id, "value": 2.0}],
-                            "col3": [{"runId": example_run_id, "value": 3.0}],
-                        },
-                    },
-                    {
-                        "datasetName": "more_metrics",
-                        "datasetType": "tracking.metrics_dataset.MetricsDataset",
-                        "data": {
-                            "col4": [{"runId": example_run_id, "value": 4.0}],
-                            "col5": [{"runId": example_run_id, "value": 5.0}],
-                            "col6": [{"runId": example_run_id, "value": 6.0}],
-                        },
-                    },
-                ],
-                "json": [
-                    {
-                        "datasetName": "json_tracking",
-                        "datasetType": "tracking.json_dataset.JSONDataset",
-                        "data": {
-                            "col2": [{"runId": example_run_id, "value": True}],
-                            "col3": [{"runId": example_run_id, "value": 3}],
-                            "col7": [
-                                {
-                                    "runId": example_run_id,
-                                    "value": "column_seven",
-                                }
-                            ],
-                        },
-                    },
-                ],
-                "plots": [
-                    {
-                        "datasetName": "plotly_dataset",
-                        "datasetType": "plotly.json_dataset.JSONDataset",
-                        "data": {
-                            "plotly.json": [
-                                {
-                                    "runId": "2021-11-03T18.24.24.379Z",
-                                    "value": {
-                                        "data": [
-                                            {
-                                                "x": [
-                                                    "giraffes",
-                                                    "orangutans",
-                                                    "monkeys",
-                                                ],
-                                                "y": [20, 14, 23],
-                                                "type": "bar",
-                                            }
-                                        ]
-                                    },
-                                }
-                            ]
-                        },
-                    },
-                    {
-                        "datasetName": "matplotlib_dataset",
-                        "datasetType": "matplotlib.matplotlib_writer.MatplotlibWriter",
-                        "data": {
-                            "matplotlib.png": [
-                                {
-                                    "runId": "2021-11-03T18.24.24.379Z",
-                                    "value": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=",
-                                }
-                            ]
-                        },
-                    },
-                ],
-            }
-        }
-
-        assert response.json() == expected_response
-
-    def test_metrics_data(
-        self,
-        client,
-        example_tracking_catalog,
-        data_access_manager_with_runs,
-        example_pipelines,
-    ):
-        data_access_manager_with_runs.add_catalog(
-            example_tracking_catalog, example_pipelines
-        )
-
-        response = client.post(
-            "/graphql",
-            json={
-                "query": "query MyQuery {\n runMetricsData(limit: 3) {\n data\n }\n}\n"
-            },
-        )
-
-        expected = {
-            "data": {
-                "runMetricsData": {
-                    "data": {
-                        "metrics": {
-                            "metrics.col1": [1.0, None],
-                            "metrics.col2": [2.0, None],
-                            "metrics.col3": [3.0, None],
-                            "more_metrics.col4": [4.0, None],
-                            "more_metrics.col5": [5.0, None],
-                            "more_metrics.col6": [6.0, None],
-                        },
-                        "runs": {
-                            "2021-11-02T18.24.24.379Z": [
-                                None,
-                                None,
-                                None,
-                                None,
-                                None,
-                                None,
-                            ],
-                            "2021-11-03T18.24.24.379Z": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
-                        },
-                    }
-                }
-            }
-        }
-
-        assert response.json() == expected
-
-    @pytest.mark.parametrize(
-        "show_diff,expected_response",
-        [
-            (
-                True,
-                {
-                    "data": {
-                        "runTrackingData": [
-                            {
-                                "datasetName": "new_metrics",
-                                "datasetType": "tracking.metrics_dataset.MetricsDataset",
-                                "data": {
-                                    "col1": [
-                                        {
-                                            "runId": "2021-11-03T18.24.24.379Z",
-                                            "value": 3.0,
-                                        },
-                                        {
-                                            "runId": "2021-11-02T18.24.24.379Z",
-                                            "value": 1.0,
-                                        },
-                                    ],
-                                    "col2": [
-                                        {
-                                            "runId": "2021-11-03T18.24.24.379Z",
-                                            "value": 3.23,
-                                        },
-                                    ],
-                                    "col3": [
-                                        {
-                                            "runId": "2021-11-02T18.24.24.379Z",
-                                            "value": 3.0,
-                                        },
-                                    ],
-                                },
-                            }
-                        ]
-                    }
-                },
-            ),
-            (
                False,
-                {
-                    "data": {
-                        "runTrackingData": [
-                            {
-                                "datasetName": "new_metrics",
-                                "datasetType": "tracking.metrics_dataset.MetricsDataset",
-                                "data": {
-                                    "col1": [
-                                        {
-                                            "runId": "2021-11-03T18.24.24.379Z",
-                                            "value": 3.0,
-                                        },
-                                        {
-                                            "runId": "2021-11-02T18.24.24.379Z",
-                                            "value": 1.0,
-                                        },
-                                    ],
-                                },
-                            },
-                        ]
-                    }
-                },
-            ),
-        ],
-    )
-    def test_graphql_run_tracking_data(
-        self,
-        example_run_ids,
-        client,
-        example_multiple_run_tracking_catalog,
-        data_access_manager_with_runs,
-        show_diff,
-        expected_response,
-        example_pipelines,
-    ):
-        data_access_manager_with_runs.add_catalog(
-            example_multiple_run_tracking_catalog, example_pipelines
-        )
-
-        response = client.post(
-            "/graphql",
-            json={
-                "query": f"""{{runTrackingData
-                (group: METRIC runIds:{json.dumps(example_run_ids)}, showDiff: {json.dumps(show_diff)})
-                {{datasetName, datasetType, data}}}}"""
-            },
-        )
-        assert response.json() == expected_response
-
-    @pytest.mark.parametrize(
-        "show_diff,expected_response",
-        [
-            (
-                True,
-                {
-                    "data": {
-                        "runTrackingData": [
-                            {
-                                "datasetName": "new_metrics",
-                                "datasetType": "tracking.metrics_dataset.MetricsDataset",
-                                "data": {
-                                    "col1": [
-                                        {
-                                            "runId": "2021-11-02T18.24.24.379Z",
-                                            "value": 1.0,
-                                        },
-                                    ],
-                                    "col3": [
-                                        {
-                                            "runId": "2021-11-02T18.24.24.379Z",
-                                            "value": 3.0,
-                                        },
-                                    ],
-                                },
-                            }
-                        ]
-                    }
-                },
-            ),
-            (
-                False,
-                {"data": {"runTrackingData": []}},
-            ),
-        ],
-    )
-    def test_graphql_run_tracking_data_at_least_one_empty_run(
-        self,
-        example_run_ids,
-        client,
-        example_multiple_run_tracking_catalog_at_least_one_empty_run,
-        data_access_manager_with_runs,
-        show_diff,
-        expected_response,
-        example_pipelines,
-    ):
-        data_access_manager_with_runs.add_catalog(
-            example_multiple_run_tracking_catalog_at_least_one_empty_run,
-            example_pipelines,
-        )
-
-        response = client.post(
-            "/graphql",
-            json={
-                "query": f"""{{runTrackingData
-                (group: METRIC runIds:{json.dumps(example_run_ids)}, showDiff: {json.dumps(show_diff)})
-                {{datasetName, datasetType, data}}}}"""
-            },
-        )
-        assert response.json() == expected_response
-
-    @pytest.mark.parametrize(
-        "show_diff,expected_response",
-        [
-            (
-                True,
-                {"data": {"runTrackingData": []}},
-            ),
-            (
-                False,
-                {"data": {"runTrackingData": []}},
-            ),
-        ],
-    )
-    def test_graphql_run_tracking_data_all_empty_runs(
-        self,
-        example_run_ids,
-        client,
-        example_multiple_run_tracking_catalog_all_empty_runs,
-        data_access_manager_with_runs,
-        show_diff,
-        expected_response,
-        example_pipelines,
-    ):
-        data_access_manager_with_runs.add_catalog(
-            example_multiple_run_tracking_catalog_all_empty_runs, example_pipelines
-        )
-
-        response = client.post(
-            "/graphql",
-            json={
-                "query": f"""{{runTrackingData
-                (group: METRIC runIds:{json.dumps(example_run_ids)}, showDiff: {json.dumps(show_diff)})
-                {{datasetName, datasetType, data}}}}"""
-            },
-        )
-        assert response.json() == expected_response
-
-
-class TestQueryVersion:
-    def test_graphql_version_endpoint(self, client, mocker):
-        mocker.patch(
-            "kedro_viz.api.graphql.schema.get_latest_version",
-            return_value=parse("1.0.0"),
-        )
-        response = client.post(
-            "/graphql",
-            json={"query": "{version {installed isOutdated latest}}"},
-        )
-        assert response.json() == {
-            "data": {
-                "version": {
-                    "installed": __version__,
-                    "isOutdated": False,
-                    "latest": "1.0.0",
-                }
-            }
-        }
diff --git a/package/tests/test_api/test_graphql/test_serializers.py b/package/tests/test_api/test_graphql/test_serializers.py
deleted file mode 100644
index e69de29bb2..0000000000