Skip to content

Commit

Permalink
refactor tests structure and naming
Browse files Browse the repository at this point in the history
  • Loading branch information
l0uden committed Jan 17, 2025
1 parent dde3430 commit 91e88ab
Show file tree
Hide file tree
Showing 16 changed files with 64 additions and 94 deletions.
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: Score tests for VizroAI
name: e2e dashboard score tests for VizroAI

defaults:
run:
Expand All @@ -12,9 +12,9 @@ env:
FORCE_COLOR: 1

jobs:
test-score-vizro-ai-fork:
test-e2e-dashboard-score-vizro-ai-fork:
if: ${{ github.event.pull_request.head.repo.fork }}
name: test-score-vizro-ai on Py${{ matrix.config.python-version }} ${{ matrix.config.label }}
name: test-e2e-dashboard-score-vizro-ai on Py${{ matrix.config.python-version }} ${{ matrix.config.label }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
Expand All @@ -38,9 +38,9 @@ jobs:
- name: Passed fork step
run: echo "Success!"

test-score-vizro-ai:
test-e2e-dashboard-score-vizro-ai:
if: ${{ ! github.event.pull_request.head.repo.fork }}
name: test-score-vizro-ai on Py${{ matrix.config.python-version }} ${{ matrix.config.label }}
name: test-e2e-dashboard-score-vizro-ai on Py${{ matrix.config.python-version }} ${{ matrix.config.label }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
Expand Down Expand Up @@ -72,19 +72,19 @@ jobs:
- name: Show dependency tree
run: hatch run ${{ matrix.config.hatch-env }}:pip tree

- name: Run vizro-ai score tests with PyPI vizro
run: hatch run ${{ matrix.config.hatch-env }}:test-score
- name: Run vizro-ai e2e dashboard score tests with PyPI vizro
run: hatch run ${{ matrix.config.hatch-env }}:test-e2e-dashboard-score
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_API_BASE: ${{ secrets.OPENAI_API_BASE }}
VIZRO_TYPE: pypi
BRANCH: ${{ github.head_ref }}
PYTHON_VERSION: ${{ matrix.config.python-version }}

- name: Run vizro-ai score tests with local vizro
- name: Run vizro-ai e2e dashboard score tests with local vizro
run: |
hatch run ${{ matrix.config.hatch-env }}:pip install ../vizro-core
hatch run ${{ matrix.config.hatch-env }}:test-score
hatch run ${{ matrix.config.hatch-env }}:test-e2e-dashboard-score
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_API_BASE: ${{ secrets.OPENAI_API_BASE }}
Expand All @@ -99,7 +99,7 @@ jobs:
with:
payload: |
{
"text": "Vizro-ai ${{ matrix.config.hatch-env }} score tests build result: ${{ job.status }}\nBranch: ${{ github.head_ref }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
"text": "Vizro-ai ${{ matrix.config.hatch-env }} e2e dashboard score tests build result: ${{ job.status }}\nBranch: ${{ github.head_ref }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
Expand All @@ -111,10 +111,10 @@ jobs:
with:
name: Report-${{ matrix.config.python-version }}-${{ matrix.config.label }}
path: |
/home/runner/work/vizro/vizro/vizro-ai/tests/score/reports/report*.csv
/home/runner/work/vizro/vizro/vizro-ai/tests/e2e/reports/report*.csv
test-score-vizro-ai-report:
needs: test-score-vizro-ai
test-e2e-dashboard-score-vizro-ai-report:
needs: test-e2e-dashboard-score-vizro-ai
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: Integration tests for VizroAI
name: e2e plot tests for VizroAI

defaults:
run:
Expand All @@ -20,9 +20,9 @@ env:
FORCE_COLOR: 1

jobs:
test-integration-vizro-ai-fork:
test-e2e-plot-vizro-ai-fork:
if: ${{ github.event.pull_request.head.repo.fork }}
name: test-integration-vizro-ai on Py${{ matrix.config.python-version }} ${{ matrix.config.label }}
name: test-e2e-plot-vizro-ai on Py${{ matrix.config.python-version }} ${{ matrix.config.label }}
runs-on: ${{ matrix.config.os }}
strategy:
fail-fast: false
Expand Down Expand Up @@ -69,9 +69,9 @@ jobs:
- name: Passed fork step
run: echo "Success!"

test-integration-vizro-ai:
test-e2e-plot-vizro-ai:
if: ${{ ! github.event.pull_request.head.repo.fork }}
name: test-integration-vizro-ai on Py${{ matrix.config.python-version }} ${{ matrix.config.label }}
name: test-e2e-plot-vizro-ai on Py${{ matrix.config.python-version }} ${{ matrix.config.label }}
runs-on: ${{ matrix.config.os }}
strategy:
fail-fast: false
Expand Down Expand Up @@ -126,17 +126,17 @@ jobs:
- name: Show dependency tree
run: hatch run ${{ matrix.config.hatch-env }}:pip tree

- name: Run vizro-ai integration tests with PyPI vizro
run: hatch run ${{ matrix.config.hatch-env }}:test-integration
- name: Run vizro-ai e2e plot tests with PyPI vizro
run: hatch run ${{ matrix.config.hatch-env }}:test-e2e-plot
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_API_BASE: ${{ secrets.OPENAI_API_BASE }}
VIZRO_TYPE: pypi

- name: Run vizro-ai integration tests with local vizro
- name: Run vizro-ai e2e plot tests with local vizro
run: |
hatch run ${{ matrix.config.hatch-env }}:pip install ../vizro-core
hatch run ${{ matrix.config.hatch-env }}:test-integration
hatch run ${{ matrix.config.hatch-env }}:test-e2e-plot
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_API_BASE: ${{ secrets.OPENAI_API_BASE }}
Expand All @@ -149,7 +149,7 @@ jobs:
with:
payload: |
{
"text": "Vizro-ai ${{ matrix.config.hatch-env }} integration tests build result: ${{ job.status }}\nBranch: ${{ github.head_ref }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
"text": "Vizro-ai ${{ matrix.config.hatch-env }} e2e plot tests build result: ${{ job.status }}\nBranch: ${{ github.head_ref }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: e2e tests for VizroAI UI
name: tests for VizroAI UI

defaults:
run:
Expand All @@ -20,7 +20,7 @@ env:
PYTHON_VERSION: "3.12"

jobs:
test-e2e-vizro-ai-ui-fork:
test-vizro-ai-ui-fork:
if: ${{ github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest

Expand All @@ -30,7 +30,7 @@ jobs:
- name: Passed fork step
run: echo "Success!"

test-e2e-vizro-ai-ui:
test-vizro-ai-ui:
if: ${{ ! github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest

Expand All @@ -48,11 +48,10 @@ jobs:
- name: Show dependency tree
run: hatch run pip tree

- name: Run e2e VizroAI UI tests
- name: Run VizroAI UI tests
run: |
hatch run vizro-ai-ui
tests/tests_utils/wait-for-it.sh 127.0.0.1:8050 -t 30
hatch run test-e2e-vizro-ai-ui
hatch run test-vizro-ai-ui
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_API_BASE: ${{ secrets.OPENAI_API_BASE }}
Expand All @@ -61,6 +60,6 @@ jobs:
if: failure()
uses: ./.github/actions/failed-artifacts-and-slack-notifications
env:
TESTS_NAME: e2e VizroAI UI tests
TESTS_NAME: VizroAI UI tests
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
PROJECT_PATH: /home/runner/work/vizro/vizro/vizro-ai/
19 changes: 19 additions & 0 deletions tools/tests/e2e_common_asserts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
from hamcrest import any_of, assert_that, contains_string


def browser_console_warnings_checker(log_level, log_levels):
    """Assert that a single browser-console entry matches a known-benign warning.

    Args:
        log_level: one console log entry (dict with a "message" key) being checked.
        log_levels: the full collection of captured log entries, included in the
            failure message for easier debugging.

    Raises:
        AssertionError: if the entry's message matches none of the allowed
            warning substrings below (i.e. it is an unexpected console error).
    """
    assert_that(
        log_level["message"],
        any_of(
            # Known harmless warnings emitted by Dash/React/Plotly in the browser.
            contains_string("Invalid prop `persisted_props[0]` of value `on` supplied to `t`"),
            contains_string("React does not recognize the `%s` prop on a DOM element"),
            contains_string("_scrollZoom"),
            contains_string("unstable_flushDiscreteUpdates: Cannot flush updates when React is already rendering"),
            contains_string("React state update on an unmounted component"),
            contains_string("componentWillMount has been renamed"),
            contains_string("componentWillReceiveProps has been renamed"),
            contains_string("GPU stall due to ReadPixels"),
            contains_string("WebGL"),  # https://issues.chromium.org/issues/40277080
        ),
        # Fixed typo: "outoput" -> "output".
        reason=f"Error output: {log_levels}",
    )
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
import time

from e2e_constants import TIMEOUT
from selenium.common.exceptions import (
StaleElementReferenceException,
)
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait

TIMEOUT = 30


def wait_for(condition_function, *args):
"""Function wait for any condition to be True."""
Expand Down
File renamed without changes.
17 changes: 7 additions & 10 deletions vizro-ai/hatch.toml
Original file line number Diff line number Diff line change
Expand Up @@ -50,16 +50,19 @@ prep-release = [
]
pypath = "hatch run python -c 'import sys; print(sys.executable)'"
test = "pytest tests {args}"
test-e2e-vizro-ai-ui = "pytest -vs --reruns 1 tests/e2e/test_vizro_ai_ui.py --headless {args}"
test-integration = "pytest -vs --reruns 1 tests/integration --headless {args}"
test-score = "pytest -vs --reruns 1 tests/score --headless {args}"
test-e2e-dashboard-score = "pytest -vs tests/e2e/test_dashboard.py --headless {args}"
test-e2e-plot = "pytest -vs --reruns 1 tests/e2e/test_plot.py --headless {args}"
test-unit = "pytest tests/unit {args}"
test-unit-coverage = [
"coverage run -m pytest tests/unit {args}",
"- coverage combine",
"coverage report"
]
vizro-ai-ui = "python examples/dashboard_ui/app.py &"
test-vizro-ai-ui = "pytest -vs --reruns 1 tests/vizro_ai_ui/test_vizro_ai_ui.py --headless {args}"
vizro-ai-ui = [
"python examples/dashboard_ui/app.py &",
"../tools/tests/wait-for-it.sh 127.0.0.1:8051 -t 30"
]

[envs.docs]
dependencies = [
Expand All @@ -79,12 +82,6 @@ build = "mkdocs build --strict"
# throwing 403 errors, but these are not real errors.
link-check = "linkchecker site --check-extern --no-warnings --ignore=404.html --ignore-url=127.0.0.1 --ignore-url=https://vizro.readthedocs.io/ --ignore-url=https://platform.openai.com/docs/models --ignore-url=openai.com --ignore-url=https://openai.com/"
pip = '"{env:HATCH_UV}" pip {args}'
serve = "mkdocs serve --open"

[envs.hatch-uv]
dependencies = [
"uv<0.5.10" # https://github.com/astral-sh/uv/issues/10039
]

[envs.lower-bounds]
extra-dependencies = ["pydantic==1.10.16"]
Expand Down
2 changes: 1 addition & 1 deletion vizro-ai/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ filterwarnings = [
# Ignore deprecation warning until this is solved: https://github.com/plotly/dash/issues/2590:
"ignore:HTTPResponse.getheader():DeprecationWarning"
]
pythonpath = ["tests/tests_utils", "../tools/tests"]
pythonpath = ["../tools/tests"]

[tool.ruff]
extend = "../pyproject.toml"
Expand Down
2 changes: 1 addition & 1 deletion vizro-ai/tests/e2e/test_dashboard.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def logic( # noqa: PLR0912, PLR0915
config: json config of the expected dashboard
"""
report_dir = "tests/score/reports"
report_dir = "tests/e2e/reports"
os.makedirs(report_dir, exist_ok=True)

app = Vizro().build(dashboard).dash
Expand Down
Empty file.
30 changes: 0 additions & 30 deletions vizro-ai/tests/tests_utils/e2e_asserts.py

This file was deleted.

11 changes: 0 additions & 11 deletions vizro-ai/tests/tests_utils/e2e_constants.py

This file was deleted.

2 changes: 1 addition & 1 deletion vizro-ai/tests/vizro_ai_ui/conftest.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from datetime import datetime

import pytest
from e2e_asserts import browser_console_warnings_checker
from e2e_common_asserts import browser_console_warnings_checker
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

Expand Down
8 changes: 4 additions & 4 deletions vizro-ai/tests/vizro_ai_ui/test_vizro_ai_ui.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
import os

import pytest
from e2e_fake_data_generator import create_genre_popularity_by_country
from e2e_waiters import (
from e2e_common_waiters import (
wait_for,
webdriver_click_waiter,
webdriver_waiter,
webdriver_waiter_css,
)
from fake_data_generator import create_genre_popularity_by_country
from selenium.common import InvalidSelectorException, TimeoutException


Expand All @@ -20,7 +20,7 @@ def test_chart_ui(chromedriver):
# Create test dataset
popularity_dataset = create_genre_popularity_by_country(start_year=1980, end_year=2023, records_per_year=10)
# Save to a CSV file
popularity_dataset.to_csv("tests/tests_utils/genre_popularity_by_country.csv", index=False)
popularity_dataset.to_csv("tests/vizro_ai_ui/genre_popularity_by_country.csv", index=False)

# fill in values
api_key = webdriver_waiter(chromedriver, '//*[@id="settings-api-key"]')
Expand All @@ -33,7 +33,7 @@ def test_chart_ui(chromedriver):

# upload file
file_input = webdriver_waiter_css(chromedriver, 'input[type="file"]')
file_input.send_keys(os.path.abspath("tests/tests_utils/genre_popularity_by_country.csv"))
file_input.send_keys(os.path.abspath("tests/vizro_ai_ui/genre_popularity_by_country.csv"))
webdriver_click_waiter(chromedriver, '//*[@id="data-upload"]')

# enter prompt
Expand Down
5 changes: 0 additions & 5 deletions vizro-core/hatch.toml
Original file line number Diff line number Diff line change
Expand Up @@ -111,11 +111,6 @@ template = "examples"
DASH_DEBUG = "true"
VIZRO_LOG_LEVEL = "DEBUG"

[envs.hatch-uv]
dependencies = [
"uv<0.5.10" # https://github.com/astral-sh/uv/issues/10039
]

[envs.lower-bounds]
extra-dependencies = [
"pydantic==1.10.16",
Expand Down

0 comments on commit 91e88ab

Please sign in to comment.