diff --git a/openapi_tester/constants.py b/openapi_tester/constants.py index b4bf3ce1..6ad56b2f 100644 --- a/openapi_tester/constants.py +++ b/openapi_tester/constants.py @@ -47,3 +47,4 @@ UNDOCUMENTED_SCHEMA_SECTION_ERROR = "Error: Unsuccessfully tried to index the OpenAPI schema by `{key}`. {error_addon}" ONE_OF_ERROR = "Expected data to match one and only one of the oneOf schema types; found {matches} matches." ANY_OF_ERROR = "Expected data to match one or more of the documented anyOf schema types, but found no matches." +INIT_ERROR = "Unable to configure loader." diff --git a/openapi_tester/exceptions.py b/openapi_tester/exceptions.py index 01ce3d80..3fac2c1d 100644 --- a/openapi_tester/exceptions.py +++ b/openapi_tester/exceptions.py @@ -34,7 +34,10 @@ def _sort_data(data_object: Any) -> Any: if isinstance(data_object, dict): return dict(sorted(data_object.items())) if isinstance(data_object, list): - return sorted(data_object) + try: + return sorted(data_object) + except TypeError: + pass return data_object @staticmethod diff --git a/openapi_tester/schema_tester.py b/openapi_tester/schema_tester.py index abe8d324..6ddb2754 100644 --- a/openapi_tester/schema_tester.py +++ b/openapi_tester/schema_tester.py @@ -13,6 +13,7 @@ from openapi_tester.constants import ( ANY_OF_ERROR, EXCESS_RESPONSE_KEY_ERROR, + INIT_ERROR, INVALID_PATTERN_ERROR, MISSING_RESPONSE_KEY_ERROR, NONE_ERROR, @@ -69,7 +70,70 @@ def __init__( elif "drf_yasg" in settings.INSTALLED_APPS: self.loader = DrfYasgSchemaLoader() else: - raise ImproperlyConfigured("No loader is configured.") + raise ImproperlyConfigured(INIT_ERROR) + + @staticmethod + def _get_key_value(schema: dict, key: str, error_addon: str = "") -> dict: + """ + Returns the value of a given key + """ + try: + return schema[key] + except KeyError as e: + raise UndocumentedSchemaSectionError( + UNDOCUMENTED_SCHEMA_SECTION_ERROR.format(key=key, error_addon=error_addon) + ) from e + + @staticmethod + def _get_status_code(schema: dict, status_code: Union[str, int], error_addon: str = "") -> dict: + """ + Returns the status code section of a schema, handles both str and int status codes + """ + if str(status_code) in schema: + return schema[str(status_code)] + if int(status_code) in schema: + return schema[int(status_code)] + raise UndocumentedSchemaSectionError( + UNDOCUMENTED_SCHEMA_SECTION_ERROR.format(key=status_code, error_addon=error_addon) + ) + + def get_response_schema_section(self, response: td.Response) -> dict: + """ + Fetches the response section of a schema, wrt. the route, method, status code, and schema version. + + :param response: DRF Response Instance + :return dict + """ + schema = self.loader.get_schema() + parameterized_path = self.loader.parameterize_path(response.request["PATH_INFO"]) + paths_object = self._get_key_value(schema, "paths") + + pretty_routes = "\n\t• ".join(paths_object.keys()) + route_object = self._get_key_value( + paths_object, + parameterized_path, + f"\n\nFor debugging purposes, other valid routes include: \n\n\t• {pretty_routes}", + ) + + str_methods = ", ".join(method.upper() for method in route_object.keys() if method.upper() != "PARAMETERS") + method_object = self._get_key_value( + route_object, response.request["REQUEST_METHOD"].lower(), f"\n\nAvailable methods include: {str_methods}." 
+ ) + + responses_object = self._get_key_value(method_object, "responses") + keys = ", ".join(str(key) for key in responses_object.keys()) + status_code_object = self._get_status_code( + responses_object, + response.status_code, + f"\n\nUndocumented status code: {response.status_code}.\n\nDocumented responses include: {keys}. ", + ) + + if "openapi" not in schema: + # openapi 2.0, i.e. "swagger" has a different structure than openapi 3.0 status sub-schemas + return self._get_key_value(status_code_object, "schema") + content_object = self._get_key_value(status_code_object, "content") + json_object = self._get_key_value(content_object, "application/json") + return self._get_key_value(json_object, "schema") def handle_all_of( self, @@ -150,69 +214,6 @@ def handle_any_of( hint="", ) - @staticmethod - def _get_key_value(schema: dict, key: str, error_addon: str = "") -> dict: - """ - Returns the value of a given key - """ - try: - return schema[key] - except KeyError as e: - raise UndocumentedSchemaSectionError( - UNDOCUMENTED_SCHEMA_SECTION_ERROR.format(key=key, error_addon=error_addon) - ) from e - - @staticmethod - def _get_status_code(schema: dict, status_code: Union[str, int], error_addon: str = "") -> dict: - """ - Returns the status code section of a schema, handles both str and int status codes - """ - if str(status_code) in schema: - return schema[str(status_code)] - if int(status_code) in schema: - return schema[int(status_code)] - raise UndocumentedSchemaSectionError( - UNDOCUMENTED_SCHEMA_SECTION_ERROR.format(key=status_code, error_addon=error_addon) - ) - - def get_response_schema_section(self, response: td.Response) -> dict: - """ - Fetches the response section of a schema, wrt. the route, method, status code, and schema version. - - :param response: DRF Response Instance - :return dict - """ - schema = self.loader.get_schema() - parameterized_path = self.loader.parameterize_path(response.request["PATH_INFO"]) - paths_object = self._get_key_value(schema, "paths") - - pretty_routes = "\n\t• ".join(paths_object.keys()) - route_object = self._get_key_value( - paths_object, - parameterized_path, - f"\n\nFor debugging purposes, other valid routes include: \n\n\t• {pretty_routes}", - ) - - str_methods = ", ".join(method.upper() for method in route_object.keys() if method.upper() != "PARAMETERS") - method_object = self._get_key_value( - route_object, response.request["REQUEST_METHOD"].lower(), f"\n\nAvailable methods include: {str_methods}." - ) - - responses_object = self._get_key_value(method_object, "responses") - keys = ", ".join(str(key) for key in responses_object.keys()) - status_code_object = self._get_status_code( - responses_object, - response.status_code, - f"\n\nUndocumented status code: {response.status_code}.\n\nDocumented responses include: {keys}. ", - ) - - if "openapi" not in schema: - # openapi 2.0, i.e. 
"swagger" has a different structure than openapi 3.0 status sub-schemas - return self._get_key_value(status_code_object, "schema") - content_object = self._get_key_value(status_code_object, "content") - json_object = self._get_key_value(content_object, "application/json") - return self._get_key_value(json_object, "schema") - @staticmethod def is_nullable(schema_item: dict) -> bool: """ @@ -267,11 +268,7 @@ def _validate_format(schema_section: dict, data: Any) -> Optional[str]: valid = isinstance(data, bytes) elif schema_format in ["date", "date-time"]: parser = parse_date if schema_format == "date" else parse_datetime - try: - result = parser(data) - valid = result is not None - except ValueError: - valid = False + valid = parser(data) is not None return None if valid else VALIDATE_FORMAT_ERROR.format(expected=schema_section["format"], received=str(data)) def _validate_openapi_type(self, schema_section: dict, data: Any) -> Optional[str]: @@ -281,14 +278,17 @@ def _validate_openapi_type(self, schema_section: dict, data: Any) -> Optional[st return None if schema_type in ["file", "string"]: valid = isinstance(data, (str, bytes)) + elif schema_type == "boolean": + valid = isinstance(data, bool) elif schema_type == "integer": - valid = isinstance(data, int) + valid = isinstance(data, int) and not isinstance(data, bool) elif schema_type == "number": - valid = isinstance(data, (int, float)) + valid = isinstance(data, (int, float)) and not isinstance(data, bool) elif schema_type == "object": valid = isinstance(data, dict) elif schema_type == "array": valid = isinstance(data, list) + return ( None if valid diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py index ca6d5bd7..f6dcde14 100644 --- a/tests/test_exceptions.py +++ b/tests/test_exceptions.py @@ -45,3 +45,12 @@ def test_documentation_error_message(): def test_case_error_message(): error = CaseError(key="test-key", case="camelCase", expected="testKey") assert error.args[0].strip() == "The response key `test-key` is not properly camelCase. 
Expected value: testKey" + + +def test_documentation_error_sort_data_type(): + assert DocumentationError._sort_data([1, 3, 2]) == [1, 2, 3] # list + assert DocumentationError._sort_data({"1", "3", "2"}) == {"1", "2", "3"} # set + assert DocumentationError._sort_data({"1": "a", "3": "a", "2": "a"}) == {"1": "a", "2": "a", "3": "a"} # dict + + # Test sort failure scenario - expect the method to succeed and default to no reordering + assert DocumentationError._sort_data(["1", {}, []]) == ["1", {}, []] diff --git a/tests/test_schema_tester.py b/tests/test_schema_tester.py index 042b6a3b..2f9d22e7 100644 --- a/tests/test_schema_tester.py +++ b/tests/test_schema_tester.py @@ -5,15 +5,41 @@ from unittest.mock import patch import pytest +from django.core.exceptions import ImproperlyConfigured from openapi_tester import ( CaseError, DocumentationError, + DrfSpectacularSchemaLoader, + DrfYasgSchemaLoader, + OpenAPISchemaError, + SchemaTester, StaticSchemaLoader, UndocumentedSchemaSectionError, is_pascal_case, ) -from openapi_tester.schema_tester import SchemaTester +from openapi_tester.constants import ( + ANY_OF_ERROR, + EXCESS_RESPONSE_KEY_ERROR, + INIT_ERROR, + MISSING_RESPONSE_KEY_ERROR, + NONE_ERROR, + ONE_OF_ERROR, + OPENAPI_PYTHON_MAPPING, + VALIDATE_ENUM_ERROR, + VALIDATE_FORMAT_ERROR, + VALIDATE_MAX_ARRAY_LENGTH_ERROR, + VALIDATE_MAX_LENGTH_ERROR, + VALIDATE_MAXIMUM_ERROR, + VALIDATE_MAXIMUM_NUMBER_OF_PROPERTIES_ERROR, + VALIDATE_MIN_ARRAY_LENGTH_ERROR, + VALIDATE_MIN_LENGTH_ERROR, + VALIDATE_MINIMUM_ERROR, + VALIDATE_MINIMUM_NUMBER_OF_PROPERTIES_ERROR, + VALIDATE_MULTIPLE_OF_ERROR, + VALIDATE_TYPE_ERROR, + VALIDATE_UNIQUE_ITEMS_ERROR, +) from tests.utils import CURRENT_PATH, iterate_schema, load_schema, pass_mock_value, response_factory parameterized_path = "/api/{version}/cars/correct" @@ -29,207 +55,650 @@ def _mocked(): return _mocked -def test_validate_response_success_scenario_with_autogenerated_data(): - static_tester = SchemaTester(schema_file_path=str(CURRENT_PATH) + "/schemas/test_project_schema.yaml") - schema = deepcopy(load_schema("test_project_schema.yaml")) - for schema_section, response, _ in iterate_schema(schema): - if schema_section and response: - static_tester.validate_response(response) - assert sorted(static_tester.get_response_schema_section(response)) == sorted(schema_section) +# region template tests -def test_validate_response_success_scenario_with_predefined_data(client): - tester = SchemaTester() - good_test_data = [ - { - "url": "/api/v1/cars/correct", - "expected_response": [ - { - "name": "Saab", - "color": "Yellow", - "height": "Medium height", - "width": "Very wide", - "length": "2 meters", - }, - {"name": "Volvo", "color": "Red", "height": "Medium height", "width": "Not wide", "length": "2 meters"}, - {"name": "Tesla", "color": "black", "height": "Medium height", "width": "Wide", "length": "2 meters"}, - ], - }, - { - "url": "/api/v1/trucks/correct", - "expected_response": [ - { - "name": "Saab", - "color": "Yellow", - "height": "Medium height", - "width": "Very wide", - "length": "2 meters", - }, - {"name": "Volvo", "color": "Red", "height": "Medium height", "width": "Not wide", "length": "2 meters"}, - {"name": "Tesla", "color": "black", "height": "Medium height", "width": "Wide", "length": "2 meters"}, - ], - }, - ] - for item in good_test_data: - response = client.get(item["url"]) - assert response.status_code == 200 - assert response.json() == item["expected_response"] - tester.validate_response(response=response) +class TestSchemaExamples: + def 
test_validate_response_success_scenario_with_autogenerated_data(self): + static_tester = SchemaTester(schema_file_path=str(CURRENT_PATH) + "/schemas/test_project_schema.yaml") + schema = deepcopy(load_schema("test_project_schema.yaml")) + for schema_section, response, _ in iterate_schema(schema): + if schema_section and response: + static_tester.validate_response(response) + assert sorted(static_tester.get_response_schema_section(response)) == sorted(schema_section) + + def test_validate_response_success_scenario_with_predefined_data(self, client): + tester = SchemaTester() + good_test_data = [ + { + "url": "/api/v1/cars/correct", + "expected_response": [ + { + "name": "Saab", + "color": "Yellow", + "height": "Medium height", + "width": "Very wide", + "length": "2 meters", + }, + { + "name": "Volvo", + "color": "Red", + "height": "Medium height", + "width": "Not wide", + "length": "2 meters", + }, + { + "name": "Tesla", + "color": "black", + "height": "Medium height", + "width": "Wide", + "length": "2 meters", + }, + ], + }, + { + "url": "/api/v1/trucks/correct", + "expected_response": [ + { + "name": "Saab", + "color": "Yellow", + "height": "Medium height", + "width": "Very wide", + "length": "2 meters", + }, + { + "name": "Volvo", + "color": "Red", + "height": "Medium height", + "width": "Not wide", + "length": "2 meters", + }, + { + "name": "Tesla", + "color": "black", + "height": "Medium height", + "width": "Wide", + "length": "2 meters", + }, + ], + }, + ] + for item in good_test_data: + response = client.get(item["url"]) + assert response.status_code == 200 + assert response.json() == item["expected_response"] + tester.validate_response(response=response) + + def test_validate_response_failure_scenario_with_predefined_data(self, client): + tester = SchemaTester() + bad_test_data = [ + { + "url": "/api/v1/cars/incorrect", + "expected_response": [ + {"name": "Saab", "color": "Yellow", "height": "Medium height"}, + {"name": "Volvo", "color": "Red", "width": "Not very wide", "length": "2 meters"}, + {"name": "Tesla", "height": "Medium height", "width": "Medium width", "length": "2 meters"}, + ], + }, + { + "url": "/api/v1/trucks/incorrect", + "expected_response": [ + {"name": "Saab", "color": "Yellow", "height": "Medium height"}, + {"name": "Volvo", "color": "Red", "width": "Not very wide", "length": "2 meters"}, + {"name": "Tesla", "height": "Medium height", "width": "Medium width", "length": "2 meters"}, + ], + }, + { + "url": "/api/v1/trucks/incorrect", + "expected_response": [ + {"name": "Saab", "color": "Yellow", "height": "Medium height"}, + {"name": "Volvo", "color": "Red", "width": "Not very wide", "length": "2 meters"}, + {"name": "Tesla", "height": "Medium height", "width": "Medium width", "length": "2 meters"}, + ], + }, + ] + for item in bad_test_data: + response = client.get(item["url"]) + assert response.status_code == 200 + assert response.json() == item["expected_response"] + with pytest.raises( + DocumentationError, match="The following property is missing from the tested data: width" + ): + tester.validate_response(response) + def test_validate_response_failure_scenario_undocumented_path(self, monkeypatch): + tester = SchemaTester() + schema = deepcopy(tester.loader.get_schema()) + schema_section = schema["paths"][parameterized_path][method]["responses"][status]["content"][ + "application/json" + ]["schema"] + del schema["paths"][parameterized_path] + monkeypatch.setattr(tester.loader, "get_schema", _mock_schema(schema)) + response = response_factory(schema_section, 
de_parameterized_path, method, status) + with pytest.raises( + UndocumentedSchemaSectionError, + match=f"Unsuccessfully tried to index the OpenAPI schema by `{parameterized_path}`.", + ): + tester.validate_response(response) -def test_validate_response_failure_scenario_with_predefined_data(client): - tester = SchemaTester() - bad_test_data = [ - { - "url": "/api/v1/cars/incorrect", - "expected_response": [ - {"name": "Saab", "color": "Yellow", "height": "Medium height"}, - {"name": "Volvo", "color": "Red", "width": "Not very wide", "length": "2 meters"}, - {"name": "Tesla", "height": "Medium height", "width": "Medium width", "length": "2 meters"}, - ], - }, - { - "url": "/api/v1/trucks/incorrect", - "expected_response": [ - {"name": "Saab", "color": "Yellow", "height": "Medium height"}, - {"name": "Volvo", "color": "Red", "width": "Not very wide", "length": "2 meters"}, - {"name": "Tesla", "height": "Medium height", "width": "Medium width", "length": "2 meters"}, - ], - }, - { - "url": "/api/v1/trucks/incorrect", - "expected_response": [ - {"name": "Saab", "color": "Yellow", "height": "Medium height"}, - {"name": "Volvo", "color": "Red", "width": "Not very wide", "length": "2 meters"}, - {"name": "Tesla", "height": "Medium height", "width": "Medium width", "length": "2 meters"}, - ], - }, - ] - for item in bad_test_data: - response = client.get(item["url"]) - assert response.status_code == 200 - assert response.json() == item["expected_response"] - with pytest.raises(DocumentationError, match="The following property is missing from the tested data: width"): + def test_validate_response_failure_scenario_undocumented_method(self, monkeypatch): + tester = SchemaTester() + schema = deepcopy(tester.loader.get_schema()) + schema_section = schema["paths"][parameterized_path][method]["responses"][status]["content"][ + "application/json" + ]["schema"] + del schema["paths"][parameterized_path][method] + monkeypatch.setattr(tester.loader, "get_schema", _mock_schema(schema)) + response = response_factory(schema_section, de_parameterized_path, method, status) + with pytest.raises( + UndocumentedSchemaSectionError, + match=f"Unsuccessfully tried to index the OpenAPI schema by `{method}`.", + ): tester.validate_response(response) + def test_validate_response_failure_scenario_undocumented_status_code(self, monkeypatch): + tester = SchemaTester() + schema = deepcopy(tester.loader.get_schema()) + schema_section = schema["paths"][parameterized_path][method]["responses"][status]["content"][ + "application/json" + ]["schema"] + del schema["paths"][parameterized_path][method]["responses"][status] + monkeypatch.setattr(tester.loader, "get_schema", _mock_schema(schema)) + response = response_factory(schema_section, de_parameterized_path, method, status) + with pytest.raises( + UndocumentedSchemaSectionError, + match=f"Unsuccessfully tried to index the OpenAPI schema by `{status}`.", + ): + tester.validate_response(response) -def test_validate_response_failure_scenario_undocumented_path(monkeypatch): - tester = SchemaTester() - schema = deepcopy(tester.loader.get_schema()) - schema_section = schema["paths"][parameterized_path][method]["responses"][status]["content"]["application/json"][ - "schema" - ] - del schema["paths"][parameterized_path] - monkeypatch.setattr(tester.loader, "get_schema", _mock_schema(schema)) - response = response_factory(schema_section, de_parameterized_path, method, status) - with pytest.raises( - UndocumentedSchemaSectionError, - match=f"Unsuccessfully tried to index the OpenAPI schema by 
`{parameterized_path}`.", - ): - tester.validate_response(response) + def test_validate_response_global_case_tester(self, client): + tester_with_case_tester = SchemaTester(case_tester=is_pascal_case) + response = client.get(de_parameterized_path) + with pytest.raises( + CaseError, match="The response key `name` is not properly PascalCased. Expected value: Name" + ): + tester_with_case_tester.validate_response(response=response) + + def test_validate_response_global_ignored_case(self, client): + tester_with_case_tester = SchemaTester( + case_tester=is_pascal_case, ignore_case=["name", "color", "height", "width", "length"] + ) + response = client.get(de_parameterized_path) + tester_with_case_tester.validate_response(response=response) + def test_validate_response_passed_in_case_tester(self, client): + tester = SchemaTester() + response = client.get(de_parameterized_path) + with pytest.raises( + CaseError, match="The response key `name` is not properly PascalCased. Expected value: Name" + ): + tester.validate_response(response=response, case_tester=is_pascal_case) + + def test_validate_response_passed_in_ignored_case(self, client): + tester = SchemaTester() + response = client.get(de_parameterized_path) + tester.validate_response( + response=response, case_tester=is_pascal_case, ignore_case=["name", "color", "height", "width", "length"] + ) + + def test_reference_schema(self): + schema_path = str(CURRENT_PATH) + "/schemas" + for schema_file in [ + f"{schema_path}/openapi_v2_reference_schema.yaml", + f"{schema_path}/openapi_v3_reference_schema.yaml", + ]: + tester = SchemaTester(schema_file_path=schema_file) + schema = tester.loader.load_schema() + de_referenced_schema = tester.loader.de_reference_schema(schema) + for schema_section, response, url_fragment in iterate_schema(de_referenced_schema): + if schema_section and response: + with patch.object( + StaticSchemaLoader, "parameterize_path", side_effect=pass_mock_value(url_fragment) + ): + tester.validate_response(response) + # assert sorted(tester.get_response_schema_section(response)) == sorted(schema_section) # TODO: Uncomment and fix -def test_validate_response_failure_scenario_undocumented_method(monkeypatch): - tester = SchemaTester() - schema = deepcopy(tester.loader.get_schema()) - schema_section = schema["paths"][parameterized_path][method]["responses"][status]["content"]["application/json"][ - "schema" + def test_one_of_any_of_schemas(self): + tester = SchemaTester(schema_file_path=str(CURRENT_PATH) + "/schemas/one_of_any_of_test_schema.yaml") + schema = tester.loader.load_schema() + de_referenced_schema = tester.loader.de_reference_schema(schema) + tester.loader.schema = de_referenced_schema + for schema_section, response, url_fragment in iterate_schema(de_referenced_schema): + if schema_section and response: + with patch.object(StaticSchemaLoader, "parameterize_path", side_effect=pass_mock_value(url_fragment)): + tester.validate_response(response) + assert sorted(tester.get_response_schema_section(response)) == sorted(schema_section) + + def test_sample_schemas(self): + for filename in glob.iglob(str(CURRENT_PATH) + "/schemas/sample-schemas/**/**", recursive=True): + if os.path.isfile(filename) and "metadata" not in filename: + tester = SchemaTester(schema_file_path=filename) + schema = tester.loader.load_schema() + de_referenced_schema = tester.loader.de_reference_schema(schema) + tester.loader.schema = de_referenced_schema + for schema_section, response, url_fragment in iterate_schema(de_referenced_schema): + if schema_section 
and response:
+                        with patch.object(
+                            StaticSchemaLoader, "parameterize_path", side_effect=pass_mock_value(url_fragment)
+                        ):
+                            tester.validate_response(response)
+                            assert sorted(tester.get_response_schema_section(response)) == sorted(schema_section)
+
+
+example_schema_array = {"type": "array", "items": {"type": "string"}}
+example_array = ["string"]
+example_schema_integer = {"type": "integer", "minimum": 3, "maximum": 5}
+example_integer = 3
+example_schema_number = {"type": "number", "minimum": 3, "maximum": 5}
+example_number = 3.2
+example_schema_object = {"type": "object", "properties": {"value": {"type": "integer"}}, "required": ["value"]}
+example_object = {"value": 1}
+example_schema_string = {"type": "string", "minLength": 3, "maxLength": 5}
+example_string = "str"
+
+example_response_types = [example_array, example_integer, example_number, example_object, example_string]
+example_schema_types = [
+    example_schema_array,
+    example_schema_integer,
+    example_schema_number,
+    example_schema_object,
+    example_schema_string,
+]
+
+tester = SchemaTester()
+
+
+class TestTypeValidation:
+    def test_successful_type_validation(self):
+        # The examples we've set up should always pass
+        for schema, response in zip(example_schema_types, example_response_types):
+            tester.test_schema_section(schema, response)
+
+        # An empty array should always pass
+        tester.test_schema_section(example_schema_array, [])
+
+        # Schemas with no 'type' property should always pass
+        for response in example_response_types:
+            tester.test_schema_section({}, response)
+
+    def test_nullable(self):
+        for schema in example_schema_types:
+            # A null value should always raise an error
+            with pytest.raises(
+                DocumentationError, match=NONE_ERROR.format(expected=OPENAPI_PYTHON_MAPPING[schema["type"]])
+            ):
+                tester.test_schema_section(schema, None)
+
+            # Unless the schema specifies it should be nullable
+
+            # OpenAPI 3+
+            schema["nullable"] = True
+            tester.test_schema_section(schema, None)
+
+            # Swagger 2.0
+            del schema["nullable"]
+            schema["x-nullable"] = True
+            tester.test_schema_section(schema, None)
+
+    def test_unsuccessful_type_validation(self):
+        """ Type mismatches should raise errors """
+        for schema in example_schema_types:
+            for response in example_response_types:
+
+                response_python_type = type(response).__name__
+                schema_python_type = OPENAPI_PYTHON_MAPPING[schema["type"]]
+
+                if response_python_type in schema_python_type:
+                    # Skip testing if the types are the same
+                    # Use `in` because the number type is 'int or float', not just float
+                    continue
+
+                with pytest.raises(
+                    DocumentationError,
+                    match=VALIDATE_TYPE_ERROR.format(expected=schema_python_type, received=response_python_type),
+                ):
+                    tester.test_schema_section(schema, response)
+
+
+class TestTypeAttributes:
+    def test_min_and_max_length(self):
+        # Not adhering to minLength limitations should raise an error
+        with pytest.raises(DocumentationError, match=VALIDATE_MIN_LENGTH_ERROR.format(data="a" * 2, min_length=3)):
+            tester.test_schema_section(example_schema_string, "a" * 2)
+
+        # Not adhering to maxLength limitations should raise an error
+        with pytest.raises(DocumentationError, match=VALIDATE_MAX_LENGTH_ERROR.format(data="a" * 6, max_length=5)):
+            tester.test_schema_section(example_schema_string, "a" * 6)
+
+    def test_min_and_max_items(self):
+        # Not adhering to minItems limitations should raise an error
+        with pytest.raises(
+            DocumentationError, match=VALIDATE_MIN_ARRAY_LENGTH_ERROR.format(data=r"\['string'\]", min_length=2)
+        ):
+            schema = {"type": "array", "items": {"type": "string"}, "minItems": 2}
+            tester.test_schema_section(schema, ["string"])
+
+        # Not adhering to maxItems limitations should raise an error
+        with pytest.raises(
+            DocumentationError,
+            match=VALIDATE_MAX_ARRAY_LENGTH_ERROR.format(
+                data=r"\['string', 'string', 'string', 'string', 'string', 'string'\]", max_length=5
+            ),
+        ):
+            schema = {"type": "array", "items": {"type": "string"}, "maxItems": 5}
+            tester.test_schema_section(schema, ["string"] * 6)
+
+    def test_min_and_max_number_of_properties(self):
+        # Not adhering to minProperties limitations should raise an error
+        with pytest.raises(DocumentationError, match=VALIDATE_MINIMUM_NUMBER_OF_PROPERTIES_ERROR[:10]):
+            schema = {"type": "object", "properties": {"oneKey": {"type": "string"}}, "minProperties": 2}
+            tester.test_schema_section(schema, {"oneKey": "test"})
+
+        # Not adhering to maxProperties limitations should raise an error
+        with pytest.raises(DocumentationError, match=VALIDATE_MAXIMUM_NUMBER_OF_PROPERTIES_ERROR[:10]):
+            schema = {
+                "type": "object",
+                "properties": {"oneKey": {"type": "string"}, "twoKey": {"type": "string"}},
+                "maxProperties": 1,
+            }
+            tester.test_schema_section(schema, {"oneKey": "test", "twoKey": "test"})
+
+    def test_pattern(self):
+        """ A regex pattern can be passed to describe how a string should look """
+        schema = {"type": "string", "pattern": r"^\d{3}-\d{2}-\d{4}$"}
+
+        # Should pass
+        tester.test_schema_section(schema, "123-45-6789")
+
+        # Bad pattern should fail
+        with pytest.raises(DocumentationError, match="String 'test' does not validate using the specified pattern"):
+            tester.test_schema_section(schema, "test")
+
+        # And if we get compile errors, we need to handle this too
+        schema = {"type": "string", "pattern": r"**"}
+        with pytest.raises(OpenAPISchemaError):
+            tester.test_schema_section(schema, "test")
+
+    def test_exclusives(self):
+        """ The minimum is included, unless specified """
+
+        # Pass when set to minimum
+        schema = {"type": "integer", "minimum": 3, "exclusiveMinimum": False, "maximum": 5}
+        tester.test_schema_section(schema, 3)
+
+        # Fail when we exclude the minimum
+        schema["exclusiveMinimum"] = True
+        with pytest.raises(DocumentationError, match=VALIDATE_MINIMUM_ERROR.format(data=3, minimum=4)):
+            tester.test_schema_section(schema, 3)
+
+        # Fail when we exclude the maximum
+        schema["exclusiveMaximum"] = True
+        with pytest.raises(DocumentationError, match=VALIDATE_MAXIMUM_ERROR.format(data=5, maximum=4)):
+            tester.test_schema_section(schema, 5)
+
+        # Pass when we include the maximum
+        schema["exclusiveMaximum"] = False
+        tester.test_schema_section(schema, 5)
+
+    def test_maximum_and_minimum(self):
+        # Not adhering to maximum limitations should raise an error
+        for num, schema in [(6, example_schema_integer), (6.12, example_schema_number)]:
+            with pytest.raises(DocumentationError, match=VALIDATE_MAXIMUM_ERROR.format(data=num, maximum=5)):
+                tester.test_schema_section(schema, num)
+
+        # Not adhering to minimum limitations should raise an error
+        for num, schema in [(2, example_schema_integer), (2.22, example_schema_number)]:
+            with pytest.raises(DocumentationError, match=VALIDATE_MINIMUM_ERROR.format(data=num, minimum=3)):
+                tester.test_schema_section(schema, num)
+
+    def test_enum_validation(self):
+        tester.test_schema_section({"type": "string", "enum": ["Cat", "Dog"]}, "Cat")
+        tester.test_schema_section({"type": "string", "enum": ["Cat", "Dog"]}, "Dog")
+
+        with pytest.raises(
+            DocumentationError, match=VALIDATE_ENUM_ERROR.format(enum=r"\['Cat', 'Dog'\]", 
received="Turtle") + ): + tester.test_schema_section({"type": "string", "enum": ["Cat", "Dog"]}, "Turtle") + + def test_multiple_of(self): + for num, _type in [(5, "integer"), (5, "number")]: + # Pass + schema = {"multipleOf": num, "type": _type} + for integer in [5, 10, 15, 20, 25]: + tester.test_schema_section(schema, integer) + + # Fail + with pytest.raises(DocumentationError, match=VALIDATE_MULTIPLE_OF_ERROR.format(data=num + 2, multiple=num)): + tester.test_schema_section(schema, num + 2) + + def test_unique_items_validator(self): + with pytest.raises(DocumentationError, match=VALIDATE_UNIQUE_ITEMS_ERROR): + schema = {"type": "array", "items": {"type": "string"}, "uniqueItems": True} + tester.test_schema_section(schema, ["identical value", "identical value", "non-identical value"]) + + +class TestFormats: + def test_date_format(self): + # ISO8601 is valid + tester.test_schema_section({"type": "string", "format": "date"}, "2040-01-01") + + # This is invalid + with pytest.raises( + DocumentationError, match=VALIDATE_FORMAT_ERROR.format(expected="date", received="01-31-2019") + ): + tester.test_schema_section({"type": "string", "format": "date"}, "01-31-2019") + + def test_datetime(self): + # ISO8601 is valid + tester.test_schema_section({"type": "string", "format": "date-time"}, "2040-01-01 08:00") + + # This is invalid + with pytest.raises( + DocumentationError, match=VALIDATE_FORMAT_ERROR.format(expected="date-time", received="2040-01-01 0800") + ): + tester.test_schema_section({"type": "string", "format": "date-time"}, "2040-01-01 0800") + + def test_byte(self): + tester.test_schema_section({"type": "string", "format": "byte"}, b"test") + + with pytest.raises(DocumentationError, match=VALIDATE_FORMAT_ERROR.format(expected="byte", received="test")): + tester.test_schema_section({"type": "string", "format": "byte"}, "test") + + +example_anyof_response = { + "type": "object", + "anyOf": [ + {"properties": {"oneKey": {"type": "string"}}}, + {"properties": {"anotherKey": {"type": "integer"}}}, + ], +} + + +def test_anyof(): + # Test first possibility + tester.test_schema_section(example_anyof_response, {"oneKey": "test"}) + + # Test second possibility + tester.test_schema_section(example_anyof_response, {"anotherKey": 1}) + + # Test a few bad responses + data = [ + {"oneKey": 1}, # bad type + {"anotherKey": "test"}, # bad type + {"thirdKey": "test"}, # bad key + {"thirdKey": 1}, # bad key + [], # bad type + "test", # bad type + 1, # bad type ] - del schema["paths"][parameterized_path][method] - monkeypatch.setattr(tester.loader, "get_schema", _mock_schema(schema)) - response = response_factory(schema_section, de_parameterized_path, method, status) - with pytest.raises( - UndocumentedSchemaSectionError, - match=f"Unsuccessfully tried to index the OpenAPI schema by `{method}`.", - ): - tester.validate_response(response) - + for datum in data: + with pytest.raises(DocumentationError, match=ANY_OF_ERROR): + tester.test_schema_section(example_anyof_response, datum) -def test_validate_response_failure_scenario_undocumented_status_code(monkeypatch): - tester = SchemaTester() - schema = deepcopy(tester.loader.get_schema()) - schema_section = schema["paths"][parameterized_path][method]["responses"][status]["content"]["application/json"][ - "schema" - ] - del schema["paths"][parameterized_path][method]["responses"][status] - monkeypatch.setattr(tester.loader, "get_schema", _mock_schema(schema)) - response = response_factory(schema_section, de_parameterized_path, method, status) - with 
pytest.raises( - UndocumentedSchemaSectionError, - match=f"Unsuccessfully tried to index the OpenAPI schema by `{status}`.", - ): - tester.validate_response(response) +docs_anyof_example = { + "type": "object", + "anyOf": [ + { + "required": ["age"], + "properties": { + "age": {"type": "integer"}, + "nickname": {"type": "string"}, + }, + }, + { + "required": ["pet_type"], + "properties": { + "pet_type": {"type": "string", "enum": ["Cat", "Dog"]}, + "hunts": {"type": "boolean"}, + }, + }, + ], +} -def test_validate_response_global_case_tester(client): - tester_with_case_tester = SchemaTester(case_tester=is_pascal_case) - response = client.get(de_parameterized_path) - with pytest.raises(CaseError, match="The response key `name` is not properly PascalCased. Expected value: Name"): - tester_with_case_tester.validate_response(response=response) +def test_anyof_official_documentation_example(): + """ + This test makes sure our anyOf implementation works as described in the official example docs: + https://swagger.io/docs/specification/data-models/oneof-anyof-allof-not/#anyof + """ + tester.test_schema_section(docs_anyof_example, {"age": 50}) + tester.test_schema_section(docs_anyof_example, {"pet_type": "Cat", "hunts": True}) + tester.test_schema_section(docs_anyof_example, {"nickname": "Fido", "pet_type": "Dog", "age": 44}) -def test_validate_response_global_ignored_case(client): - tester_with_case_tester = SchemaTester( - case_tester=is_pascal_case, ignore_case=["name", "color", "height", "width", "length"] - ) - response = client.get(de_parameterized_path) - tester_with_case_tester.validate_response(response=response) + with pytest.raises(DocumentationError): + tester.test_schema_section(docs_anyof_example, {"nickname": "Mr. Paws", "hunts": False}) -def test_validate_response_passed_in_case_tester(client): - tester = SchemaTester() - response = client.get(de_parameterized_path) - with pytest.raises(CaseError, match="The response key `name` is not properly PascalCased. 
Expected value: Name"): - tester.validate_response(response=response, case_tester=is_pascal_case) +# endregion -def test_validate_response_passed_in_ignored_case(client): - tester = SchemaTester() - response = client.get(de_parameterized_path) - tester.validate_response( - response=response, case_tester=is_pascal_case, ignore_case=["name", "color", "height", "width", "length"] - ) +def test_loader_inference(settings): + # Test drf-spectacular + assert isinstance(SchemaTester().loader, DrfSpectacularSchemaLoader) + # Test drf-yasg + settings.INSTALLED_APPS.pop(settings.INSTALLED_APPS.index("drf_spectacular")) + settings.INSTALLED_APPS.append("drf_yasg") + assert isinstance(SchemaTester().loader, DrfYasgSchemaLoader) -def test_reference_schema(): - schema_path = str(CURRENT_PATH) + "/schemas" - for schema_file in [ - f"{schema_path}/openapi_v2_reference_schema.yaml", - f"{schema_path}/openapi_v3_reference_schema.yaml", - ]: - tester = SchemaTester(schema_file_path=schema_file) - schema = tester.loader.load_schema() - de_referenced_schema = tester.loader.de_reference_schema(schema) - for schema_section, response, url_fragment in iterate_schema(de_referenced_schema): - if schema_section and response: - with patch.object(StaticSchemaLoader, "parameterize_path", side_effect=pass_mock_value(url_fragment)): - tester.validate_response(response) - # assert sorted(tester.get_response_schema_section(response)) == sorted(schema_section) # TODO: Uncomment and fix + # Test static loader + assert isinstance(SchemaTester(schema_file_path="test").loader, StaticSchemaLoader) + # Test no loader + settings.INSTALLED_APPS = [] + with pytest.raises(ImproperlyConfigured, match=INIT_ERROR): + SchemaTester() -def test_one_of_any_of_schemas(): - tester = SchemaTester(schema_file_path=str(CURRENT_PATH) + "/schemas/one_of_any_of_test_schema.yaml") - schema = tester.loader.load_schema() - de_referenced_schema = tester.loader.de_reference_schema(schema) - tester.loader.schema = de_referenced_schema - for schema_section, response, url_fragment in iterate_schema(de_referenced_schema): - if schema_section and response: - with patch.object(StaticSchemaLoader, "parameterize_path", side_effect=pass_mock_value(url_fragment)): - tester.validate_response(response) - assert sorted(tester.get_response_schema_section(response)) == sorted(schema_section) +def test_one_of(): + all_types = [ + {"type": "string"}, + {"type": "number"}, + {"type": "integer"}, + {"type": "boolean"}, + {"type": "array", "items": {}}, + {"type": "object"}, + ] -def test_sample_schemas(): - for filename in glob.iglob(str(CURRENT_PATH) + "/schemas/sample-schemas/**/**", recursive=True): - if os.path.isfile(filename) and "metadata" not in filename: - tester = SchemaTester(schema_file_path=filename) - schema = tester.loader.load_schema() - de_referenced_schema = tester.loader.de_reference_schema(schema) - tester.loader.schema = de_referenced_schema - for schema_section, response, url_fragment in iterate_schema(de_referenced_schema): - if schema_section and response: - with patch.object( - StaticSchemaLoader, "parameterize_path", side_effect=pass_mock_value(url_fragment) - ): - tester.validate_response(response) - assert sorted(tester.get_response_schema_section(response)) == sorted(schema_section) + # Make sure integers are validated correctly + non_int_types = all_types[:1] + all_types[3:] + int_types = all_types[1:3] + int_value = 1 + for type in non_int_types: + with pytest.raises(DocumentationError, match=ONE_OF_ERROR.format(matches=0)): + 
tester.test_schema_section({"oneOf": [type]}, int_value) + for type in int_types: + tester.test_schema_section({"oneOf": [type]}, int_value) + + # Make sure strings are validated correctly + non_string_types = all_types[1:] + string_types = all_types[:1] + string_value = "test" + for type in non_string_types: + with pytest.raises(DocumentationError, match=ONE_OF_ERROR.format(matches=0)): + tester.test_schema_section({"oneOf": [type]}, string_value) + for type in string_types: + tester.test_schema_section({"oneOf": [type]}, string_value) + + # Make sure booleans are validated correctly + non_boolean_types = all_types[:3] + all_types[4:] + boolean_types = [all_types[3]] + boolean_value = False + for type in non_boolean_types: + with pytest.raises(DocumentationError, match=ONE_OF_ERROR.format(matches=0)): + tester.test_schema_section({"oneOf": [type]}, boolean_value) + for type in boolean_types: + tester.test_schema_section({"oneOf": [type]}, boolean_value) + + # Make sure arrays are validated correctly + non_array_types = all_types[:4] + all_types[5:] + array_types = [all_types[4]] + array_value = [] + for type in non_array_types: + with pytest.raises(DocumentationError, match=ONE_OF_ERROR.format(matches=0)): + tester.test_schema_section({"oneOf": [type]}, array_value) + for type in array_types: + tester.test_schema_section({"oneOf": [type]}, array_value) + + # Make sure arrays are validated correctly + non_object_types = all_types[:5] + object_types = [all_types[5]] + object_value = {} + for type in non_object_types: + with pytest.raises(DocumentationError, match=ONE_OF_ERROR.format(matches=0)): + tester.test_schema_section({"oneOf": [type]}, object_value) + for type in object_types: + tester.test_schema_section({"oneOf": [type]}, object_value) + + # Make sure we raise the appropriate error when we find several matches + with pytest.raises(DocumentationError, match=ONE_OF_ERROR.format(matches=2)): + tester.test_schema_section( + { + "oneOf": [ + {"type": "string"}, + {"type": "number"}, + {"type": "integer"}, + {"type": "boolean"}, + {"type": "array", "items": {}}, + {"type": "object"}, + ] + }, + 1, + ) + + # Make sure we raise the appropriate error when we find no matches + with pytest.raises(DocumentationError, match=ONE_OF_ERROR.format(matches=0)): + tester.test_schema_section( + { + "oneOf": [ + {"type": "number"}, + {"type": "integer"}, + {"type": "boolean"}, + {"type": "array", "items": {}}, + {"type": "object"}, + ] + }, + "test", + ) + + +def test_response_is_missing_keys(): + with pytest.raises(DocumentationError, match=MISSING_RESPONSE_KEY_ERROR.format(missing_key="value")): + # If a required key is missing, we should raise an error + required_key = {"type": "object", "properties": {"value": {"type": "integer"}}, "required": ["value"]} + tester.test_schema_section(required_key, {}) + + # If not required, it should pass + optional_key = {"type": "object", "properties": {"value": {"type": "integer"}}} + tester.test_schema_section(optional_key, {}) + + +def test_schema_object_is_missing_keys(): + """ Excess keys in a response should raise an error """ + with pytest.raises( + DocumentationError, + match=EXCESS_RESPONSE_KEY_ERROR.format(excess_key="value"), + ): + schema = {"type": "object", "properties": {}} + tester.test_schema_section(schema, example_object) diff --git a/tests/test_test_schema_section.py b/tests/test_test_schema_section.py deleted file mode 100644 index d6439902..00000000 --- a/tests/test_test_schema_section.py +++ /dev/null @@ -1,335 +0,0 @@ -import pytest - -from 
openapi_tester import DocumentationError, OpenAPISchemaError, SchemaTester -from openapi_tester.constants import ( - ANY_OF_ERROR, - EXCESS_RESPONSE_KEY_ERROR, - MISSING_RESPONSE_KEY_ERROR, - NONE_ERROR, - OPENAPI_PYTHON_MAPPING, - VALIDATE_FORMAT_ERROR, - VALIDATE_MAX_ARRAY_LENGTH_ERROR, - VALIDATE_MAX_LENGTH_ERROR, - VALIDATE_MAXIMUM_ERROR, - VALIDATE_MAXIMUM_NUMBER_OF_PROPERTIES_ERROR, - VALIDATE_MIN_ARRAY_LENGTH_ERROR, - VALIDATE_MIN_LENGTH_ERROR, - VALIDATE_MINIMUM_ERROR, - VALIDATE_MINIMUM_NUMBER_OF_PROPERTIES_ERROR, - VALIDATE_MULTIPLE_OF_ERROR, - VALIDATE_TYPE_ERROR, - VALIDATE_UNIQUE_ITEMS_ERROR, -) - -example_schema_array = {"type": "array", "items": {"type": "string"}} -example_array = ["string"] -example_schema_integer = {"type": "integer", "minimum": 3, "maximum": 5} -example_integer = 3 -example_schema_number = {"type": "number", "minimum": 3, "maximum": 5} -example_number = 3.2 -example_schema_object = {"type": "object", "properties": {"value": {"type": "integer"}}, "required": ["value"]} -example_object = {"value": 1} -example_schema_string = {"type": "string", "minLength": 3, "maxLength": 5} -example_string = "str" - -example_response_types = [example_array, example_integer, example_number, example_object, example_string] -example_schema_types = [ - example_schema_array, - example_schema_integer, - example_schema_number, - example_schema_object, - example_schema_string, -] - -tester = SchemaTester() - - -def test_nothing_wrong(): - """ This should always pass """ - for schema, response in zip(example_schema_types, example_response_types): - tester.test_schema_section(schema, response) - - -def test_empty_list(): - """ An empty array should always pass """ - tester.test_schema_section(example_schema_array, []) - - -def test_nullable(): - for schema in example_schema_types: - # A null value should always raise an error - with pytest.raises( - DocumentationError, match=NONE_ERROR.format(expected=OPENAPI_PYTHON_MAPPING[schema["type"]]) - ): - tester.test_schema_section(schema, None) - - # Unless the schema specifies it should be nullable - - # OpenAPI 3+ - schema["nullable"] = True - tester.test_schema_section(schema, None) - - # Swagger 2.0 - del schema["nullable"] - schema["x-nullable"] = True - tester.test_schema_section(schema, None) - - -def test_wrong_type(): - """ Type mismatches should raise errors """ - for schema in example_schema_types: - for response in example_response_types: - - response_python_type = type(response).__name__ - schema_python_type = OPENAPI_PYTHON_MAPPING[schema["type"]] - - if response_python_type in schema_python_type: - # Skip testing if the types are the same - # Use `in` because the number type is 'int or float', not just float - continue - - with pytest.raises( - DocumentationError, - match=VALIDATE_TYPE_ERROR.format(expected=schema_python_type, received=response_python_type), - ): - tester.test_schema_section(schema, response) - - -def test_min_length_violated(): - """ Not adhering to minlength limitations should raise an error """ - with pytest.raises(DocumentationError, match=VALIDATE_MIN_LENGTH_ERROR.format(data="a" * 2, min_length=3)): - tester.test_schema_section(example_schema_string, "a" * 2) - - -def test_max_length_violated(): - """ Not adhering to maxlength limitations should raise an error """ - with pytest.raises(DocumentationError, match=VALIDATE_MAX_LENGTH_ERROR.format(data="a" * 6, max_length=5)): - tester.test_schema_section(example_schema_string, "a" * 6) - - -def test_min_array_length_violated(): - """ Not adhering to 
minlength limitations should raise an error """ - with pytest.raises( - DocumentationError, match=VALIDATE_MIN_ARRAY_LENGTH_ERROR.format(data=r"\['string'\]", min_length=2) - ): - schema = {"type": "array", "items": {"type": "string"}, "minItems": 2} - tester.test_schema_section(schema, ["string"]) - - -def test_max_array_length_violated(): - """ Not adhering to maxlength limitations should raise an error """ - with pytest.raises( - DocumentationError, - match=VALIDATE_MAX_ARRAY_LENGTH_ERROR.format( - data=r"\['string', 'string', 'string', 'string', 'string', 'string'\]", max_length=5 - ), - ): - schema = {"type": "array", "items": {"type": "string"}, "maxItems": 5} - tester.test_schema_section(schema, ["string"] * 6) - - -def test_min_number_of_properties_violated(): - """ Not adhering to minlength limitations should raise an error """ - with pytest.raises(DocumentationError, match=VALIDATE_MINIMUM_NUMBER_OF_PROPERTIES_ERROR[:10]): - schema = {"type": "object", "properties": {"oneKey": {"type": "string"}}, "minProperties": 2} - tester.test_schema_section(schema, {"oneKey": "test"}) - - -def test_max_number_of_properties_violated(): - """ Not adhering to minlength limitations should raise an error """ - with pytest.raises(DocumentationError, match=VALIDATE_MAXIMUM_NUMBER_OF_PROPERTIES_ERROR[:10]): - schema = { - "type": "object", - "properties": {"oneKey": {"type": "string"}, "twoKey": {"type": "string"}}, - "maxProperties": 1, - } - tester.test_schema_section(schema, {"oneKey": "test", "twoKey": "test"}) - - -def test_date_format(): - # ISO8601 is valid - tester.test_schema_section({"type": "string", "format": "date"}, "2040-01-01") - - # This is invalid - with pytest.raises(DocumentationError, match=VALIDATE_FORMAT_ERROR.format(expected="date", received="01-31-2019")): - tester.test_schema_section({"type": "string", "format": "date"}, "01-31-2019") - - -def test_datetime(): - # ISO8601 is valid - tester.test_schema_section({"type": "string", "format": "date-time"}, "2040-01-01 08:00") - - # This is invalid - with pytest.raises( - DocumentationError, match=VALIDATE_FORMAT_ERROR.format(expected="date-time", received="2040-01-01 0800") - ): - tester.test_schema_section({"type": "string", "format": "date-time"}, "2040-01-01 0800") - - -def test_byte(): - tester.test_schema_section({"type": "string", "format": "byte"}, b"test") - - with pytest.raises(DocumentationError, match=VALIDATE_FORMAT_ERROR.format(expected="byte", received="test")): - tester.test_schema_section({"type": "string", "format": "byte"}, "test") - - -def test_pattern(): - """ The a regex pattern can be passed to describe how a string should look """ - schema = {"type": "string", "pattern": r"^\d{3}-\d{2}-\d{4}$"} - - # Should pass - tester.test_schema_section(schema, "123-45-6789") - - # Bad pattern should fail - with pytest.raises(DocumentationError, match="String 'test' does not validate using the specified pattern:"): - tester.test_schema_section(schema, "test") - - # And if we get compile errors, we need to handle this too - schema = {"type": "string", "pattern": r"**"} - with pytest.raises(OpenAPISchemaError): - tester.test_schema_section(schema, "test") - - -def test_exclusives(): - """ The minimum is included, unless specified """ - - # Pass when set to minimum - schema = {"type": "integer", "minimum": 3, "exclusiveMinimum": False, "maximum": 5} - tester.test_schema_section(schema, 3) - - # Fail when we exclude the minimum - schema["exclusiveMinimum"] = True - with pytest.raises(DocumentationError, 
match=VALIDATE_MINIMUM_ERROR.format(data=3, minimum=4)): - tester.test_schema_section(schema, 3) - - # Fail when we exclude the maximum - schema["exclusiveMaximum"] = True - with pytest.raises(DocumentationError, match=VALIDATE_MAXIMUM_ERROR.format(data=5, maximum=4)): - tester.test_schema_section(schema, 5) - - # Pass when we include the maximum - schema["exclusiveMaximum"] = False - tester.test_schema_section(schema, 5) - - -def test_maximum_violated(): - """ Not adhering to maximum limitations should raise an error """ - for num, schema in [(6, example_schema_integer), (6.12, example_schema_number)]: - with pytest.raises(DocumentationError, match=VALIDATE_MAXIMUM_ERROR.format(data=num, maximum=5)): - tester.test_schema_section(schema, num) - - -def test_minimum_violated(): - """ Not adhering to minimum limitations should raise an error """ - for num, schema in [(2, example_schema_integer), (2.22, example_schema_number)]: - with pytest.raises(DocumentationError, match=VALIDATE_MINIMUM_ERROR.format(data=num, minimum=3)): - tester.test_schema_section(schema, num) - - -def test_multiple_of(): - for num, _type in [(5, "integer"), (5, "number")]: - # Pass - schema = {"multipleOf": num, "type": _type} - for integer in [5, 10, 15, 20, 25]: - tester.test_schema_section(schema, integer) - - # Fail - with pytest.raises(DocumentationError, match=VALIDATE_MULTIPLE_OF_ERROR.format(data=num + 2, multiple=num)): - tester.test_schema_section(schema, num + 2) - - -def test_response_is_missing_keys(): - # If a required key is missing, we should raise an error - required_key = {"type": "object", "properties": {"value": {"type": "integer"}}, "required": ["value"]} - with pytest.raises(DocumentationError, match=MISSING_RESPONSE_KEY_ERROR.format(missing_key="value")): - tester.test_schema_section(required_key, {}) - - # If not required, it should pass - optional_key = {"type": "object", "properties": {"value": {"type": "integer"}}} - tester.test_schema_section(optional_key, {}) - - -def test_schema_object_is_missing_keys(): - """ Excess keys in a response should raise an error """ - with pytest.raises( - DocumentationError, - match=EXCESS_RESPONSE_KEY_ERROR.format(excess_key="value"), - ): - schema = {"type": "object", "properties": {}} - tester.test_schema_section(schema, example_object) - - -# region: anyOf unit tests - -example_anyof_response = { - "type": "object", - "anyOf": [ - {"properties": {"oneKey": {"type": "string"}}}, - {"properties": {"anotherKey": {"type": "integer"}}}, - ], -} - - -def test_anyof(): - # Test first possibility - tester.test_schema_section(example_anyof_response, {"oneKey": "test"}) - - # Test second possibility - tester.test_schema_section(example_anyof_response, {"anotherKey": 1}) - - # Test a few bad responses - data = [ - {"oneKey": 1}, # bad type - {"anotherKey": "test"}, # bad type - {"thirdKey": "test"}, # bad key - {"thirdKey": 1}, # bad key - [], # bad type - "test", # bad type - 1, # bad type - ] - for datum in data: - with pytest.raises(DocumentationError, match=ANY_OF_ERROR): - tester.test_schema_section(example_anyof_response, datum) - - -docs_anyof_example = { - "type": "object", - "anyOf": [ - { - "required": ["age"], - "properties": { - "age": {"type": "integer"}, - "nickname": {"type": "string"}, - }, - }, - { - "required": ["pet_type"], - "properties": { - "pet_type": {"type": "string", "enum": ["Cat", "Dog"]}, - "hunts": {"type": "boolean"}, - }, - }, - ], -} - - -def test_anyof_official_documentation_example(): - """ - This test makes sure our anyOf 
implementation works as described in the official example docs: - https://swagger.io/docs/specification/data-models/oneof-anyof-allof-not/#anyof - """ - tester.test_schema_section(docs_anyof_example, {"age": 50}) - tester.test_schema_section(docs_anyof_example, {"pet_type": "Cat", "hunts": True}) - tester.test_schema_section(docs_anyof_example, {"nickname": "Fido", "pet_type": "Dog", "age": 44}) - - with pytest.raises(DocumentationError): - tester.test_schema_section(docs_anyof_example, {"nickname": "Mr. Paws", "hunts": False}) - - -# endregion - - -def test_unique_items_validator(): - with pytest.raises(DocumentationError, match=VALIDATE_UNIQUE_ITEMS_ERROR): - schema = {"type": "array", "items": {"type": "string"}, "uniqueItems": True} - tester.test_schema_section(schema, ["identical value", "identical value", "non-identical value"])