From 1f48b534ec1693db3265f25789f359778e8a2d35 Mon Sep 17 00:00:00 2001
From: Kevin Messiaen
Date: Thu, 4 Jan 2024 12:04:11 +0700
Subject: [PATCH] Moved model.meta.property to model.property

---
 giskard/llm/generators/adversarial.py          |  8 ++---
 giskard/llm/generators/sycophancy.py           | 14 ++++----
 .../generators/test_base_llm_generators.py     | 36 +++++++++----------
 3 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/giskard/llm/generators/adversarial.py b/giskard/llm/generators/adversarial.py
index f602f01eb8..ca27f50802 100644
--- a/giskard/llm/generators/adversarial.py
+++ b/giskard/llm/generators/adversarial.py
@@ -1,6 +1,6 @@
+from .base import BaseDataGenerator
 from ...models.base.model import BaseModel
 from ...utils.display import truncate
-from .base import BaseDataGenerator
 
 GENERATE_INPUTS_PROMPT = """You are auditing AI models. Your task is to generate a set of inputs used to evaluate the behavior of a given AI model. These inputs should highlight the model's performance in scenarios where it may fail or exhibit suboptimal behavior related to the following issue category:
 
@@ -48,9 +48,9 @@ def _make_dataset_name(self, model: BaseModel, num_samples):
     def _make_generate_input_prompt(self, model: BaseModel, num_inputs: int):
         input_prompt = self.prompt.format(
             issue_description=self.issue_description,
-            model_name=model.meta.name,
-            model_description=model.meta.description,
-            feature_names=", ".join(model.meta.feature_names),
+            model_name=model.name,
+            model_description=model.description,
+            feature_names=", ".join(model.feature_names),
             num_samples=num_inputs,
             requirement=self.requirement,
         )
diff --git a/giskard/llm/generators/sycophancy.py b/giskard/llm/generators/sycophancy.py
index b725f288af..cb10a5cf1c 100644
--- a/giskard/llm/generators/sycophancy.py
+++ b/giskard/llm/generators/sycophancy.py
@@ -40,9 +40,9 @@ class SycophancyDataGenerator(LLMGenerator):
 
     def _make_generate_input_prompt(self, model: BaseModel, num_samples):
         input_prompt = self.prompt.format(
-            model_name=model.meta.name,
-            model_description=model.meta.description,
-            feature_names=", ".join(model.meta.feature_names),
+            model_name=model.name,
+            model_description=model.description,
+            feature_names=", ".join(model.feature_names),
             num_samples=num_samples,
         )
         if self.languages:
@@ -64,11 +64,11 @@ def _make_generate_input_functions(self, model: BaseModel):
                 "properties": {
                     "input_version_1": {
                         "type": "object",
-                        "properties": {name: {"type": "string"} for name in model.meta.feature_names},
+                        "properties": {name: {"type": "string"} for name in model.feature_names},
                     },
                     "input_version_2": {
                         "type": "object",
-                        "properties": {name: {"type": "string"} for name in model.meta.feature_names},
+                        "properties": {name: {"type": "string"} for name in model.feature_names},
                     },
                 },
             },
@@ -100,12 +100,12 @@ def generate_dataset(self, model: BaseModel, num_samples=10, column_types=None):
 
         dataset_1 = Dataset(
             pd.DataFrame([p["input_version_1"] for p in input_pairs]),
-            name=f"Sycophancy examples for {model.meta.name} (set 1)",
+            name=f"Sycophancy examples for {model.name} (set 1)",
             column_types=column_types,
         )
         dataset_2 = Dataset(
             pd.DataFrame([p["input_version_2"] for p in input_pairs]),
-            name=f"Sycophancy examples for {model.meta.name} (set 2)",
+            name=f"Sycophancy examples for {model.name} (set 2)",
             column_types=column_types,
         )
 
diff --git a/tests/llm/generators/test_base_llm_generators.py b/tests/llm/generators/test_base_llm_generators.py
index 2aaef480f2..7ef220de02 100644
--- a/tests/llm/generators/test_base_llm_generators.py
+++ b/tests/llm/generators/test_base_llm_generators.py
@@ -37,9 +37,9 @@ def test_generator_returns_dataset(Generator, args, kwargs):
     ]
 
     model = Mock()
-    model.meta.feature_names = ["question", "other_feature"]
-    model.meta.name = "Mock model for test"
-    model.meta.description = "This is a model for testing purposes"
+    model.feature_names = ["question", "other_feature"]
+    model.name = "Mock model for test"
+    model.description = "This is a model for testing purposes"
 
     generator = Generator(
         *args,
@@ -86,9 +86,9 @@ def test_generator_raises_generation_error_if_function_call_fails(Generator, arg
     llm_client.complete.side_effect = [LLMOutput("Sorry, I can't.", None)]
 
     model = Mock()
-    model.meta.feature_names = ["question", "other_feature"]
-    model.meta.name = "Mock model for test"
-    model.meta.description = "This is a model for testing purposes"
+    model.feature_names = ["question", "other_feature"]
+    model.name = "Mock model for test"
+    model.description = "This is a model for testing purposes"
 
     generator = Generator(*args, **kwargs, llm_client=llm_client)
 
@@ -100,9 +100,9 @@ def test_generator_raises_generation_error_if_function_call_fails(Generator, arg
     llm_client.complete.side_effect = [LLMOutput(None, LLMFunctionCall("wrong_function", {"have_no_inputs": True}))]
 
     model = Mock()
-    model.meta.feature_names = ["question", "other_feature"]
-    model.meta.name = "Mock model for test"
-    model.meta.description = "This is a model for testing purposes"
+    model.feature_names = ["question", "other_feature"]
+    model.name = "Mock model for test"
+    model.description = "This is a model for testing purposes"
 
     generator = Generator(*args, **kwargs, llm_client=llm_client)
 
@@ -135,9 +135,9 @@ def test_generator_casts_based_on_column_types(Generator, args, kwargs):
     ] * 2
 
     model = Mock()
-    model.meta.feature_names = ["question", "other_feature"]
-    model.meta.name = "Mock model for test"
-    model.meta.description = "This is a model for testing purposes"
+    model.feature_names = ["question", "other_feature"]
+    model.name = "Mock model for test"
+    model.description = "This is a model for testing purposes"
 
     generator = Generator(
         *args,
@@ -190,9 +190,9 @@ def test_generator_adds_languages_requirements_in_prompts(Generator, args, kwarg
     ]
 
     model = Mock()
-    model.meta.feature_names = ["question", "other_feature"]
-    model.meta.name = "Mock model for test"
-    model.meta.description = "This is a model for testing purposes"
+    model.feature_names = ["question", "other_feature"]
+    model.name = "Mock model for test"
+    model.description = "This is a model for testing purposes"
 
     generator = Generator(
         *args,
@@ -243,9 +243,9 @@ def test_generator_empty_languages_requirements(Generator, args, kwargs):
     ]
 
     model = Mock()
-    model.meta.feature_names = ["question", "other_feature"]
-    model.meta.name = "Mock model for test"
-    model.meta.description = "This is a model for testing purposes"
+    model.feature_names = ["question", "other_feature"]
+    model.name = "Mock model for test"
+    model.description = "This is a model for testing purposes"
 
     generator = Generator(
        *args,
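Note (not part of the patch): the hunks above only update call sites, so they assume that giskard's BaseModel already exposes name, description and feature_names directly rather than only through model.meta. The sketch below is a minimal, hypothetical illustration of how such delegation to the meta object could look; the class and field names are simplified stand-ins, not the actual giskard implementation.

    from dataclasses import dataclass
    from typing import List, Optional


    @dataclass
    class ModelMeta:
        # Simplified metadata container holding the fields referenced in the patch.
        name: Optional[str] = None
        description: Optional[str] = None
        feature_names: Optional[List[str]] = None


    class BaseModel:
        """Simplified stand-in for giskard's BaseModel (illustration only)."""

        def __init__(self, meta: ModelMeta):
            self.meta = meta

        # Read-only properties so callers can write model.name instead of
        # model.meta.name, matching the updated generators in this patch.
        @property
        def name(self) -> Optional[str]:
            return self.meta.name

        @property
        def description(self) -> Optional[str]:
            return self.meta.description

        @property
        def feature_names(self) -> Optional[List[str]]:
            return self.meta.feature_names


    # Usage: the shorter attribute access resolves to the underlying meta fields.
    model = BaseModel(ModelMeta(name="demo", feature_names=["question"]))
    assert model.name == "demo" and model.feature_names == ["question"]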