From 42c4ca848b65efe373f8902ebb179c5ee417fd71 Mon Sep 17 00:00:00 2001
From: Edoardo Abati <29585319+EdAbati@users.noreply.github.com>
Date: Wed, 19 Feb 2025 22:51:47 +0100
Subject: [PATCH 1/6] fix contribute

---
 docs/community/contribute.md | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/docs/community/contribute.md b/docs/community/contribute.md
index 21dc4581e..78c713b18 100644
--- a/docs/community/contribute.md
+++ b/docs/community/contribute.md
@@ -48,13 +48,6 @@ conda env create -f environment.yml
 
 Then install the dependencies in editable mode, and install the `pre-commit` hooks:
 
-```shell
-python -m venv .venv
-source .venv/bin/activate
-```
-
-Then install the dependencies in editable mode, and install the pre-commit hooks:
-
 ```shell
 pip install -e ".[test]"
 pre-commit install

From 8f35963edb6a0e09b7d5819e366e4114a8283e2a Mon Sep 17 00:00:00 2001
From: Edoardo Abati <29585319+EdAbati@users.noreply.github.com>
Date: Wed, 19 Feb 2025 23:09:03 +0100
Subject: [PATCH 2/6] all models in API reference

---
 docs/api/models.md | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/docs/api/models.md b/docs/api/models.md
index 27ad297fc..cb9497be1 100644
--- a/docs/api/models.md
+++ b/docs/api/models.md
@@ -1,3 +1 @@
-::: outlines.models.transformers
-
-::: outlines.models.openai
+::: outlines.models

From abdbb98c11eb3109cd1d71d35f0804dd06c8ff7b Mon Sep 17 00:00:00 2001
From: Edoardo Abati <29585319+EdAbati@users.noreply.github.com>
Date: Wed, 19 Feb 2025 23:09:15 +0100
Subject: [PATCH 3/6] fix llamacpp docstrings

---
 outlines/models/llamacpp.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/outlines/models/llamacpp.py b/outlines/models/llamacpp.py
index 904b193c4..7c7ad64df 100644
--- a/outlines/models/llamacpp.py
+++ b/outlines/models/llamacpp.py
@@ -248,8 +248,8 @@ def generate(
     ) -> str:
         """Generate text using `llama-cpp-python`.
 
-        Arguments
-        ---------
+        Parameters
+        ----------
         prompts
             A prompt or list of prompts.
         generation_parameters
@@ -302,8 +302,8 @@ def stream(
     ) -> Iterator[str]:
         """Stream text using `llama-cpp-python`.
 
-        Arguments
-        ---------
+        Parameters
+        ----------
         prompts
             A prompt or list of prompts.
         generation_parameters
@@ -372,8 +372,8 @@ def llamacpp(
     a path to the downloaded model. One can still load a local model by
     initializing `llama_cpp.Llama` directly.
 
-    Arguments
-    ---------
+    Parameters
+    ----------
     repo_id
         The name of the model repository.
     filename:

From 96a1425342a6a8b1c7c333ec66faa957e8c3b108 Mon Sep 17 00:00:00 2001
From: Edoardo Abati <29585319+EdAbati@users.noreply.github.com>
Date: Wed, 19 Feb 2025 23:26:32 +0100
Subject: [PATCH 4/6] improve prompts docstring

---
 outlines/prompts.py | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/outlines/prompts.py b/outlines/prompts.py
index 1cc264226..b04f91856 100644
--- a/outlines/prompts.py
+++ b/outlines/prompts.py
@@ -125,43 +125,49 @@ def prompt(
     manipulation by providing some degree of encapsulation. It uses the
     `render` function internally to render templates.
 
+    ```pycon
     >>> import outlines
     >>>
     >>> @outlines.prompt
     ... def build_prompt(question):
     ...    "I have a ${question}"
     ...
     >>> prompt = build_prompt("How are you?")
+    ```
 
     This API can also be helpful in an "agent" context where parts of the
     prompt are set when the agent is initialized and never modified later. In
     this situation we can partially apply the prompt function at initialization.
 
+    ```pycon
     >>> import outlines
     >>> import functools as ft
     ...
     >>> @outlines.prompt
     ... def solve_task(name: str, objective: str, task: str):
-    ...     '''Your name is {{name}}.
-    ..      Your overall objective is to {{objective}}.
+    ...     \"""Your name is {{name}}.
+    ...     Your overall objective is to {{objective}}.
     ...     Please solve the following task: {{task}}
-    ...     '''
+    ...     \"""
     ...
     >>> hal = ft.partial(solve_task, "HAL", "Travel to Jupiter")
+    ```
 
     Additional Jinja2 filters can be provided as keyword arguments to the
     decorator.
 
+    ```pycon
     >>> def reverse(s: str) -> str:
     ...     return s[::-1]
     ...
     >>> @outlines.prompt(filters={ 'reverse': reverse })
     ... def reverse_prompt(text):
-    ...     '''{{ text | reverse }}'''
+    ...     \"""{{ text | reverse }}\"""
     ...
     >>> prompt = reverse_prompt("Hello")
     >>> print(prompt)
     ... "olleH"
+    ```
 
     Returns
     -------

From 3af0b4c18c18e816a47db37af1daa4afeacaa834 Mon Sep 17 00:00:00 2001
From: Edoardo Abati <29585319+EdAbati@users.noreply.github.com>
Date: Wed, 19 Feb 2025 23:35:17 +0100
Subject: [PATCH 5/6] fix models docstrings

---
 outlines/models/exllamav2.py           |  8 +++++---
 outlines/models/mlxlm.py               | 25 +++++++++++++++----------
 outlines/models/openai.py              |  2 +-
 outlines/models/transformers.py        |  6 +++---
 outlines/models/transformers_vision.py |  4 ++--
 outlines/models/vllm.py                |  8 ++++----
 6 files changed, 30 insertions(+), 23 deletions(-)

diff --git a/outlines/models/exllamav2.py b/outlines/models/exllamav2.py
index d2aa84b0d..efd70f96a 100644
--- a/outlines/models/exllamav2.py
+++ b/outlines/models/exllamav2.py
@@ -118,9 +118,11 @@ def reformat_output(
         self, output: Union[str, List[str]], sampling_parameters: SamplingParameters
     ):
         """
-        The purpose of this function is to reformat the output from exllamav2's output format to outline's output format
-        For exllamav2, it mainly accepts only a list or a string(they also do cfg sampling with tuples but we will ignore this for now)
-        The exllamav2's logic is
+        The purpose of this function is to reformat the output from exllamav2's output format to outlines' output format.
+
+        For exllamav2, it mainly accepts only a list or a string (they also do CFG sampling with tuples, but we will ignore this for now).
+        The exllamav2's logic is:
+
         1. If the prompt is a string, return a string. This is the same as outlines
         2. If a prompt is a list, return a list. This is not the same as outlines output in that if the list is only one element, the string is expected to be outputted.
         3. There is no such thing as num_samples, so the prompts had to be duplicated by num_samples times. Then, we had the function output a list of lists
diff --git a/outlines/models/mlxlm.py b/outlines/models/mlxlm.py
index d8b7e032c..843107d66 100644
--- a/outlines/models/mlxlm.py
+++ b/outlines/models/mlxlm.py
@@ -49,8 +49,8 @@ def stream(
     ) -> Iterator[str]:
         """Generate text using `mlx_lm`.
 
-        Arguments
-        ---------
+        Parameters
+        ----------
         prompts
             A prompt or list of prompts.
         generation_parameters
@@ -63,6 +63,7 @@ def stream(
             An instance of `SamplingParameters`, a dataclass that contains
             the name of the sampler to use and related parameters as available
             in Outlines.
+
         Returns
         -------
         The generated text.
@@ -135,14 +136,18 @@ def generate_step(
     A generator producing token ids based on the given prompt from the
     model.
 
-    Args:
-        prompt (mx.array): The input prompt.
-        temp (float): The temperature for sampling, if 0 the argmax is used.
-          Default: ``0``.
-        top_p (float, optional): Nulceus sampling, higher means model considers
-          more less likely words.
-        sampler (str): The sampler string defined by SequenceGeneratorAdapter
-        logits_processor (OutlinesLogitsProcessor): Augment logits before sampling.
+    Parameters
+    ----------
+    prompt
+        The input prompt.
+    temp
+        The temperature for sampling; if 0, the argmax is used.
+    top_p
+        Nucleus sampling; higher values mean the model considers more less-likely words.
+    sampler
+        The sampler string defined by SequenceGeneratorAdapter.
+    logits_processor
+        Augment logits before sampling.
     """
     import mlx.core as mx
     import mlx_lm
diff --git a/outlines/models/openai.py b/outlines/models/openai.py
index 89c26f217..415652b49 100644
--- a/outlines/models/openai.py
+++ b/outlines/models/openai.py
@@ -20,7 +20,7 @@ class OpenAIConfig:
     properties that are specific to the OpenAI API. Not all these properties are
     supported by Outlines.
 
-    Properties
+    Parameters
    ----------
     model
         The name of the model. Available models can be found on OpenAI's website.
diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py
index 444492500..6b204ec6e 100644
--- a/outlines/models/transformers.py
+++ b/outlines/models/transformers.py
@@ -203,8 +203,8 @@ def generate(
     ) -> Union[str, List[str], List[List[str]]]:
         """Generate text using `transformers`.
 
-        Arguments
-        ---------
+        Parameters
+        ----------
         prompts
             A prompt or list of prompts.
         generation_parameters
@@ -304,7 +304,7 @@ def _get_generation_kwargs(
         sampling_parameters: SamplingParameters,
     ) -> dict:
         """
-        Conert outlines generation parameters into model.generate kwargs
+        Convert outlines generation parameters into model.generate kwargs
         """
         from transformers import GenerationConfig, LogitsProcessorList, set_seed
 
diff --git a/outlines/models/transformers_vision.py b/outlines/models/transformers_vision.py
index 772645b80..c8a86536e 100644
--- a/outlines/models/transformers_vision.py
+++ b/outlines/models/transformers_vision.py
@@ -22,8 +22,8 @@ def generate(  # type: ignore
     ) -> Union[str, List[str], List[List[str]]]:
         """Generate text using `transformers`.
 
-        Arguments
-        ---------
+        Parameters
+        ----------
         prompts
             A prompt or list of prompts.
         media
diff --git a/outlines/models/vllm.py b/outlines/models/vllm.py
index 778c27c6f..b9b035d1f 100644
--- a/outlines/models/vllm.py
+++ b/outlines/models/vllm.py
@@ -52,8 +52,8 @@ def generate(
     ):
         """Generate text using vLLM.
 
-        Arguments
-        ---------
+        Parameters
+        ----------
         prompts
             A prompt or list of prompts.
         generation_parameters
@@ -171,7 +171,7 @@ def load_lora(self, adapter_path: Optional[str]):
 def vllm(model_name: str, **vllm_model_params):
     """Load a vLLM model.
 
-    Arguments
-    ---------
+    Parameters
+    ----------
     model_name
         The name of the model to load from the HuggingFace hub.

From 259cd24ebeb3691c2b9ffb416d915e9b67046421 Mon Sep 17 00:00:00 2001
From: Edoardo Abati <29585319+EdAbati@users.noreply.github.com>
Date: Wed, 19 Feb 2025 23:37:29 +0100
Subject: [PATCH 6/6] typo

---
 outlines/models/openai.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/outlines/models/openai.py b/outlines/models/openai.py
index 415652b49..40ade1c25 100644
--- a/outlines/models/openai.py
+++ b/outlines/models/openai.py
@@ -24,7 +24,7 @@ class OpenAIConfig:
     ----------
     model
         The name of the model. Available models can be found on OpenAI's website.
-    frequence_penalty
+    frequency_penalty
         Number between 2.0 and -2.0. Positive values penalize new tokens based on
         their existing frequency in the text,
     logit_bias
@@ -49,7 +49,6 @@ class OpenAIConfig:
         Number between 0 and 1. Parameter for nucleus sampling.
     user
         A unique identifier for the end-user.
- """ model: str = ""