Commit: Update the examples

rlouf committed Feb 22, 2025
1 parent c0eff25 commit 361c4d8
Showing 16 changed files with 122 additions and 80 deletions.
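Every file below applies the same migration, from the old outlines.models.* / outlines.generate.* helpers to the v1-style API, in which an existing client or model object is wrapped with an outlines.from_* constructor and output types are passed at generation time. A minimal before/after sketch of the pattern, assembled from the diffs below (the prompt and schema here are placeholders, not taken from the examples):

import openai

import outlines

# Before (old API, removed by this commit):
#   model = outlines.models.openai("gpt-4o-mini")
#   generator = outlines.generate.text(model)
#   answer = generator(prompt)

# After (new API): wrap an existing OpenAI client object.
model = outlines.from_openai(openai.OpenAI(), "gpt-4o-mini")

# Unconstrained text: Generator replaces outlines.generate.text.
generator = outlines.Generator(model)
answer = generator("Write a haiku about constrained decoding.")

# Structured output: pass an output type such as JsonType at call time,
# instead of building a dedicated outlines.generate.json generator.
schema = {"type": "object", "properties": {"name": {"type": "string"}}}
character = model("Describe a character.", outlines.JsonType(schema))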
8 changes: 5 additions & 3 deletions examples/babyagi.py
@@ -4,14 +4,16 @@
 The original repo can be found at https://github.com/yoheinakajima/babyagi
 """

 from collections import deque
 from typing import Deque, List

+from openai import OpenAI
+
 import outlines
-import outlines.models as models

-model = models.openai("gpt-4o-mini")
-complete = outlines.generate.text(model)
+model = outlines.from_openai(OpenAI(), "gpt-4o-mini")
+complete = outlines.Generator(model)


 #################
6 changes: 1 addition & 5 deletions examples/bentoml/service.py
@@ -58,11 +58,7 @@ def __init__(self) -> None:

         import outlines

-        self.model = outlines.models.transformers(
-            self.bento_model_ref.path,
-            device="cuda",
-            model_kwargs={"torch_dtype": torch.float16},
-        )
+        self.model = outlines.from_transformers()

     @bentoml.api
     async def generate(
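Unlike the other files in this commit, the replacement line here calls outlines.from_transformers() with no arguments, which cannot run as written. Going by the pattern used in the other diffs, the completed call would presumably resemble the sketch below (assumptions: the BentoML model path points at a Transformers checkpoint, and the torch_dtype from the deleted code is kept):

import torch

import outlines
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical completion of the bare from_transformers() call above,
# mirroring the model/tokenizer pattern used elsewhere in this commit.
def load_outlines_model(checkpoint_path: str):
    return outlines.from_transformers(
        AutoModelForCausalLM.from_pretrained(checkpoint_path, torch_dtype=torch.float16),
        AutoTokenizer.from_pretrained(checkpoint_path),
    )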
14 changes: 9 additions & 5 deletions examples/cerebrium/main.py
@@ -1,6 +1,11 @@
 import outlines
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model = outlines.from_transformers(
+    AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2"),
+    AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2"),
+)

-model = outlines.models.transformers("mistralai/Mistral-7B-Instruct-v0.2")

 schema = {
     "title": "Character",
@@ -29,14 +34,13 @@
     },
 }

-generator = outlines.generate.json(model, schema)
-

 def generate(
     prompt: str = "Amiri, a 53 year old warrior woman with a sword and leather armor.",
 ):
-    character = generator(
-        f"<s>[INST]Give me a character description. Describe {prompt}.[/INST]"
+    character = model(
+        f"<s>[INST]Give me a character description. Describe {prompt}.[/INST]",
+        outlines.JsonType(schema),
     )

     print(character)
17 changes: 12 additions & 5 deletions examples/cfg.py
@@ -1,5 +1,7 @@
-import outlines.generate as generate
-import outlines.models as models
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+import outlines
+from outlines.types import Cfg

 nlamb_grammar = r"""
     start: sentence
@@ -75,11 +77,16 @@
 %ignore WS
 """

-model = models.transformers("hf-internal-testing/tiny-random-gpt2")
+model_name = "hf-internal-testing/tiny-random-gpt2"
+model = outlines.from_transformers(
+    AutoModelForCausalLM.from_pretrained(model_name),
+    AutoTokenizer.from_pretrained(model_name),
+)

 batch_size = 10
 for grammar in [nlamb_grammar, calc_grammar, dyck_grammar, json_grammar]:
-    generator = generate.cfg(model, grammar, max_tokens=model.model.config.n_positions)
-    sequences = generator([" "] * batch_size)
+    generator = outlines.Generator(model, Cfg(grammar))
+    sequences = generator([" "] * batch_size, max_tokens=model.model.config.n_positions)
     for seq in sequences:
         try:
             parse = generator.fsm.parser.parse(seq)
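Besides the new constructor, note that max_tokens moves from generator construction (generate.cfg(..., max_tokens=...)) to the generation call itself. A condensed sketch of the new calling convention, using a toy grammar rather than the full grammars above:

import outlines
from outlines.types import Cfg
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "hf-internal-testing/tiny-random-gpt2"
model = outlines.from_transformers(
    AutoModelForCausalLM.from_pretrained(model_name),
    AutoTokenizer.from_pretrained(model_name),
)

# Toy Lark grammar, for illustration only.
number_grammar = r"""
start: NUMBER
%import common.NUMBER
"""

# Sampling parameters such as max_tokens now travel with the call,
# not with the construction of the generator.
generator = outlines.Generator(model, Cfg(number_grammar))
sequences = generator([" "] * 10, max_tokens=256)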
23 changes: 12 additions & 11 deletions examples/dating_profile.py
@@ -6,7 +6,6 @@
 from pydantic import BaseModel, conlist

 import outlines
-from outlines import models


 class QuestionChoice(str, Enum):
@@ -103,25 +102,27 @@ def dating_profile_prompt(description: str, examples: list[Example]):
 # Below requires ~13GB of GPU memory
 # https://huggingface.co/mosaicml/mpt-7b-8k-instruct
 # Motivation: Reasonably large model that fits on a single GPU and has been fine-tuned for a larger context window
+model_name = "mosaicml/mpt-7b-8k-instruct"
 config = transformers.AutoConfig.from_pretrained(
     "mosaicml/mpt-7b-8k-instruct", trust_remote_code=True
 )
 config.init_device = "meta"
-model = models.transformers(
-    model_name="mosaicml/mpt-7b-8k-instruct",
-    device="cuda",
-    model_kwargs={
-        "config": config,
-        "trust_remote_code": True,
-        "torch_dtype": torch.bfloat16,
-        "device_map": {"": 0},
-    },
+model = outlines.from_transformers(
+    transformers.AutoModelForCausalLM.from_pretrained(
+        model_name,
+        device="cuda",
+        config=config,
+        trust_remote_code=True,
+        torch_dtype=torch.bfloat16,
+        device_map={"": 0},
+    ),
+    transformers.AutoTokenizer.from_pretrained(model_name),
 )

 new_description = "I'm a laid-back lawyer who spends a lot of his free-time gaming. I work in a corporate office, but ended up here after the start-up I cofounded got acquired, so still play ping pong with my cool coworkers every day. I have a bar at home where I make cocktails, which is great for entertaining friends. I secretly like to wear suits and get a new one tailored every few months. I also like weddings because I get to wear those suits, and it's a good excuse for a date. I watch the latest series because I'm paying, with my hard-earned money, for every streaming service."

 prompt = dating_profile_prompt(description=new_description, examples=samples)
-profile = outlines.generate.json(model, DatingProfile)(prompt)  # type: ignore
+profile = model(prompt, outlines.JsonType(DatingProfile))  # type: ignore
 print(profile)

 # Sample generated profiles
5 changes: 3 additions & 2 deletions examples/llamacpp_example.py
@@ -1,6 +1,7 @@
 from enum import Enum

 from pydantic import BaseModel, constr
+from llama_cpp import Llama

 import outlines

@@ -30,10 +31,10 @@ class Character(BaseModel):

 if __name__ == "__main__":
     # curl -L -o mistral-7b-instruct-v0.2.Q5_K_M.gguf https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q5_K_M.gguf
-    model = outlines.models.llamacpp("./mistral-7b-instruct-v0.2.Q5_K_M.gguf")
+    model = outlines.from_llamacpp(Llama("./mistral-7b-instruct-v0.2.Q5_K_M.gguf"))

     # Construct structured sequence generator
-    generator = outlines.generate.json(model, Character)
+    generator = outlines.Generator(model, outlines.JsonType(Character))

     # Draw a sample
     seed = 789005
7 changes: 4 additions & 3 deletions examples/math_generate_code.py
@@ -1,6 +1,7 @@
 """Example from https://dust.tt/spolu/a/d12ac33169"""

 import outlines
-import outlines.models as models
+import openai

 examples = [
     {"question": "What is 37593 * 67?", "code": "37593 * 67"},
@@ -35,7 +36,7 @@ def execute_code(code):

 prompt = answer_with_code_prompt(question, examples)
-model = models.openai("gpt-4o-mini")
-answer = outlines.generate.text(model)(prompt)
+model = outlines.from_openai(openai.OpenAI(), "gpt-4o-mini")
+answer = model(prompt)
 result = execute_code(answer)
 print(f"It takes Carla {result:.0f} minutes to download the file.")
34 changes: 17 additions & 17 deletions examples/meta_prompting.py
@@ -9,10 +9,14 @@
 https://arxiv.org/abs/2102.07350.
 """

 import argparse

+import openai
 import outlines
-import outlines.models as models
+
+
+client = openai.OpenAI()


 def split_into_steps(question, model_name: str):
@@ -22,16 +26,15 @@ def solve(question):
        Rephrase : : as a true or false statement, identify an Object, relationship and subject
        """

-    model = models.openai(model_name)
-    generator = outlines.generate.text(model)
+    model = outlines.from_openai(client, model_name)

     prompt = solve(question)
-    answer = generator(prompt, 500)
+    answer = model(prompt, 500)
     prompt += (
         answer
         + "\n what is the only option that displays the same type of relationship as : :?"
     )
-    answer = generator(prompt, 500)
+    answer = model(prompt, 500)
     completed = prompt + answer

     return completed
@@ -49,13 +52,12 @@ def determine_goal(question):
     def solve(memory):
         """{{memory}}. Let's begin."""

-    model = models.openai(model_name)
-    generator = outlines.generate.text(model)
+    model = outlines.from_openai(client, model_name)

     prompt = determine_goal(question)
-    answer = generator(prompt, stop_at=["."])
+    answer = model(prompt, stop_at=["."])
     prompt = solve(prompt + answer)
-    answer = generator(prompt, max_tokens=500)
+    answer = model(prompt, max_tokens=500)
     completed = prompt + answer

     return completed
@@ -89,13 +91,12 @@ def get_answer(question, expert, memory):
        {{question}}
        """

-    model = models.openai(model_name)
-    generator = outlines.generate.text(model)
+    model = outlines.from_openai(client, model_name)

     prompt = find_expert(question)
-    expert = generator(prompt, stop_at=['"'])
+    expert = model(prompt, stop_at=['"'])
     prompt = get_answer(question, expert, prompt + expert)
-    answer = generator(prompt, max_tokens=500)
+    answer = model(prompt, max_tokens=500)
     completed = prompt + answer

     return completed
@@ -117,13 +118,12 @@ def get_answer(expert, memory):
        For instance, {{expert}} would answer
        """

-    model = models.openai(model_name)
-    generator = outlines.generate.text(model)
+    model = outlines.from_openai(client, model_name)

     prompt = find_expert(question)
-    expert = generator(prompt, stop_at=["\n", "."])
+    expert = model(prompt, stop_at=["\n", "."])
     prompt = get_answer(expert, prompt + expert)
-    answer = generator(prompt, max_tokens=500)
+    answer = model(prompt, max_tokens=500)
     completed = prompt + answer

     return completed
21 changes: 14 additions & 7 deletions examples/modal_example.py
@@ -4,17 +4,19 @@


 outlines_image = modal.Image.debian_slim(python_version="3.11").pip_install(
-    "outlines==0.0.37",
+    "outlines==1.0.0",
     "transformers==4.38.2",
     "datasets==2.18.0",
     "accelerate==0.27.2",
 )


 def import_model():
-    import outlines
+    from transformers import AutoModelForCausalLM, AutoTokenizer

-    outlines.models.transformers("mistralai/Mistral-7B-Instruct-v0.2")
+    model_id = "mistralai/Mistral-7B-Instruct-v0.2"
+    _ = AutoTokenizer.from_pretrained(model_id)
+    _ = AutoModelForCausalLM.from_pretrained(model_id)


 outlines_image = outlines_image.run_function(import_model)
@@ -63,12 +65,17 @@ def generate(
     prompt: str = "Amiri, a 53 year old warrior woman with a sword and leather armor.",
 ):
     import outlines
+    from transformers import AutoModelForCausalLM, AutoTokenizer

-    model = outlines.models.transformers("mistralai/Mistral-7B-v0.1", device="cuda")
+    model_id = "mistralai/Mistral-7B-Instruct-v0.2"
+    model = outlines.from_transformers(
+        tokenizer=AutoTokenizer.from_pretrained(model_id),
+        model=AutoModelForCausalLM.from_pretrained(model_id, device="cuda"),
+    )

-    generator = outlines.generate.json(model, schema)
-    character = generator(
-        f"<s>[INST]Give me a character description. Describe {prompt}.[/INST]"
+    character = model(
+        f"<s>[INST]Give me a character description. Describe {prompt}.[/INST]",
+        outlines.JsonType(schema),
     )

     print(character)
1 change: 1 addition & 0 deletions examples/parsing.py
@@ -1,4 +1,5 @@
 """An example illustrating parser-based masking."""
+
 import math
 import time
 from copy import copy
5 changes: 3 additions & 2 deletions examples/pick_odd_one_out.py
@@ -9,8 +9,9 @@
 arXiv preprint arXiv:2212.06094.
 """

+import openai
 import outlines
-import outlines.models as models


 @outlines.prompt
@@ -31,7 +32,7 @@ def build_ooo_prompt(options):

 options = ["sea", "mountains", "plains", "sock"]

-model = models.openai("gpt-4o-mini")
+model = outlines.from_openai(openai.OpenAI(), "gpt-4o-mini")
 gen_text = outlines.generate.text(model)
 gen_choice = outlines.generate.choice(model, options)
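Note that gen_text and gen_choice here remain on the old outlines.generate.* API. Going by the react.py diff below, their new-API equivalents would presumably be:

from outlines import Generator, Choice

# Hypothetical new-API equivalents of the two lines above;
# model and options are the objects defined in this file.
gen_text = Generator(model)
gen_choice = Generator(model, Choice(options))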
13 changes: 7 additions & 6 deletions examples/react.py
@@ -10,11 +10,12 @@
 .. [2] Yao, S., Zhao, J., Yu, D., Du, N., Shafran, I., Narasimhan, K., & Cao, Y. (2022). React: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629.
 """

+from openai import OpenAI
 import requests  # type: ignore

 import outlines
-import outlines.generate as generate
-import outlines.models as models
+from outlines import Generator, Choice


 @outlines.prompt
@@ -46,11 +47,11 @@ def search_wikipedia(query: str):

 prompt = build_reAct_prompt("Where is Apple Computers headquarted? ")
-model = models.openai("gpt-4o-mini")
+model = outlines.from_openai(OpenAI(), "gpt-4o-mini")

-mode_generator = generate.choice(model, choices=["Tho", "Act"])
-action_generator = generate.choice(model, choices=["Search", "Finish"])
-text_generator = generate.text(model)
+mode_generator = Generator(model, Choice(["Tho", "Act"]))
+action_generator = Generator(model, Choice(["Search", "Finish"]))
+text_generator = Generator(model)

 for i in range(1, 10):
     mode = mode_generator(prompt, max_tokens=128)
(4 remaining changed files not shown)
