From 0b9a3f1378c62449472cd6430d18eab9ac39c2bd Mon Sep 17 00:00:00 2001 From: Cameron Pfiffer Date: Fri, 13 Sep 2024 11:44:14 -0700 Subject: [PATCH] Correct pathways, update site color, front page fixes (#1146) Follow up to #1143 and moving the repo to dottxt-ai. - A lot of our links were broken because they pointed to the old site. - We changed the logo and the color but it wasn't fully integrated across the site. I fixed these, and made some improvements to the front page so that it's a little cleaner. New mobile/desktop views ![image](https://github.com/user-attachments/assets/9c068733-9cbc-4864-a5cd-763cb7403fa5) ![image](https://github.com/user-attachments/assets/ddc5c638-6129-439d-8645-403af1ec9acf) Current for reference ![image](https://github.com/user-attachments/assets/2bcebe41-a4db-4fba-9a0e-d8adfaf8b1f9) GitHub contributors are still available off the welcome page: ![image](https://github.com/user-attachments/assets/ce4b21e6-2969-47f6-b351-19cf91759868) --- .github/ISSUE_TEMPLATE/config.yml | 2 +- .../pull_request_template.md | 2 +- Dockerfile | 2 +- README.md | 24 ++-- benchmarks/asv.conf.json | 4 +- docs/blog/posts/roadmap-2024.md | 10 +- docs/community/contribute.md | 8 +- docs/community/feedback.md | 4 +- docs/community/versioning.md | 2 +- docs/cookbook/chain_of_density.md | 2 +- docs/cookbook/chain_of_thought.md | 2 +- docs/cookbook/deploy-using-bentoml.md | 6 +- docs/cookbook/deploy-using-modal.md | 4 +- docs/cookbook/knowledge_graph_extraction.md | 2 +- docs/cookbook/react_agent.md | 2 +- docs/cookbook/simtom.md | 4 +- docs/index.md | 20 +-- docs/installation.md | 2 +- docs/overrides/home.html | 120 ++++++++++++++++++ docs/overrides/index.html | 11 -- docs/overrides/main.html | 20 --- docs/quickstart.md | 2 +- docs/reference/generation/cfg.md | 4 +- docs/reference/generation/generation.md | 2 +- docs/reference/generation/types.md | 2 +- docs/reference/serve/vllm.md | 2 +- docs/stylesheets/extra.css | 1 + docs/welcome.md | 20 +-- environment.yml | 
4 +- examples/dating_profile.py | 2 +- mkdocs.yml | 4 +- outlines/fsm/guide.py | 4 +- outlines/generate/cfg.py | 2 +- pyproject.toml | 6 +- tests/fsm/test_json_schema.py | 2 +- tests/fsm/test_regex.py | 8 +- tests/generate/test_generate.py | 4 +- tests/generate/test_integration_llamacpp.py | 2 +- tests/test_function.py | 10 +- 39 files changed, 203 insertions(+), 131 deletions(-) create mode 100644 docs/overrides/home.html delete mode 100644 docs/overrides/index.html diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index a396c1e76..90a4af686 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,4 +1,4 @@ contact_links: - name: 🤔 Questions & Help - url: https://github.com/outlines-dev/outlines/discussions/new + url: https://github.com/dottxt-ai/outlines/discussions/new about: "If you have a question about how to use Outlines, please start a discussion." diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md index bc181466b..ce0e89999 100644 --- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +++ b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md @@ -18,6 +18,6 @@ A few important guidelines and requirements before we can merge your PR: Consider opening a **Draft PR** if your work is still in progress but you would like some feedback from other contributors. 
-[issues]: https://github.com/outlines-dev/outlines/issues +[issues]: https://github.com/dottxt-ai/outlines/issues [git-guidelines]: https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html [docstring-guidelines]: https://numpydoc.readthedocs.io/en/latest/format.html diff --git a/Dockerfile b/Dockerfile index c6e5f0672..117e39e88 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,5 +13,5 @@ COPY outlines ./outlines RUN --mount=source=.git,target=.git,type=bind \ pip install --no-cache-dir .[serve] -# https://outlines-dev.github.io/outlines/reference/vllm/ +# https://dottxt-ai.github.io/outlines/reference/vllm/ ENTRYPOINT ["python3", "-m", "outlines.serve.serve"] diff --git a/README.md b/README.md index 4988c77cc..87109d182 100644 --- a/README.md +++ b/README.md @@ -22,11 +22,11 @@ Made with ❤👷️ by the team at [.txt](https://dottxt.co). pip install outlines ``` -First time here? Go to our [setup guide](https://outlines-dev.github.io/outlines/welcome) +First time here? Go to our [setup guide](https://dottxt-ai.github.io/outlines/welcome) ## Features -- [x] 🤖 [Multiple model integrations](https://outlines-dev.github.io/outlines/installation): OpenAI, transformers, llama.cpp, exllama2, mamba +- [x] 🤖 [Multiple model integrations](https://dottxt-ai.github.io/outlines/installation): OpenAI, transformers, llama.cpp, exllama2, mamba - [x] 🖍️ Simple and powerful prompting primitives based on the [Jinja templating engine](https://jinja.palletsprojects.com/) - [x] 🚄 [Multiple choices](#multiple-choices), [type constraints](#type-constraint) and dynamic stopping - [x] ⚡ Fast [regex-structured generation](#efficient-regex-structured-generation) @@ -36,10 +36,10 @@ First time here? Go to our [setup guide](https://outlines-dev.github.io/outlines - [x] 💾 Caching of generations - [x] 🗂️ Batch inference - [x] 🎲 Sample with the greedy, multinomial and beam search algorithms (and more to come!) 
-- [x] 🚀 [Serve with vLLM](https://outlines-dev.github.io/outlines/reference/serve/vllm), with official Docker image, [`outlinesdev/outlines`](https://hub.docker.com/r/outlinesdev/outlines)! +- [x] 🚀 [Serve with vLLM](https://dottxt-ai.github.io/outlines/reference/serve/vllm), with official Docker image, [`outlinesdev/outlines`](https://hub.docker.com/r/outlinesdev/outlines)! -Outlines 〰 has new releases and features coming every week. Make sure to ⭐ star and 👀 watch this repository, follow [@dottxtai][dottxt-twitter] to stay up to date! +Outlines has new releases and features coming every week. Make sure to ⭐ star and 👀 watch this repository, follow [@dottxtai][dottxt-twitter] to stay up to date! ## Why should I use structured generation? @@ -145,7 +145,7 @@ as non-structured generation. ### Efficient JSON generation following a Pydantic model -Outlines 〰 allows to guide the generation process so the output is *guaranteed* to follow a [JSON schema](https://json-schema.org/) or [Pydantic model](https://docs.pydantic.dev/latest/): +Outlines allows to guide the generation process so the output is *guaranteed* to follow a [JSON schema](https://json-schema.org/) or [Pydantic model](https://docs.pydantic.dev/latest/): ```python from enum import Enum @@ -197,7 +197,7 @@ print(repr(character)) # Character(name='Vivian Thr', age=44, armor=, weapon=, strength=125) ``` -The method works with union types, optional types, arrays, nested schemas, etc. Some field constraints are [not supported yet](https://github.com/outlines-dev/outlines/issues/215), but everything else should work. +The method works with union types, optional types, arrays, nested schemas, etc. Some field constraints are [not supported yet](https://github.com/dottxt-ai/outlines/issues/215), but everything else should work. 
### Efficient JSON generation following a JSON Schema @@ -277,7 +277,7 @@ print(sequence) # (8-2) ``` -This was a very simple grammar, and you can use `outlines.generate.cfg` to generate syntactically valid Python, SQL, and much more than this. Any kind of structured text, really. All you have to do is search for "X EBNF grammar" on the web, and take a look at the [Outlines `grammars` module](https://github.com/outlines-dev/outlines/tree/main/outlines/grammars). +This was a very simple grammar, and you can use `outlines.generate.cfg` to generate syntactically valid Python, SQL, and much more than this. Any kind of structured text, really. All you have to do is search for "X EBNF grammar" on the web, and take a look at the [Outlines `grammars` module](https://github.com/dottxt-ai/outlines/tree/main/outlines/grammars). ### Open functions @@ -339,8 +339,8 @@ answer = outlines.generate.text(model)(prompt, max_tokens=100) ## Join us - 💡 **Have an idea?** Come chat with us on [Discord][discord] -- 🔨 **Want to contribute?** Consult our [contribution guide](https://outlines-dev.github.io/outlines/community/contribute/). -- 🐞 **Found a bug?** Open an [issue](https://github.com/outlines-dev/outlines/issues) +- 🔨 **Want to contribute?** Consult our [contribution guide](https://dottxt-ai.github.io/outlines/community/contribute/). 
+- 🐞 **Found a bug?** Open an [issue](https://github.com/dottxt-ai/outlines/issues) ## Cite Outlines @@ -354,10 +354,10 @@ answer = outlines.generate.text(model)(prompt, max_tokens=100) } ``` -[documentation]: https://outlines-dev.github.io/outlines/welcome/ +[documentation]: https://dottxt-ai.github.io/outlines/welcome/ [documentation-badge]: https://img.shields.io/readthedocs/outlines -[contributors]: https://github.com/outlines-dev/outlines/graphs/contributors -[contributors-badge]: https://img.shields.io/github/contributors/outlines-dev/outlines?style=flat-square&logo=github&logoColor=white&color=ECEFF4 +[contributors]: https://github.com/dottxt-ai/outlines/graphs/contributors +[contributors-badge]: https://img.shields.io/github/contributors/dottxt-ai/outlines?style=flat-square&logo=github&logoColor=white&color=ECEFF4 [dottxt-twitter]: https://twitter.com/dottxtai [outlines-twitter]: https://twitter.com/OutlinesOSS [discord]: https://discord.gg/R9DSu34mGd diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json index f57db9a0b..92808ce83 100644 --- a/benchmarks/asv.conf.json +++ b/benchmarks/asv.conf.json @@ -1,7 +1,7 @@ { "version": 1, "project": "Outlines", - "project_url": "https://outlines-dev.github.io/outlines/", + "project_url": "https://dottxt-ai.github.io/outlines/", "repo": "..", "branches": [ "HEAD" @@ -11,7 +11,7 @@ "PIP_NO_BUILD_ISOLATION=false python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}", ], "environment_type": "virtualenv", - "show_commit_url": "https://github.com/outlines-dev/outlines/commit/", + "show_commit_url": "https://github.com/dottxt-ai/outlines/commit/", "benchmark_dir": ".", "env_dir": "env", "results_dir": "results", diff --git a/docs/blog/posts/roadmap-2024.md b/docs/blog/posts/roadmap-2024.md index d1413b05f..2832660fc 100644 --- a/docs/blog/posts/roadmap-2024.md +++ b/docs/blog/posts/roadmap-2024.md @@ -23,13 +23,13 @@ Thanks to a refactor of the library, it is now possible to use our 
constrained g *We would like expand our work to the whole sampling layer*, and add new sampling methods that should make structured generation more accurate. This means we will keep the `transformers` integration as it is today and will expand our text generation logic around this library. -Making workflows re-usable and easy to share is difficult today. That is why *we are big believers in [outlines functions](https://github.com/outlines-dev/functions)*. We will keep improving the interface and adding examples. +Making workflows re-usable and easy to share is difficult today. That is why *we are big believers in [outlines functions](https://github.com/dottxt-ai/functions)*. We will keep improving the interface and adding examples. Finally, *we want to add a CLI tool*, `outlines serve`. This will allows you to either serve an API that does general constrained generation, or to serve Outlines function. ## Detailed roadmap -Here is a more detailed roadmap for the next 12 months. Outlines is a [community](https://discord.gg/ZxBxyWmW5n) effort, and we invite you to pick either topic and [contribute to the library](https://github.com/outlines-dev/outlines). I will progressively add related [issues](https://github.com/outlines-dev/outlines/issues) in the repository. +Here is a more detailed roadmap for the next 12 months. Outlines is a [community](https://discord.gg/ZxBxyWmW5n) effort, and we invite you to pick either topic and [contribute to the library](https://github.com/dottxt-ai/outlines). I will progressively add related [issues](https://github.com/dottxt-ai/outlines/issues) in the repository. ### Many more examples and tutorials @@ -44,7 +44,7 @@ Let's be honest, Outlines is lacking clear and thorough examples. We want to cha We want to keep the current integrations but lower the maintenance cost so we can focus on what we bring to the table. -* Deprecate every obsolete integration: `transformers` has recently integrated `autoawq` and `autogptq` for instance. 
([PR](https://github.com/outlines-dev/outlines/pull/527)) +* Deprecate every obsolete integration: `transformers` has recently integrated `autoawq` and `autogptq` for instance. ([PR](https://github.com/dottxt-ai/outlines/pull/527)) * See if we can integrate to a library that provides state-space models via a logit processing function; * Integrate with llama.cpp via a logits processor; * Integrate with exllamav2 via a logits processor; @@ -55,14 +55,14 @@ We're just getting started! * Improve the performance of existing structured generation algorithms; * Improve the correctness of structured generation algorithms; -* Add ready-to-use grammars in the [grammars](https://github.com/outlines-dev/grammars) repository or in a submodule in Outlines. +* Add ready-to-use grammars in the [grammars](https://github.com/dottxt-ai/grammars) repository or in a submodule in Outlines. ### Keep developing Outlines functions Functions are awesome, use them! * Implement a CLI `outlines serve` that allows to serve Outlines functions locally; -* Add more functions to the [functions](https://github.com/outlines-dev/functions) repository. +* Add more functions to the [functions](https://github.com/dottxt-ai/functions) repository. ### Serve structured generation diff --git a/docs/community/contribute.md b/docs/community/contribute.md index d5568f47c..d29576b75 100644 --- a/docs/community/contribute.md +++ b/docs/community/contribute.md @@ -16,7 +16,7 @@ Note that the [issue tracker][issues] is only intended for actionable items. In ### Setup -First, [fork the repository on GitHub](https://github.com/outlines-dev/outlines/fork) and clone the fork locally: +First, [fork the repository on GitHub](https://github.com/dottxt-ai/outlines/fork) and clone the fork locally: ```bash git clone git@github.com/YourUserName/outlines.git @@ -127,6 +127,6 @@ Then you can [open a pull request][pull-requests] on GitHub. 
It should prompt yo Do not hesitate to open a draft PR before your contribution is ready, especially if you have questions and/or need feedback. If you need help, come tell us on [Discord][discord]. [discord]: https://discord.gg/R9DSu34mGd -[discussions]: https://github.com/outlines-dev/outlines/discussions -[issues]: https://github.com/outlines-dev/outlines/issues -[pull-requests]: https://github.com/outlines-dev/outlines/pulls +[discussions]: https://github.com/dottxt-ai/outlines/discussions +[issues]: https://github.com/dottxt-ai/outlines/issues +[pull-requests]: https://github.com/dottxt-ai/outlines/pulls diff --git a/docs/community/feedback.md b/docs/community/feedback.md index 942809542..033e91870 100644 --- a/docs/community/feedback.md +++ b/docs/community/feedback.md @@ -53,7 +53,7 @@ If Outlines has been helpful to you, let us know on [Discord][discord] or give u - + @@ -75,7 +75,7 @@ We highly value the insights of our users, and we would love to hear from you. I - What challenges are you facing? - What do you think could be improved? -To schedule an appointment follow [this link](https://cal.com/dottxt/outlines). This is exclusively intended to share your experience, please go on [Discord][discord] or [GitHub](https://github.com/outlines-dev/outlines/discussions) for support. +To schedule an appointment follow [this link](https://cal.com/dottxt/outlines). This is exclusively intended to share your experience, please go on [Discord][discord] or [GitHub](https://github.com/dottxt-ai/outlines/discussions) for support. 
[discord]: https://discord.gg/UppQmhEpe8 [twitter]: https://twitter.com/dottxtai diff --git a/docs/community/versioning.md b/docs/community/versioning.md index d64a56e7f..023b92537 100644 --- a/docs/community/versioning.md +++ b/docs/community/versioning.md @@ -15,7 +15,7 @@ Each part of the version number (`major.minor.patch`) conveys information about ## Releases -Releases along with release notes can be found on the [Outlines Releases GitHub Page](https://github.com/outlines-dev/outlines/releases). +Releases along with release notes can be found on the [Outlines Releases GitHub Page](https://github.com/dottxt-ai/outlines/releases). ## Version Pinning Recommendations diff --git a/docs/cookbook/chain_of_density.md b/docs/cookbook/chain_of_density.md index 16c2838f2..2a6b4eb39 100644 --- a/docs/cookbook/chain_of_density.md +++ b/docs/cookbook/chain_of_density.md @@ -122,4 +122,4 @@ print(result.model_dump()) Not bad, considering we used a smallish model to generate the summary! Chain of Density seems to be a very effective prompting technique to generate dense summaries, even with small quantized models. Its implementation in Outlines is also very short. -Note that this is the first article I tried and it worked out of the box. Try it out on other articles, and please share the results on Twitter, or by opening [a new discussion](https://github.com/outlines-dev/outlines/discussions/categories/show-and-tell) on the Outlines repository! +Note that this is the first article I tried and it worked out of the box. Try it out on other articles, and please share the results on Twitter, or by opening [a new discussion](https://github.com/dottxt-ai/outlines/discussions/categories/show-and-tell) on the Outlines repository! 
diff --git a/docs/cookbook/chain_of_thought.md b/docs/cookbook/chain_of_thought.md index bd76f40b7..b814ae048 100644 --- a/docs/cookbook/chain_of_thought.md +++ b/docs/cookbook/chain_of_thought.md @@ -3,7 +3,7 @@ Chain of thought is a prompting technique introduced in the paper ["Chain-of-Thought Prompting Elicits Reasoning in Large Language Models"](https://arxiv.org/abs/2201.11903) where throught prompting the authors generate a series of intermediate reasoning steps which improves the ability of LLMs to perform complex reasoning. -In this guide, we use [outlines](https://outlines-dev.github.io/outlines/) to apply chain of thought through structured output. +In this guide, we use [outlines](https://dottxt-ai.github.io/outlines/) to apply chain of thought through structured output. We use [llama.cpp](https://github.com/ggerganov/llama.cpp) using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) library. Outlines supports llama-cpp-python, but we need to install it ourselves: diff --git a/docs/cookbook/deploy-using-bentoml.md b/docs/cookbook/deploy-using-bentoml.md index 6bee77441..2926df0f6 100644 --- a/docs/cookbook/deploy-using-bentoml.md +++ b/docs/cookbook/deploy-using-bentoml.md @@ -2,7 +2,7 @@ [BentoML](https://github.com/bentoml/BentoML) is an open-source model serving library for building performant and scalable AI applications with Python. It comes with tools that you need for serving optimization, model packaging, and production deployment. -In this guide, we will show you how to use BentoML to run programs written with Outlines on GPU locally and in [BentoCloud](https://www.bentoml.com/), an AI Inference Platform for enterprise AI teams. The example source code in this guide is also available in the [examples/bentoml/](https://github.com/outlines-dev/outlines/blob/main/examples/bentoml/) directory. 
+In this guide, we will show you how to use BentoML to run programs written with Outlines on GPU locally and in [BentoCloud](https://www.bentoml.com/), an AI Inference Platform for enterprise AI teams. The example source code in this guide is also available in the [examples/bentoml/](https://github.com/dottxt-ai/outlines/blob/main/examples/bentoml/) directory. ## Import a model @@ -56,7 +56,7 @@ mistralai--mistral-7b-v0.1:m7lmf5ac2cmubnnz 13.49 GiB 2024-04-25 06:5 As the model is ready, we can define a [BentoML Service](https://docs.bentoml.com/en/latest/guides/services.html) to wrap the capabilities of the model. -We will run the JSON-structured generation example [in the README](https://github.com/outlines-dev/outlines?tab=readme-ov-file#efficient-json-generation-following-a-json-schema), with the following schema: +We will run the JSON-structured generation example [in the README](https://github.com/dottxt-ai/outlines?tab=readme-ov-file#efficient-json-generation-following-a-json-schema), with the following schema: ```python DEFAULT_SCHEMA = """{ @@ -153,7 +153,7 @@ We then need to define an HTTP endpoint using `@bentoml.api` to decorate the met Here `@bentoml.api` decorator defines `generate` as an HTTP endpoint that accepts a JSON request body with two fields: `prompt` and `json_schema` (optional, which allows HTTP clients to provide their own JSON schema). The type hints in the function signature will be used to validate incoming JSON requests. You can define as many HTTP endpoints as you want by using `@bentoml.api` to decorate other methods of `Outlines` class. -Now you can save the above code to `service.py` (or use [this implementation](https://github.com/outlines-dev/outlines/blob/main/examples/bentoml/)), and run the code using the BentoML CLI. +Now you can save the above code to `service.py` (or use [this implementation](https://github.com/dottxt-ai/outlines/blob/main/examples/bentoml/)), and run the code using the BentoML CLI. 
## Run locally for testing and debugging diff --git a/docs/cookbook/deploy-using-modal.md b/docs/cookbook/deploy-using-modal.md index 998e5d835..15e200cb5 100644 --- a/docs/cookbook/deploy-using-modal.md +++ b/docs/cookbook/deploy-using-modal.md @@ -76,7 +76,7 @@ outlines_image = outlines_image.run_function(import_model) ## Define a schema -We will run the JSON-structured generation example [in the README](https://github.com/outlines-dev/outlines?tab=readme-ov-file#efficient-json-generation-following-a-json-schema), with the following schema: +We will run the JSON-structured generation example [in the README](https://github.com/dottxt-ai/outlines?tab=readme-ov-file#efficient-json-generation-following-a-json-schema), with the following schema: ```python # Specify a schema for the character description. In this case, @@ -173,7 +173,7 @@ def main( generate.remote(prompt) ``` -Here `@app.local_entrypoint()` decorator defines `main` as the function to start from locally when using the Modal CLI. You can save above code to `example.py` (or use [this implementation](https://github.com/outlines-dev/outlines/blob/main/examples/modal_example.py)). Let's now see how to run the code on the cloud using the Modal CLI. +Here `@app.local_entrypoint()` decorator defines `main` as the function to start from locally when using the Modal CLI. You can save above code to `example.py` (or use [this implementation](https://github.com/dottxt-ai/outlines/blob/main/examples/modal_example.py)). Let's now see how to run the code on the cloud using the Modal CLI. ## Run on the cloud diff --git a/docs/cookbook/knowledge_graph_extraction.md b/docs/cookbook/knowledge_graph_extraction.md index e25166bca..6a6877756 100644 --- a/docs/cookbook/knowledge_graph_extraction.md +++ b/docs/cookbook/knowledge_graph_extraction.md @@ -1,6 +1,6 @@ # Knowledge Graph Extraction -In this guide, we use [outlines](https://outlines-dev.github.io/outlines/) to extract a knowledge graph from unstructured text. 
+In this guide, we use [outlines](https://dottxt-ai.github.io/outlines/) to extract a knowledge graph from unstructured text. We will use [llama.cpp](https://github.com/ggerganov/llama.cpp) using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) library. Outlines supports llama-cpp-python, but we need to install it ourselves: diff --git a/docs/cookbook/react_agent.md b/docs/cookbook/react_agent.md index ca4829d5f..0597eab07 100644 --- a/docs/cookbook/react_agent.md +++ b/docs/cookbook/react_agent.md @@ -1,6 +1,6 @@ # ReAct Agent -This example shows how to use [outlines](https://outlines-dev.github.io/outlines/) to build your own agent with open weights local models and structured outputs. It is inspired by the blog post [A simple Python implementation of the ReAct pattern for LLMs](https://til.simonwillison.net/llms/python-react-pattern) by [Simon Willison](https://simonwillison.net/). +This example shows how to use [outlines](https://dottxt-ai.github.io/outlines/) to build your own agent with open weights local models and structured outputs. It is inspired by the blog post [A simple Python implementation of the ReAct pattern for LLMs](https://til.simonwillison.net/llms/python-react-pattern) by [Simon Willison](https://simonwillison.net/). The ReAct pattern (for Reason+Act) is described in the paper [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/abs/2210.03629). It's a pattern where you implement additional actions that an LLM can take - searching Wikipedia or running calculations for example - and then teach it how to request the execution of those actions, and then feed their results back into the LLM. diff --git a/docs/cookbook/simtom.md b/docs/cookbook/simtom.md index aa96005b4..f730d029a 100644 --- a/docs/cookbook/simtom.md +++ b/docs/cookbook/simtom.md @@ -17,9 +17,9 @@ SimToM calls an LLM with two consecutive prompts: To implement SimToM with Outlines, we will need to: -1. 
Write the prompts with [prompt functions](https://outlines-dev.github.io/outlines/reference/prompting/). +1. Write the prompts with [prompt functions](https://dottxt-ai.github.io/outlines/reference/prompting/). 2. Define the JSON object each prompt will return using Pydantic. -3. Generate responses with a Mistral model using the [transformers integration](https://outlines-dev.github.io/outlines/reference/models/transformers/). +3. Generate responses with a Mistral model using the [transformers integration](https://dottxt-ai.github.io/outlines/reference/models/transformers/). Let's dive into it! diff --git a/docs/index.md b/docs/index.md index e7409c0d3..3692d32c9 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,26 +1,8 @@ --- title: Outlines +template: home.html # Note that this is managed in overrides/home.html hide: - navigation - toc - feedback --- - -# - -
- ![Image title](assets/images/logo.png){ width="600" } -
- -
-

Generate text with LLMs

-

Robust prompting & (structured) text generation

- [:fontawesome-solid-bolt: Get started](welcome.md){ .md-button .md-button--primary } - [:fontawesome-brands-discord: Join the Community](https://discord.gg/ZxBxyWmW5n){ .md-button } - -
-```bash -pip install outlines -``` -
-
diff --git a/docs/installation.md b/docs/installation.md index 1017b627e..bf2da86f9 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -28,7 +28,7 @@ If you encounter any problem using Outlines with these libraries, take a look at You can install the latest version of Outlines on the repository's `main` branch: ```python -pip install git+https://github.com/outlines-dev/outlines.git@main +pip install git+https://github.com/dottxt-ai/outlines.git@main ``` This can be useful, for instance, when a fix has been merged but not yet released. diff --git a/docs/overrides/home.html b/docs/overrides/home.html new file mode 100644 index 000000000..1114895e6 --- /dev/null +++ b/docs/overrides/home.html @@ -0,0 +1,120 @@ +{#- +This file overrides the home page to use HTML tooling +better. +-#} +{% extends "main.html" %} +{% block tabs %} +{{ super() }} + + + +
+
+
+
+ Outlines Logo +
+
+

+ Structured text generation and robust prompting for language models +

+ + + + + + +

Made with ❤️ by the team at .txt

+
+
+
+
+{% endblock %} +{% block content %}{% endblock %} +{% block footer %}{% endblock %} \ No newline at end of file diff --git a/docs/overrides/index.html b/docs/overrides/index.html deleted file mode 100644 index 74a4987f4..000000000 --- a/docs/overrides/index.html +++ /dev/null @@ -1,11 +0,0 @@ -{% extends "base.html" %} - -{% block announce %} - For updates follow @remilouf on - - - Twitter - -{% endblock %} diff --git a/docs/overrides/main.html b/docs/overrides/main.html index b4183d71a..5cb6467e6 100644 --- a/docs/overrides/main.html +++ b/docs/overrides/main.html @@ -1,22 +1,2 @@ {% extends "base.html" %} -{% block announce %} - For updates follow @dottxtai on - - - Twitter - - and - - {% include ".icons/fontawesome/solid/star.svg" %} - - the repo on - - - {% include ".icons/fontawesome/brands/github.svg" %} - - Github - -{% endblock %} diff --git a/docs/quickstart.md b/docs/quickstart.md index 2e1f9a6bb..81a067ad6 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -216,7 +216,7 @@ Once you are done experimenting with a prompt and an output structure, it is use ## Going further -If you need more inspiration you can take a look at the [cookbook](cookbook/index.md) or watch [Remi Louf's AI Engineer World’s Fair Presentation on Outlines](https://www.youtube.com/live/R0X7mPagRiE?t=775s). If you have any question, or requests for documentation please reach out to us on [GitHub](https://github.com/outlines-dev/outlines/discussions), [Twitter](https://twitter.com/remilouf) or [Discord](https://discord.gg/UppQmhEpe8). +If you need more inspiration you can take a look at the [cookbook](cookbook/index.md) or watch [Remi Louf's AI Engineer World’s Fair Presentation on Outlines](https://www.youtube.com/live/R0X7mPagRiE?t=775s). If you have any question, or requests for documentation please reach out to us on [GitHub](https://github.com/dottxt-ai/outlines/discussions), [Twitter](https://twitter.com/remilouf) or [Discord](https://discord.gg/UppQmhEpe8). 
[pydantic]: https://docs.pydantic.dev/latest diff --git a/docs/reference/generation/cfg.md b/docs/reference/generation/cfg.md index e3b177800..f0104ec63 100644 --- a/docs/reference/generation/cfg.md +++ b/docs/reference/generation/cfg.md @@ -34,7 +34,7 @@ print(sequence) !!! Note "Experimental" - Outlines current **community-contributed** implementation of CFG-structured generation is experimental. This does not reflect the performance of [.txt](https://dottxt.co)'s product, where we have optimized grammar-structured generation to be as fast as regex-structured generation. Additionally, it does not fully align with the approach described in our [technical report](https://arxiv.org/pdf/2307.09702), aside from its use of incremental/partial parsing. This feature is still a work in progress, requiring performance enhancements and bug fixes for an ideal implementation. For more details, please see our [grammar-related open issues on GitHub](https://github.com/outlines-dev/outlines/issues?q=is%3Aissue+is%3Aopen+label%3Agrammar). + Outlines current **community-contributed** implementation of CFG-structured generation is experimental. This does not reflect the performance of [.txt](https://dottxt.co)'s product, where we have optimized grammar-structured generation to be as fast as regex-structured generation. Additionally, it does not fully align with the approach described in our [technical report](https://arxiv.org/pdf/2307.09702), aside from its use of incremental/partial parsing. This feature is still a work in progress, requiring performance enhancements and bug fixes for an ideal implementation. For more details, please see our [grammar-related open issues on GitHub](https://github.com/dottxt-ai/outlines/issues?q=is%3Aissue+is%3Aopen+label%3Agrammar). !!! 
Note "Greedy" @@ -65,7 +65,7 @@ The following grammars are currently available: - Arithmetic grammar via `outlines.grammars.arithmetic` - JSON grammar via `outlines.grammars.json` -If you would like more grammars to be added to the repository, please open an [issue](https://github.com/outlines-dev/outlines/issues) or a [pull request](https://github.com/outlines-dev/outlines/pulls). +If you would like more grammars to be added to the repository, please open an [issue](https://github.com/dottxt-ai/outlines/issues) or a [pull request](https://github.com/dottxt-ai/outlines/pulls). ## Grammar guide diff --git a/docs/reference/generation/generation.md b/docs/reference/generation/generation.md index 0c090f8a7..a14818514 100644 --- a/docs/reference/generation/generation.md +++ b/docs/reference/generation/generation.md @@ -174,7 +174,7 @@ print(result) # 5+5+5+5+5 ``` -The available grammars are listed [here](https://github.com/outlines-dev/outlines/tree/main/outlines/grammars). +The available grammars are listed [here](https://github.com/dottxt-ai/outlines/tree/main/outlines/grammars). ### [Regex-structured generation](./regex.md) diff --git a/docs/reference/generation/types.md b/docs/reference/generation/types.md index 5b83a5916..eb6d7382b 100644 --- a/docs/reference/generation/types.md +++ b/docs/reference/generation/types.md @@ -73,7 +73,7 @@ print(result) ``` -We plan on adding many more custom types. If you have found yourself writing regular expressions to generate fields of a given type, or if you could benefit from more specific types don't hesite to [submit a PR](https://github.com/outlines-dev/outlines/pulls) or [open an issue](https://github.com/outlines-dev/outlines/issues/new/choose). +We plan on adding many more custom types. 
If you have found yourself writing regular expressions to generate fields of a given type, or if you could benefit from more specific types don't hesitate to [submit a PR](https://github.com/dottxt-ai/outlines/pulls) or [open an issue](https://github.com/dottxt-ai/outlines/issues/new/choose). [wiki-isbn]: https://en.wikipedia.org/wiki/ISBN#Check_digits diff --git a/docs/reference/serve/vllm.md b/docs/reference/serve/vllm.md index 8e2886c96..14277a526 100644 --- a/docs/reference/serve/vllm.md +++ b/docs/reference/serve/vllm.md @@ -65,7 +65,7 @@ curl http://127.0.0.1:8000/generate \ Instead of `curl`, you can also use the [requests][requests]{:target="_blank"} library from another python program. -Please consult the [vLLM documentation][vllm]{:target="_blank"} for details on additional request parameters. You can also [read the code](https://github.com/outlines-dev/outlines/blob/main/outlines/serve/serve.py) in case you need to customize the solution to your needs. +Please consult the [vLLM documentation][vllm]{:target="_blank"} for details on additional request parameters. You can also [read the code](https://github.com/dottxt-ai/outlines/blob/main/outlines/serve/serve.py) in case you need to customize the solution to your needs. [requests]: https://requests.readthedocs.io/en/latest/ [vllm]: https://docs.vllm.ai/en/latest/index.html diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index c4539ab80..7c5b5e808 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -9,6 +9,7 @@ --md-code-fg-color: #FFFFFF; --md-text-font-family: "Inter"; --md-code-font: "Source Code Pro Custom"; + --md-typeset-a-color: #d53135; /*this is the brand color*/ /* don't inherit white fg color for mermaid diagrams from --md-code-fg-color */ --md-mermaid-label-fg-color: #000000; diff --git a/docs/welcome.md b/docs/welcome.md index a7800f7ad..728a30a97 100644 --- a/docs/welcome.md +++ b/docs/welcome.md @@ -2,11 +2,11 @@ title: Welcome to Outlines!
--- -Outlines〰 is a Python library that allows you to use Large Language Model in a simple and robust way (with structured generation). It is built by [.txt][.txt]{:target="_blank"}, and is already used in production by many companies. +Outlines is a Python library that allows you to use Large Language Models in a simple and robust way (with structured generation). It is built by [.txt][.txt]{:target="_blank"}, and is already used in production by many companies. ## What models do you support? -We support [Openai](reference/models/openai.md), but the true power of Outlines〰 is unleashed with Open Source models available via the [transformers](reference/models/transformers.md), [llama.cpp](reference/models/llamacpp.md), [exllama2](reference/models/exllamav2.md), [mlx-lm](reference/models/mlxlm.md) and [vllm](reference/models/vllm.md) models. If you want to build and maintain an integration with another library, [get in touch][discord]. +We support [Openai](reference/models/openai.md), but the true power of Outlines is unleashed with Open Source models available via the [transformers](reference/models/transformers.md), [llama.cpp](reference/models/llamacpp.md), [exllama2](reference/models/exllamav2.md), [mlx-lm](reference/models/mlxlm.md) and [vllm](reference/models/vllm.md) models. If you want to build and maintain an integration with another library, [get in touch][discord]. ## What are the main features? @@ -48,7 +48,7 @@ We support [Openai](reference/models/openai.md), but the true power of Outlines ## Why use Outlines? -Outlines〰 is built at [.txt][.txt] by engineers with decades of experience in software engineering, machine learning (Bayesian Statistics and NLP), and compilers. [.txt][.txt] is a VC-backed company fully focused on the topic of structured generation and is committed to make the community benefit from its experience.
+Outlines is built at [.txt][.txt] by engineers with decades of experience in software engineering, machine learning (Bayesian Statistics and NLP), and compilers. [.txt][.txt] is a VC-backed company fully focused on the topic of structured generation and is committed to making the community benefit from its experience. We are also open source veterans and have authored/maintained many libraries over the years: the [Aesara][aesara]{:target="_blank"} and [Pythological][pythological]{:target="_blank"} ecosystems, [Blackjax][blackjax]{:target="_blank"} and [Hy][hy]{:target="_blank"} among many others. . @@ -119,33 +119,33 @@ Still not convinced, read [what people say about us](community/feedback.md). And ## Philosophy -**Outlines** 〰 is a library for neural text generation. You can think of it as a +**Outlines** is a library for neural text generation. You can think of it as a more flexible replacement for the `generate` method in the [transformers](https://github.com/huggingface/transformers) library. -**Outlines** 〰 helps developers *structure text generation* to build robust +**Outlines** helps developers *structure text generation* to build robust interfaces with external systems. It provides generation methods that guarantee that the output will match a regular expressions, or follow a JSON schema. -**Outlines** 〰 provides *robust prompting primitives* that separate the prompting +**Outlines** provides *robust prompting primitives* that separate the prompting from the execution logic and lead to simple implementations of few-shot generations, ReAct, meta-prompting, agents, etc. -**Outlines** 〰 is designed as a *library* that is meant to be compatible the +**Outlines** is designed as a *library* that is meant to be compatible with the broader ecosystem, not to replace it. We use as few abstractions as possible, and generation can be interleaved with control flow, conditionals, custom Python functions and calls to other libraries.
-**Outlines** 〰 is *compatible with every auto-regressive model*. It only interfaces with models +**Outlines** is *compatible with every auto-regressive model*. It only interfaces with models via the next-token logits distribution. ## Outlines people Outlines would not be what it is today without a community of dedicated developers: - - + + ## Acknowledgements diff --git a/environment.yml b/environment.yml index c267f86a0..6fc980fed 100644 --- a/environment.yml +++ b/environment.yml @@ -1,9 +1,9 @@ # To use: # # $ conda env create -f environment.yml # `mamba` works too for this command -# $ conda activate outlines-dev +# $ conda activate dottxt-ai # -name: outlines-dev +name: dottxt-ai channels: - conda-forge - huggingface diff --git a/examples/dating_profile.py b/examples/dating_profile.py index acc00126e..504ec943d 100644 --- a/examples/dating_profile.py +++ b/examples/dating_profile.py @@ -26,7 +26,7 @@ class QuestionAnswer: class DatingProfile(BaseModel): # It is possible put length constraints on these strings using constr- however, this appears to dramatically increase the generation time - # This may be resolved in the future with this PR: https://github.com/outlines-dev/outlines/pull/272 + # This may be resolved in the future with this PR: https://github.com/dottxt-ai/outlines/pull/272 bio: str job: str # Ignore mypy checks here because it still doesn't support conlist or constr: https://github.com/pydantic/pydantic/issues/975 diff --git a/mkdocs.yml b/mkdocs.yml index c95905914..4189df1c0 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,5 +1,5 @@ # Site information -site_name: Outlines 〰️ +site_name: Outlines site_author: The Outlines developers site_description: >- Structured text generation with LLMs @@ -19,7 +19,7 @@ theme: palette: - scheme: default primary: white - logo: assets/images/logo-simple.png + logo: assets/images/logo-square.svg favicon: assets/images/logo-simple.png icon: repo: fontawesome/brands/github diff --git a/outlines/fsm/guide.py 
b/outlines/fsm/guide.py index 44a918494..b7b121fe6 100644 --- a/outlines/fsm/guide.py +++ b/outlines/fsm/guide.py @@ -322,7 +322,7 @@ def __init__(self, cfg_string: str, tokenizer): """ warnings.warn( "Outlines' public *community-contributed* CFG structured generation is experimental. " - "Please review https://outlines-dev.github.io/outlines/reference/cfg#disclaimer" + "Please review https://dottxt-ai.github.io/outlines/reference/cfg#disclaimer" ) self.cfg_string = cfg_string @@ -466,7 +466,7 @@ def _get_parser_state_token_applied( def is_final_state(self, state: CFGState) -> bool: # TODO: remove this method, use can_terminate_state and must_terminate_state - # here and in RegexGuide per https://github.com/outlines-dev/outlines/issues/885 + # here and in RegexGuide per https://github.com/dottxt-ai/outlines/issues/885 return self.can_terminate_state(state) def can_terminate_state(self, state: CFGState) -> bool: diff --git a/outlines/generate/cfg.py b/outlines/generate/cfg.py index 4f372f209..034a65ae5 100644 --- a/outlines/generate/cfg.py +++ b/outlines/generate/cfg.py @@ -44,7 +44,7 @@ def cfg_vision(model, cfg_str: str, sampler: Sampler = multinomial()): @cfg.register(ExLlamaV2Model) def cfg_exllamav2(model, cfg_str: str, sampler: Sampler = multinomial()): raise NotImplementedError( - "Not yet available, track progress in https://github.com/outlines-dev/outlines/pull/1010" + "Not yet available, track progress in https://github.com/dottxt-ai/outlines/pull/1010" ) diff --git a/pyproject.toml b/pyproject.toml index 99d4f94e1..dfda81acd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,9 +73,9 @@ serve = [ ] [project.urls] -homepage = "https://github.com/outlines-dev/outlines" -documentation = "https://outlines-dev.github.io/outlines/" -repository = "https://github.com/outlines-dev/outlines" +homepage = "https://github.com/dottxt-ai/outlines" +documentation = "https://dottxt-ai.github.io/outlines/" +repository = "https://github.com/dottxt-ai/outlines" 
[project.readme] file="README.md" diff --git a/tests/fsm/test_json_schema.py b/tests/fsm/test_json_schema.py index 21571da8d..7565ff642 100644 --- a/tests/fsm/test_json_schema.py +++ b/tests/fsm/test_json_schema.py @@ -1018,7 +1018,7 @@ class MockModel(BaseModel): def test_one_of_doesnt_produce_illegal_lookaround(): - """Reproduces failure in https://github.com/outlines-dev/outlines/issues/823""" + """Reproduces failure in https://github.com/dottxt-ai/outlines/issues/823""" class Cat(BaseModel): pet_type: Literal["cat"] diff --git a/tests/fsm/test_regex.py b/tests/fsm/test_regex.py index 824588b22..7418deca2 100644 --- a/tests/fsm/test_regex.py +++ b/tests/fsm/test_regex.py @@ -667,7 +667,7 @@ def convert_token_to_string(self, token): def test_numba_leading_null_byte_UnicodeCharSeq_remains_broken(): """Assert numba UnicodeCharSeq w/ leading \x00 is still broken""" # EXPLANATION: - # https://github.com/outlines-dev/outlines/pull/930#issuecomment-2143535968 + # https://github.com/dottxt-ai/outlines/pull/930#issuecomment-2143535968 # from https://github.com/numba/numba/issues/9542 d = numba.typed.typeddict.Dict.empty(numba.types.UnicodeCharSeq(1), numba.int64) @@ -685,7 +685,7 @@ def test_numba_leading_null_byte_UnicodeCharSeq_remains_broken(): def test_numba_leading_null_byte_unicode_type_sane(input_key): """Assert numba unicode_type w/ leading \x00 is working""" # EXPLANATION: - # https://github.com/outlines-dev/outlines/pull/930#issuecomment-2143535968 + # https://github.com/dottxt-ai/outlines/pull/930#issuecomment-2143535968 # from https://github.com/numba/numba/issues/9542 d = numba.typed.typeddict.Dict.empty(numba.types.unicode_type, numba.int64) @@ -712,8 +712,8 @@ def test_reduced_vocabulary_with_rare_tokens(rare_token): See [1] and [2] for context. 
- [1]: https://github.com/outlines-dev/outlines/pull/763 - [2]: https://github.com/outlines-dev/outlines/pull/948 + [1]: https://github.com/dottxt-ai/outlines/pull/763 + [2]: https://github.com/dottxt-ai/outlines/pull/948 """ tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") tokenizer = TransformerTokenizer(tokenizer=tokenizer) diff --git a/tests/generate/test_generate.py b/tests/generate/test_generate.py index fc4166535..ff247b0f4 100644 --- a/tests/generate/test_generate.py +++ b/tests/generate/test_generate.py @@ -76,7 +76,7 @@ def model_vllm(tmp_path_factory): return models.vllm("facebook/opt-125m", gpu_memory_utilization=0.1) -# TODO: exllamav2 failing in main, address in https://github.com/outlines-dev/outlines/issues/808 +# TODO: exllamav2 failing in main, address in https://github.com/dottxt-ai/outlines/issues/808 # TODO: t5 tokenizer doesn't work with streaming """ @pytest.fixture(scope="session") @@ -235,7 +235,7 @@ def test_generate_fsm(request, model_fixture, pattern): @pytest.mark.skip( - "Fix issues with JSON, some models fail this test https://github.com/outlines-dev/outlines/issues/985" + "Fix issues with JSON, some models fail this test https://github.com/dottxt-ai/outlines/issues/985" ) @pytest.mark.parametrize("model_fixture", ALL_MODEL_FIXTURES) def test_generate_json(request, model_fixture, sample_schema): diff --git a/tests/generate/test_integration_llamacpp.py b/tests/generate/test_integration_llamacpp.py index 0a98f0226..08521c672 100644 --- a/tests/generate/test_integration_llamacpp.py +++ b/tests/generate/test_integration_llamacpp.py @@ -250,7 +250,7 @@ def test_llamacpp_json_schema(model): ], ) def test_byte_tokenizer_regression(repo, model_path, hf_tokenizer_uri): - """Reproduce https://github.com/outlines-dev/outlines/issues/820""" + """Reproduce https://github.com/dottxt-ai/outlines/issues/820""" import llama_cpp model = models.llamacpp( diff --git a/tests/test_function.py b/tests/test_function.py index 
24e132d42..62f7ea29f 100644 --- a/tests/test_function.py +++ b/tests/test_function.py @@ -28,29 +28,29 @@ def test_download_from_github_invalid(): download_from_github("outlines/program") with pytest.raises(ValueError, match="Do not append"): - download_from_github("outlines-dev/outlines/program.py") + download_from_github("dottxt-ai/outlines/program.py") @responses.activate def test_download_from_github_success(): responses.add( responses.GET, - "https://raw.githubusercontent.com/outlines-dev/outlines/main/program.py", + "https://raw.githubusercontent.com/dottxt-ai/outlines/main/program.py", body="import outlines\n", status=200, ) - file = download_from_github("outlines-dev/outlines/program") + file = download_from_github("dottxt-ai/outlines/program") assert file == "import outlines\n" responses.add( responses.GET, - "https://raw.githubusercontent.com/outlines-dev/outlines/main/foo/bar/program.py", + "https://raw.githubusercontent.com/dottxt-ai/outlines/main/foo/bar/program.py", body="import outlines\n", status=200, ) - file = download_from_github("outlines-dev/outlines/foo/bar/program") + file = download_from_github("dottxt-ai/outlines/foo/bar/program") assert file == "import outlines\n"