diff --git a/packages/ui/fern-docs-mdx/src/headings.ts b/packages/ui/fern-docs-mdx/src/headings.ts index 988e3c335a..22800167c7 100644 --- a/packages/ui/fern-docs-mdx/src/headings.ts +++ b/packages/ui/fern-docs-mdx/src/headings.ts @@ -43,9 +43,12 @@ export function collectRootHeadings(tree: Root, lines: readonly string[]): Headi // `toString` will strip away all markdown formatting for the title // TODO: we should preserve some formatting within the heading, i.e. `` and ``, etc. - const title = mdastToString(heading, { preserveNewlines: false }); + const rawTitle = mdastToString(heading, { preserveNewlines: false }); - let id = extractAnchorFromHeadingText(title).anchor; + const extractedTitle = extractAnchorFromHeadingText(rawTitle); + const title = extractedTitle.text; + + let id = extractedTitle.anchor; if (id == null) { id = slugger.slug(title); diff --git a/packages/ui/fern-docs-mdx/src/mdast-utils/mdast-to-string.ts b/packages/ui/fern-docs-mdx/src/mdast-utils/mdast-to-string.ts index d11c51ec21..2b5734a9c4 100644 --- a/packages/ui/fern-docs-mdx/src/mdast-utils/mdast-to-string.ts +++ b/packages/ui/fern-docs-mdx/src/mdast-utils/mdast-to-string.ts @@ -40,7 +40,7 @@ export function mdastToString( value?: unknown, { includeImageAlt = true, includeHtml = true, preserveNewlines = true }: Options = {}, ): string { - return one(value, includeImageAlt, includeHtml, preserveNewlines); + return one(value, includeImageAlt, includeHtml, preserveNewlines).trim(); } /** diff --git a/packages/ui/fern-docs-search-server/package.json b/packages/ui/fern-docs-search-server/package.json index 146bebafe0..eaa8ceda58 100644 --- a/packages/ui/fern-docs-search-server/package.json +++ b/packages/ui/fern-docs-search-server/package.json @@ -13,19 +13,21 @@ "type": "module", "exports": { "./types": { - "types": "./dist/algolia/types.d.ts", + "types": "./src/algolia/types.ts", "default": "./dist/algolia/types.js" }, - ".": { - "types": "./dist/index.d.ts", - "default": "./dist/index.js" + "./algolia": { + "types": "./src/algolia/index.ts", + "default": "./dist/algolia/index.js" + }, + "./tasks": { + "types": "./src/tasks/index.ts", + "default": "./dist/tasks/index.js" } }, "sideEffects": false, "scripts": { - "clean": "rm -rf ./lib && tsc --build --clean", - "trigger:dev": "pnpm dlx trigger.dev@latest dev", - "compile": "tsc --build", + "compile": "tsup --clean", "test": "vitest --run --passWithNoTests --globals", "lint:eslint": "eslint --max-warnings 0 . 
--ignore-path=../../../.eslintignore", "lint:eslint:fix": "pnpm lint:eslint --fix", @@ -35,7 +37,7 @@ "format:check": "prettier --check --ignore-unknown --ignore-path ../../../shared/.prettierignore \"**\"", "organize-imports": "organize-imports-cli tsconfig.json", "depcheck": "depcheck", - "dev": "tsc --watch", + "dev": "tsup --watch", "docs:dev": "pnpm dev" }, "dependencies": { @@ -43,21 +45,21 @@ "@fern-api/ui-core-utils": "workspace:*", "@fern-ui/fern-docs-mdx": "workspace:*", "@fern-ui/fern-docs-utils": "workspace:*", - "@trigger.dev/sdk": "^3.0.13", "algoliasearch": "^5.10.2", "es-toolkit": "^1.24.0", "pnpm": "^9.12.1", + "ts-essentials": "^10.0.1", "zod": "^3.23.8" }, "devDependencies": { "@fern-platform/configs": "workspace:*", - "@trigger.dev/build": "^3.0.13", "@types/node": "^18.7.18", "depcheck": "^1.4.3", "eslint": "^8.56.0", "organize-imports-cli": "^0.10.0", "prettier": "^3.3.2", "stylelint": "^16.1.0", + "tsup": "^8.0.2", "typescript": "5.4.3", "vitest": "^1.5.0" } diff --git a/packages/ui/fern-docs-search-server/src/algolia/__test__/__snapshots__/cohere.json b/packages/ui/fern-docs-search-server/src/algolia/__test__/__snapshots__/cohere.json new file mode 100644 index 0000000000..8bd64fa002 --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/algolia/__test__/__snapshots__/cohere.json @@ -0,0 +1,387 @@ +[ + { + "objectID": "test", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "Elasticsearch and Cohere", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "description": "Learn how to create a semantic search pipeline with Elasticsearch and Cohere's generative AI capabilities.", + "content": "Elasticsearch has all the tools developers need to build next generation search experiences with generative AI, and it supports native integration with Cohere through their inference API.\nUse Elastic if you’d like to build with:\nA vector database\n\nDeploy multiple ML models\n\nPerform text, vector and hybrid search\n\nSearch with filters, facet, aggregations\n\nApply document and field level security\n\nRun on-prem, cloud, or serverless (preview)\n\n\nThis guide uses a dataset of Wikipedia articles to set up a pipeline for semantic search. It will cover:\nCreating an Elastic inference processor using Cohere embeddings\n\nCreating an Elasticsearch index with embeddings\n\nPerforming hybrid search on the Elasticsearch index and reranking results\n\nPerforming basic RAG\n\n\nTo see the full code sample, refer to this notebook. You can also find an integration guide here." + }, + { + "objectID": "test-prerequisites", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#prerequisites", + "content": "This tutorial assumes you have the following:\nAn Elastic Cloud account through Elastic Cloud, available with a free trial\n\nA Cohere production API Key. 
Get your API Key at this link if you don't have one\n\nPython 3.7 or higher\n\n\nNote: While this tutorial integrates Cohere with an Elastic Cloud serverless project, you can also integrate with your self-managed Elasticsearch deployment or Elastic Cloud deployment by simply switching from the serverless to the general language client.", + "hierarchy": { + "h2": { + "id": "prerequisites", + "title": "Prerequisites" + } + }, + "level": "h2", + "level_title": "Prerequisites" + }, + { + "objectID": "test-create-an-elastic-serverless-deployment", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#create-an-elastic-serverless-deployment", + "content": "If you don't have an Elastic Cloud deployment, sign up here for a free trial and request access to Elastic Serverless", + "hierarchy": { + "h2": { + "id": "create-an-elastic-serverless-deployment", + "title": "Create an Elastic Serverless deployment" + } + }, + "level": "h2", + "level_title": "Create an Elastic Serverless deployment" + }, + { + "objectID": "test-install-the-required-packages", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#install-the-required-packages", + "content": "Install and import the required Python Packages:\nelasticsearch_serverless\n\ncohere: ensure you are on version 5.2.5 or later\n\n\nTo install the packages, use the following code\nAfter the instalation has finished, find your endpoint URL and create your API key in the Serverless dashboard.", + "code_snippets": [ + { + "lang": "python", + "meta": "PYTHON", + "code": "!pip install elasticsearch_serverless==0.2.0.20231031\n!pip install cohere==5.2.5" + } + ], + "hierarchy": { + "h2": { + "id": "install-the-required-packages", + "title": "Install the required packages" + } + }, + "level": "h2", + "level_title": "Install the required packages" + }, + { + "objectID": "test-import-the-required-packages", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#import-the-required-packages", + "content": "Next, we need to import the modules we need. 🔐 NOTE: getpass enables us to securely prompt the user for credentials without echoing them to the terminal, or storing it in memory.", + "code_snippets": [ + { + "lang": "python", + "meta": "PYTHON", + "code": "from elasticsearch_serverless import Elasticsearch, helpers\nfrom getpass import getpass\nimport cohere\nimport json\nimport requests" + } + ], + "hierarchy": { + "h2": { + "id": "import-the-required-packages", + "title": "Import the required packages" + } + }, + "level": "h2", + "level_title": "Import the required packages" + }, + { + "objectID": "test-create-an-elasticsearch-client", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#create-an-elasticsearch-client", + "content": "Now we can instantiate the Python Elasticsearch client.\nFirst we prompt the user for their endpoint and encoded API key. 
Then we create a client object that instantiates an instance of the Elasticsearch class.\nWhen creating your Elastic Serverless API key make sure to turn on Control security privileges, and edit cluster privileges to specify \"cluster\": [\"all\"].", + "code_snippets": [ + { + "lang": "python", + "meta": "PYTHON", + "code": "ELASTICSEARCH_ENDPOINT = getpass(\"Elastic Endpoint: \")\nELASTIC_API_KEY = getpass(\"Elastic encoded API key: \") # Use the encoded API key\n\nclient = Elasticsearch(\n ELASTICSEARCH_ENDPOINT,\n api_key=ELASTIC_API_KEY\n)\n\n# Confirm the client has connected\nprint(client.info())" + } + ], + "hierarchy": { + "h2": { + "id": "create-an-elasticsearch-client", + "title": "Create an Elasticsearch client" + } + }, + "level": "h2", + "level_title": "Create an Elasticsearch client" + }, + { + "objectID": "test-create-an-inference-endpoint", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#create-an-inference-endpoint", + "content": "One of the biggest pain points of building a vector search index is computing embeddings for a large corpus of data. Fortunately Elastic offers inference endpoints that can be used in ingest pipelines to automatically compute embeddings when bulk indexing operations are performed.\nTo set up an inference pipeline for ingestion we first must create an inference endpoint that uses Cohere embeddings. You'll need a Cohere API key for this that you can find in your Cohere account under the API keys section.\nWe will create an inference endpoint that uses embed-english-v3.0 and int8 or byte compression to save on storage.\nHere's what you might see:", + "code_snippets": [ + { + "lang": "python", + "meta": "PYTHON", + "code": "COHERE_API_KEY = getpass(\"Enter Cohere API key: \")\n# Delete the inference model if it already exists\nclient.options(ignore_status=[404]).inference.delete(inference_id=\"cohere_embeddings\")\n\nclient.inference.put(\n task_type=\"text_embedding\",\n inference_id=\"cohere_embeddings\",\n body={\n \"service\": \"cohere\",\n \"service_settings\": {\n \"api_key\": COHERE_API_KEY,\n \"model_id\": \"embed-english-v3.0\",\n \"embedding_type\": \"int8\",\n \"similarity\": \"cosine\"\n },\n \"task_settings\": {},\n },\n)" + }, + { + "code": "Enter Cohere API key: ··········\nObjectApiResponse({'model_id': 'cohere_embeddings', 'inference_id': 'cohere_embeddings', 'task_type': 'text_embedding', 'service': 'cohere', 'service_settings': {'similarity': 'cosine', 'dimensions': 1024, 'model_id': 'embed-english-v3.0', 'rate_limit': {'requests_per_minute': 10000}, 'embedding_type': 'byte'}, 'task_settings': {}})" + } + ], + "hierarchy": { + "h1": { + "id": "build-a-hybrid-search-index-with-cohere-and-elasticsearch", + "title": "Build a Hybrid Search Index with Cohere and Elasticsearch" + }, + "h2": { + "id": "create-an-inference-endpoint", + "title": "Create an inference endpoint" + } + }, + "level": "h2", + "level_title": "Create an inference endpoint" + }, + { + "objectID": "test-create-the-index", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#create-the-index", + "content": "The mapping of the destination index – the index that contains the embeddings that the model will generate based on your input text – must be created. 
The destination index must have a field with the semantic_text field type to index the output of the Cohere model.\nLet's create an index named cohere-wiki-embeddings with the mappings we need\nYou might see something like this:\nLet's note a few important parameters from that API call:\nsemantic_text: A field type automatically generates embeddings for text content using an inference endpoint.\n\ninference_id: Specifies the ID of the inference endpoint to be used. In this example, the model ID is set to cohere_embeddings.\n\ncopy_to: Specifies the output field which contains inference results", + "code_snippets": [ + { + "lang": "python", + "meta": "PYTHON", + "code": "client.indices.delete(index=\"cohere-wiki-embeddings\", ignore_unavailable=True)\nclient.indices.create(\n index=\"cohere-wiki-embeddings\",\n mappings={\n \"properties\": {\n \"text_semantic\": {\n \"type\": \"semantic_text\",\n \"inference_id\": \"cohere_embeddings\"\n },\n \"text\": {\"type\": \"text\", \"copy_to\": \"text_semantic\"},\n \"wiki_id\": {\"type\": \"integer\"},\n \"url\": {\"type\": \"text\"},\n \"views\": {\"type\": \"float\"},\n \"langs\": {\"type\": \"integer\"},\n \"title\": {\"type\": \"text\"},\n \"paragraph_id\": {\"type\": \"integer\"},\n \"id\": {\"type\": \"integer\"}\n }\n },\n)" + }, + { + "code": "ObjectApiResponse({'acknowledged': True, 'shards_acknowledged': True, 'index': 'cohere-wiki-embeddings'})" + } + ], + "hierarchy": { + "h1": { + "id": "build-a-hybrid-search-index-with-cohere-and-elasticsearch", + "title": "Build a Hybrid Search Index with Cohere and Elasticsearch" + }, + "h2": { + "id": "create-the-index", + "title": "Create the Index" + } + }, + "level": "h2", + "level_title": "Create the Index" + }, + { + "objectID": "test-insert-documents", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#insert-documents", + "content": "Let's insert our example wiki dataset. You need a production Cohere account to complete this step, otherwise the documentation ingest will time out due to the API request rate limits.\nYou should see this:", + "code_snippets": [ + { + "lang": "python", + "meta": "PYTHON", + "code": "url = \"https://raw.githubusercontent.com/cohere-ai/notebooks/main/notebooks/data/embed_jobs_sample_data.jsonl\"\nresponse = requests.get(url)\n\n# Load the response data into a JSON object\njsonl_data = response.content.decode('utf-8').splitlines()\n\n# Prepare the documents to be indexed\ndocuments = []\nfor line in jsonl_data:\n data_dict = json.loads(line)\n documents.append({\n \"_index\": \"cohere-wiki-embeddings\",\n \"_source\": data_dict,\n }\n )\n\n# Use the bulk endpoint to index\nhelpers.bulk(client, documents)\n\nprint(\"Done indexing documents into `cohere-wiki-embeddings` index!\")" + }, + { + "code": "Done indexing documents into `cohere-wiki-embeddings` index!" 
+ } + ], + "hierarchy": { + "h1": { + "id": "build-a-hybrid-search-index-with-cohere-and-elasticsearch", + "title": "Build a Hybrid Search Index with Cohere and Elasticsearch" + }, + "h2": { + "id": "insert-documents", + "title": "Insert Documents" + } + }, + "level": "h2", + "level_title": "Insert Documents" + }, + { + "objectID": "test-semantic-search", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#semantic-search", + "content": "After the dataset has been enriched with the embeddings, you can query the data using the semantic query provided by Elasticsearch. semantic_text in Elasticsearch simplifies the semantic search significantly. Learn more about how semantic text in Elasticsearch allows you to focus on your model and results instead of on the technical details.\nHere's what that might look like:", + "code_snippets": [ + { + "lang": "python", + "meta": "PYTHON ", + "code": "query = \"When were the semi-finals of the 2022 FIFA world cup played?\"\n\nresponse = client.search(\n index=\"cohere-wiki-embeddings\",\n size=100,\n query = {\n \"semantic\": {\n \"query\": \"When were the semi-finals of the 2022 FIFA world cup played?\",\n \"field\": \"text_semantic\"\n }\n }\n)\n\nraw_documents = response[\"hits\"][\"hits\"]\n\n# Display the first 10 results\nfor document in raw_documents[0:10]:\n print(f'Title: {document[\"_source\"][\"title\"]}\\nText: {document[\"_source\"][\"text\"]}\\n')\n\n# Format the documents for ranking\ndocuments = []\nfor hit in response[\"hits\"][\"hits\"]:\n documents.append(hit[\"_source\"][\"text\"])" + }, + { + "code": "Title: 2022 FIFA World Cup\nText: The 2022 FIFA World Cup was an international football tournament contested by the men's national teams of FIFA's member associations and 22nd edition of the FIFA World Cup. It took place in Qatar from 20 November to 18 December 2022, making it the first World Cup held in the Arab world and Muslim world, and the second held entirely in Asia after the 2002 tournament in South Korea and Japan. France were the defending champions, having defeated Croatia 4–2 in the 2018 final. At an estimated cost of over $220 billion, it is the most expensive World Cup ever held to date; this figure is disputed by Qatari officials, including organising CEO Nasser Al Khater, who said the true cost was $8 billion, and other figures related to overall infrastructure development since the World Cup was awarded to Qatar in 2010.\n\nTitle: 2022 FIFA World Cup\nText: The semi-finals were played on 13 and 14 December. Messi scored a penalty kick before Julián Álvarez scored twice to give Argentina a 3–0 victory over Croatia. Théo Hernandez scored after five minutes as France led Morocco for most of the game and later Randal Kolo Muani scored on 78 minutes to complete a 2–0 victory for France over Morocco as they reached a second consecutive final.\n\nTitle: 2022 FIFA World Cup\nText: The quarter-finals were played on 9 and 10 December. Croatia and Brazil ended 0–0 after 90 minutes and went to extra time. Neymar scored for Brazil in the 15th minute of extra time. Croatia, however, equalised through Bruno Petković in the second period of extra time. With the match tied, a penalty shootout decided the contest, with Croatia winning the shoot-out 4–2. In the second quarter-final match, Nahuel Molina and Messi scored for Argentina before Wout Weghorst equalised with two goals shortly before the end of the game. 
The match went to extra time and then penalties, where Argentina would go on to win 4–3. Morocco defeated Portugal 1–0, with Youssef En-Nesyri scoring at the end of the first half. Morocco became the first African and the first Arab nation to advance as far as the semi-finals of the competition. Despite Harry Kane scoring a penalty for England, it was not enough to beat France, who won 2–1 by virtue of goals from Aurélien Tchouaméni and Olivier Giroud, sending them to their second consecutive World Cup semi-final and becoming the first defending champions to reach this stage since Brazil in 1998.\n\nTitle: 2022 FIFA World Cup\nText: Unlike previous FIFA World Cups, which are typically played in June and July, because of Qatar's intense summer heat and often fairly high humidity, the 2022 World Cup was played in November and December. As a result, the World Cup was unusually staged in the middle of the seasons of domestic association football leagues, which started in late July or August, including all of the major European leagues, which had been obliged to incorporate extended breaks into their domestic schedules to accommodate the World Cup. Major European competitions had scheduled their respective competitions group matches to be played before the World Cup, to avoid playing group matches the following year.\n\nTitle: 2022 FIFA World Cup\nText: The match schedule was confirmed by FIFA in July 2020. The group stage was set to begin on 21 November, with four matches every day. Later, the schedule was tweaked by moving the Qatar vs Ecuador game to 20 November, after Qatar lobbied FIFA to allow their team to open the tournament. The final was played on 18 December 2022, National Day, at Lusail Stadium.\n\nTitle: 2022 FIFA World Cup\nText: Owing to the climate in Qatar, concerns were expressed over holding the World Cup in its traditional time frame of June and July. In October 2013, a task force was commissioned to consider alternative dates and report after the 2014 FIFA World Cup in Brazil. On 24 February 2015, the FIFA Task Force proposed that the tournament be played from late November to late December 2022, to avoid the summer heat between May and September and also avoid clashing with the 2022 Winter Olympics in February, the 2022 Winter Paralympics in March and Ramadan in April.\n\nTitle: 2022 FIFA World Cup\nText: Of the 32 nations qualified to play at the 2022 FIFA World Cup, 24 countries competed at the previous tournament in 2018. Qatar were the only team making their debut in the FIFA World Cup, becoming the first hosts to make their tournament debut since Italy in 1934. As a result, the 2022 tournament was the first World Cup in which none of the teams that earned a spot through qualification were making their debut. The Netherlands, Ecuador, Ghana, Cameroon, and the United States returned to the tournament after missing the 2018 tournament. Canada returned after 36 years, their only prior appearance being in 1986. Wales made their first appearance in 64 years – the longest ever gap for any team, their only previous participation having been in 1958.\n\nTitle: 2022 FIFA World Cup\nText: After UEFA were guaranteed to host the 2018 event, members of UEFA were no longer in contention to host in 2022. There were five bids remaining for the 2022 FIFA World Cup: Australia, Japan, Qatar, South Korea, and the United States.\n\nTitle: Cristiano Ronaldo\nText: Ronaldo was named in Portugal's squad for the 2022 FIFA World Cup in Qatar, making it his fifth World Cup. 
On 24 November, in Portugal's opening match against Ghana, Ronaldo scored a penalty kick and became the first male player to score in five different World Cups. In the last group game against South Korea, Ronaldo received criticism from his own coach for his reaction at being substituted. He was dropped from the starting line-up for Portugal's last 16 match against Switzerland, marking the first time since Euro 2008 that he had not started a game for Portugal in a major international tournament, and the first time Portugal had started a knockout game without Ronaldo in the starting line-up at an international tournament since Euro 2000. He came off the bench late on as Portugal won 6–1, their highest tally in a World Cup knockout game since the 1966 World Cup, with Ronaldo's replacement Gonçalo Ramos scoring a hat-trick. Portugal employed the same strategy in the quarter-finals against Morocco, with Ronaldo once again coming off the bench; in the process, he equalled Bader Al-Mutawa's international appearance record, becoming the joint–most capped male footballer of all time, with 196 caps. Portugal lost 1–0, however, with Morocco becoming the first CAF nation ever to reach the World Cup semi-finals.\n\nTitle: 2022 FIFA World Cup\nText: The final draw was held at the Doha Exhibition and Convention Center in Doha, Qatar, on 1 April 2022, 19:00 AST, prior to the completion of qualification. The two winners of the inter-confederation play-offs and the winner of the Path A of the UEFA play-offs were not known at the time of the draw. The draw was attended by 2,000 guests and was led by Carli Lloyd, Jermaine Jenas and sports broadcaster Samantha Johnson, assisted by the likes of Cafu (Brazil), Lothar Matthäus (Germany), Adel Ahmed Malalla (Qatar), Ali Daei (Iran), Bora Milutinović (Serbia/Mexico), Jay-Jay Okocha (Nigeria), Rabah Madjer (Algeria), and Tim Cahill (Australia)." 
+ } + ], + "hierarchy": { + "h1": { + "id": "build-a-hybrid-search-index-with-cohere-and-elasticsearch", + "title": "Build a Hybrid Search Index with Cohere and Elasticsearch" + }, + "h2": { + "id": "semantic-search", + "title": "Semantic Search" + } + }, + "level": "h2", + "level_title": "Semantic Search" + }, + { + "objectID": "test-hybrid-search", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#hybrid-search", + "content": "After the dataset has been enriched with the embeddings, you can query the data using hybrid search.\nPass a semantic query, and provide the query text and the model you have used to create the embeddings.", + "code_snippets": [ + { + "lang": "python", + "meta": "PYTHON ", + "code": "query = \"When were the semi-finals of the 2022 FIFA world cup played?\"\n\nresponse = client.search(\n index=\"cohere-wiki-embeddings\",\n size=100,\n query={\n \"bool\": {\n \"must\": {\n \"multi_match\": {\n \"query\": \"When were the semi-finals of the 2022 FIFA world cup played?\",\n \"fields\": [\"text\", \"title\"]\n }\n },\n \"should\": {\n \"semantic\": {\n \"query\": \"When were the semi-finals of the 2022 FIFA world cup played?\",\n \"field\": \"text_semantic\"\n }\n },\n }\n }\n\n)\n\nraw_documents = response[\"hits\"][\"hits\"]\n\n# Display the first 10 results\nfor document in raw_documents[0:10]:\n print(f'Title: {document[\"_source\"][\"title\"]}\\nText: {document[\"_source\"][\"text\"]}\\n')\n\n# Format the documents for ranking\ndocuments = []\nfor hit in response[\"hits\"][\"hits\"]:\n documents.append(hit[\"_source\"][\"text\"])" + } + ], + "hierarchy": { + "h1": { + "id": "build-a-hybrid-search-index-with-cohere-and-elasticsearch", + "title": "Build a Hybrid Search Index with Cohere and Elasticsearch" + }, + "h2": { + "id": "hybrid-search", + "title": "Hybrid Search" + } + }, + "level": "h2", + "level_title": "Hybrid Search" + }, + { + "objectID": "test-ranking", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#ranking", + "content": "In order to effectively combine the results from our vector and BM25 retrieval, we can use Cohere's Rerank 3 model through the inference API to provide a final, more precise, semantic reranking of our results.\nFirst, create an inference endpoint with your Cohere API key. Make sure to specify a name for your endpoint, and the model_id of one of the rerank models. In this example we will use Rerank 3.\nYou can now rerank your results using that inference endpoint. Here we will pass in the query we used for retrieval, along with the documents we just retrieved using hybrid search.\nThe inference service will respond with a list of documents in descending order of relevance. 
Each document has a corresponding index (reflecting to the order the documents were in when sent to the inference endpoint), and if the “return_documents” task setting is True, then the document texts will be included as well.\nIn this case we will set the response to False and will reconstruct the input documents based on the index returned in the response.", + "code_snippets": [ + { + "lang": "python", + "meta": "PYTHON ", + "code": "# Delete the inference model if it already exists\nclient.options(ignore_status=[404]).inference.delete(inference_id=\"cohere_rerank\")\n\nclient.inference.put(\n task_type=\"rerank\",\n inference_id=\"cohere_rerank\",\n body={\n \"service\": \"cohere\",\n \"service_settings\":{\n \"api_key\": COHERE_API_KEY,\n \"model_id\": \"rerank-english-v3.0\"\n },\n \"task_settings\": {\n \"top_n\": 10,\n },\n }\n)" + }, + { + "lang": "python", + "meta": "PYTHON ", + "code": "response = client.inference.inference(\n inference_id=\"cohere_rerank\",\n body={\n \"query\": query,\n \"input\": documents,\n \"task_settings\": {\n \"return_documents\": False\n }\n }\n)\n\n# Reconstruct the input documents based on the index provided in the rereank response\nranked_documents = []\nfor document in response.body[\"rerank\"]:\n ranked_documents.append({\n \"title\": raw_documents[int(document[\"index\"])][\"_source\"][\"title\"],\n \"text\": raw_documents[int(document[\"index\"])][\"_source\"][\"text\"]\n })\n\n# Print the top 10 results\nfor document in ranked_documents[0:10]:\n print(f\"Title: {document['title']}\\nText: {document['text']}\\n\")" + } + ], + "hierarchy": { + "h1": { + "id": "build-a-hybrid-search-index-with-cohere-and-elasticsearch", + "title": "Build a Hybrid Search Index with Cohere and Elasticsearch" + }, + "h2": { + "id": "ranking", + "title": "Ranking" + } + }, + "level": "h2", + "level_title": "Ranking" + }, + { + "objectID": "test-retrieval-augemented-generation", + "org_id": "test", + "domain": "test", + "pathname": "/test", + "page_title": "test", + "breadcrumb": [], + "visible_by": [], + "authed": false, + "type": "markdown", + "hash": "#retrieval-augemented-generation", + "content": "Now that we have ranked our results, we can easily turn this into a RAG system with Cohere's Chat API. Pass in the retrieved documents, along with the query and see the grounded response using Cohere's newest generative model Command R+.\nFirst, we will create the Cohere client.\nNext, we can easily get a grounded generation with citations from the Cohere Chat API. We simply pass in the user query and documents retrieved from Elastic to the API, and print out our grounded response.\nAnd there you have it! 
A quick and easy implementation of hybrid search and RAG with Cohere and Elastic.", + "code_snippets": [ + { + "lang": "python", + "meta": "PYTHON ", + "code": "co = cohere.Client(COHERE_API_KEY)" + }, + { + "lang": "python", + "meta": "PYTHON", + "code": "response = co.chat(\n message=query,\n documents=ranked_documents,\n model='command-r-plus-08-2024'\n)\n\nsource_documents = []\nfor citation in response.citations:\n for document_id in citation.document_ids:\n if document_id not in source_documents:\n source_documents.append(document_id)\n\nprint(f\"Query: {query}\")\nprint(f\"Response: {response.text}\")\nprint(\"Sources:\")\nfor document in response.documents:\n if document['id'] in source_documents:\n print(f\"{document['title']}: {document['text']}\")" + } + ], + "hierarchy": { + "h1": { + "id": "build-a-hybrid-search-index-with-cohere-and-elasticsearch", + "title": "Build a Hybrid Search Index with Cohere and Elasticsearch" + }, + "h2": { + "id": "retrieval-augemented-generation", + "title": "Retrieval augemented generation" + } + }, + "level": "h2", + "level_title": "Retrieval augemented generation" + } +] \ No newline at end of file diff --git a/packages/ui/fern-docs-search-server/src/algolia/__test__/__snapshots__/humanloop.json b/packages/ui/fern-docs-search-server/src/algolia/__test__/__snapshots__/humanloop.json index 9e10b3aeda..c3c1a456c7 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/__test__/__snapshots__/humanloop.json +++ b/packages/ui/fern-docs-search-server/src/algolia/__test__/__snapshots__/humanloop.json @@ -24,8 +24,7 @@ "authed": false, "type": "markdown", "description": "Learn how to use Humanloop for prompt engineering, evaluation and monitoring. Comprehensive guides and tutorials for LLMOps.\nHumanloop is an Integrated Development Environment for Large Language Models", - "content": "Humanloop enables AI and product teams to develop LLM-based applications that are reliable and scalable.\nPrincipally, it is an evaluation framework that enables you to rigorously measure and improve LLM performance during development and in production and a collaborative workspace where engineers, PMs and subject matter experts improve prompts, tools and agents together.\nBy adopting Humanloop, teams save 6-8 engineering hours per project each week and they feel confident that their AI is reliable.\n\n\n\n\n\n\nThe power of Humanloop lies in its integrated approach to AI development. Evaluation,\nmonitoring and prompt engineering in one integrated platform enables you to understand system performance and take the actions needed to fix it.\nThe SDK slots seamlessly into your existing code-based orchestration and the user-friendly interface allows both developers and non-technical stakeholders to adjust the AI together.\nYou can learn more about the challenges of AI development and how Humanloop solves them in Why Humanloop?.", - "code_snippets": [] + "content": "Humanloop enables AI and product teams to develop LLM-based applications that are reliable and scalable.\nPrincipally, it is an evaluation framework that enables you to rigorously measure and improve LLM performance during development and in production and a collaborative workspace where engineers, PMs and subject matter experts improve prompts, tools and agents together.\nBy adopting Humanloop, teams save 6-8 engineering hours per project each week and they feel confident that their AI is reliable.\n\n\n\n\n\n\nThe power of Humanloop lies in its integrated approach to AI development. 
Evaluation,\nmonitoring and prompt engineering in one integrated platform enables you to understand system performance and take the actions needed to fix it.\nThe SDK slots seamlessly into your existing code-based orchestration and the user-friendly interface allows both developers and non-technical stakeholders to adjust the AI together.\nYou can learn more about the challenges of AI development and how Humanloop solves them in Why Humanloop?." }, { "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.getting-started/why-humanloop", @@ -51,11 +50,10 @@ ], "authed": false, "type": "markdown", - "description": "Humanloop is an enterprise-grade stack for product teams building with LLMs. We are SOC-2 compliant, offer self-hosting and never train on your data.", - "code_snippets": [] + "description": "Humanloop is an enterprise-grade stack for product teams building with LLMs. We are SOC-2 compliant, offer self-hosting and never train on your data." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.getting-started/why-humanloop-llms-break-traditional-software-processes-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.getting-started/why-humanloop-llms-break-traditional-software-processes", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/getting-started/why-humanloop", @@ -78,19 +76,19 @@ ], "authed": false, "type": "markdown", - "hash": "#llms-break-traditional-software-processes-", + "hash": "#llms-break-traditional-software-processes", "content": "The principal way you \"program\" LLMs is through natural language instructions called prompts. There's a plethora of techniques needed to prompt the models to work robustly, reliably and with the correct knowledge.\nDeveloping, managing and evaluating prompts for LLMs is surprisingly hard and dissimilar to traditional software in the following ways:\nSubject matter experts matter more than ever. As LLMs are being applied to all different domains, the people that know how they should best perform are rarely the software engineers but the experts in that field.\n\nAI output is often non-deterministic. Innocuous changes to the prompts can cause unforeseen issues elsewhere.\n\nAI outputs are subjective. It’s hard to measure how well products are working and so, without robust evaluation, larger companies simply can’t trust putting generative AI in production.\n\n\n\n\nBad workflows for generative AI are costing you through wasted engineering effort and delays to launch\nMany companies struggle to enable the collaboration needed between product leaders, subject matter experts and engineers. Often they'll rely on a hodge-podge of tools like the OpenAI Playground, custom scripts and complex spreadsheets. 
The process is slow and error-prone, wasting engineering time and leading to long delays and feelings of uncertainty.", "hierarchy": { "h2": { - "id": "llms-break-traditional-software-processes-", - "title": "LLMs Break Traditional Software Processes " + "id": "llms-break-traditional-software-processes", + "title": "LLMs Break Traditional Software Processes" } }, "level": "h2", "level_title": "LLMs Break Traditional Software Processes" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.getting-started/why-humanloop-humanloop-solves-the-most-critical-workflows-around-prompt-engineering-and-evaluation-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.getting-started/why-humanloop-humanloop-solves-the-most-critical-workflows-around-prompt-engineering-and-evaluation", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/getting-started/why-humanloop", @@ -113,19 +111,19 @@ ], "authed": false, "type": "markdown", - "hash": "#humanloop-solves-the-most-critical-workflows-around-prompt-engineering-and-evaluation-", + "hash": "#humanloop-solves-the-most-critical-workflows-around-prompt-engineering-and-evaluation", "content": "We give you an interactive environment where your domain experts, product managers and engineers can work together to iterate on prompts. Coupled with this are tools for rigorously evaluating the performance of your AI systems.\nCoding best practices still apply. All your assets are strictly versioned and can be serialised to work with existing systems like git and your CI/CD pipeline. Our TypeScript and Python SDKs seamlessly integrate with your existing codebases.\nCompanies like Duolingo and AmexGBT use Humanloop to manage their prompt development and evaluation so they can produce high-quality AI features and be confident that they work appropriately.\n“We implemented Humanloop at a crucial moment for Twain when we had to develop and test many new prompts for a new feature release. I cannot imagine how long it would have taken us to release this new feature without Humanloop.” – Maddy Ralph, Prompt Engineer at Twain\n\nCheck out more detailed case study pages for more real world examples of the impact of Humanloop.", "hierarchy": { "h2": { - "id": "humanloop-solves-the-most-critical-workflows-around-prompt-engineering-and-evaluation-", - "title": "Humanloop solves the most critical workflows around prompt engineering and evaluation " + "id": "humanloop-solves-the-most-critical-workflows-around-prompt-engineering-and-evaluation", + "title": "Humanloop solves the most critical workflows around prompt engineering and evaluation" } }, "level": "h2", "level_title": "Humanloop solves the most critical workflows around prompt engineering and evaluation" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.getting-started/why-humanloop-whos-it-for-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.getting-started/why-humanloop-whos-it-for", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/getting-started/why-humanloop", @@ -148,12 +146,12 @@ ], "authed": false, "type": "markdown", - "hash": "#whos-it-for-", + "hash": "#whos-it-for", "content": "Humanloop is an enterprise-grade stack for AI and product teams. 
We are SOC-2 compliant, offer self-hosting and never train on your data.\nProduct owners and subject matter experts appreciate that the Humanloop enables them to direct the AI behavior through the intuitive UI.\nDevelopers find that Humanloop SDK/API slots well into existing code-based LLM orchestration without forcing unhelpful abstractions upon them, while removing bottlenecks around updating prompts and running evaluations.\nWith Humanloop, companies are overcoming the challenges of building with AI and shipping groundbreaking applications with confidence: By giving companies the right tools, Humanloop dramatically accelerates their AI adoption and makes it easy for best practices to spread around an organization.\n“Our teams use Humanloop as our development playground to try out various language models, develop our prompts, and test performance. We are still in the official onboarding process but Humanloop is already an essential part of our AI R&D process.“ – American Express Global Business Travel", "hierarchy": { "h2": { - "id": "whos-it-for-", - "title": "Who's it for? " + "id": "whos-it-for", + "title": "Who's it for?" } }, "level": "h2", @@ -184,11 +182,10 @@ "authed": false, "type": "markdown", "description": "Getting up and running with Humanloop is quick and easy. This guide will run you through creating and managing your first Prompt in a few minutes.\nGetting up and running with Humanloop is quick and easy. This guide will run you through creating and managing your first Prompt in a few minutes.", - "content": "Create a Humanloop Account\nIf you haven’t already, create an account or log in to Humanloop\nAdd an OpenAI API Key\nIf you’re the first person in your organization, you’ll need to add an API key to a model provider.\nGo to OpenAI and grab an API key\n\nIn Humanloop Organization Settings set up OpenAI as a model provider.\n\n\n\n\nUsing the Prompt Editor will use your OpenAI credits in the same way that the OpenAI playground does. Keep your API keys for Humanloop and the model providers private.", - "code_snippets": [] + "content": "Create a Humanloop Account\nIf you haven’t already, create an account or log in to Humanloop\nAdd an OpenAI API Key\nIf you’re the first person in your organization, you’ll need to add an API key to a model provider.\nGo to OpenAI and grab an API key\n\nIn Humanloop Organization Settings set up OpenAI as a model provider.\n\n\n\n\nUsing the Prompt Editor will use your OpenAI credits in the same way that the OpenAI playground does. Keep your API keys for Humanloop and the model providers private." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.tutorials/quickstart-get-started-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.tutorials/quickstart-get-started", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/tutorials/quickstart", @@ -211,7 +208,7 @@ ], "authed": false, "type": "markdown", - "hash": "#get-started-", + "hash": "#get-started", "content": "Create a Prompt File\nWhen you first open Humanloop you’ll see your File navigation on the left. Click ‘+ New’ and create a Prompt.\n\n\nIn the sidebar, rename this file to \"Comedian Bot\" now or later.\nCreate the Prompt template in the Editor\nThe left hand side of the screen defines your Prompt – the parameters such as model, temperature and template. 
The right hand side is a single chat session with this Prompt.\n\n\nClick the “+ Message” button within the chat template to add a system message to the chat template.\n\n\nAdd the following templated message to the chat template.\nThis message forms the chat template. It has an input slot called topic (surrounded by two curly brackets) for an input value that is provided each time you call this Prompt.\nOn the right hand side of the page, you’ll now see a box in the Inputs section for topic.\nAdd a value for topic e.g. music, jogging, whatever\n\nClick Run in the bottom right of the page\n\n\nThis will call OpenAI’s model and return the assistant response. Feel free to try other values, the model is very funny.\nYou now have a first version of your prompt that you can use.\nCommit your first version of this Prompt\nClick the Commit button\n\nPut “initial version” in the commit message field\n\nClick Commit\n\n\n\n\nView the logs\nUnder the Prompt File, click ‘Logs’ to view all the generations from this Prompt\nClick on a row to see the details of what version of the prompt generated it. From here you can give feedback to that generation, see performance metrics, open up this example in the Editor, or add this log to a dataset.", "code_snippets": [ { @@ -223,15 +220,15 @@ ], "hierarchy": { "h2": { - "id": "get-started-", - "title": "Get Started " + "id": "get-started", + "title": "Get Started" } }, "level": "h2", "level_title": "Get Started" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.tutorials/quickstart-next-steps-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.tutorials/quickstart-next-steps", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/tutorials/quickstart", @@ -254,12 +251,12 @@ ], "authed": false, "type": "markdown", - "hash": "#next-steps-", + "hash": "#next-steps", "content": "Well done! You've now created your first Prompt. If you look around it might seem a bit empty at the moment.", "hierarchy": { "h2": { - "id": "next-steps-", - "title": "Next Steps " + "id": "next-steps", + "title": "Next Steps" } }, "level": "h2", @@ -294,11 +291,10 @@ "authed": false, "type": "markdown", "description": "Discover how Humanloop manages datasets, with version control and collaboration to enable you to evaluate and fine-tune your models.\nHumanloop provides a set of simple building blocks for your AI applications and avoids complex abstractions.", - "content": "Prompts, Tools and Evaluators are the core building blocks of your AI features on Humanloop:\nPrompts: Prompts define how a large language model behaves.\n\nTools: Tools are functions that can extend your LLMs with access to external data sources and enabling them to take actions.\n\nEvaluators: Evaluators on Humanloop are functions that can be used to judge the output of Prompts, Tools or other Evaluators.", - "code_snippets": [] + "content": "Prompts, Tools and Evaluators are the core building blocks of your AI features on Humanloop:\nPrompts: Prompts define how a large language model behaves.\n\nTools: Tools are functions that can extend your LLMs with access to external data sources and enabling them to take actions.\n\nEvaluators: Evaluators on Humanloop are functions that can be used to judge the output of Prompts, Tools or other Evaluators." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-file-properties-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-file-properties", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/overview", @@ -325,19 +321,19 @@ ], "authed": false, "type": "markdown", - "hash": "#file-properties-", + "hash": "#file-properties", "content": "These core building blocks of Prompts, Tools and Evaluators are represented as different file types within a flexible filesystem in your Humanloop organization.\nAll file types share the following key properties:", "hierarchy": { "h2": { - "id": "file-properties-", - "title": "File Properties " + "id": "file-properties", + "title": "File Properties" } }, "level": "h2", "level_title": "File Properties" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-managed-ui-or-code-first-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-managed-ui-or-code-first", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/overview", @@ -364,23 +360,23 @@ ], "authed": false, "type": "markdown", - "hash": "#managed-ui-or-code-first-", + "hash": "#managed-ui-or-code-first", "content": "You can create and manage these files in the Humanloop UI,\nor via the API. Product teams and their subject matter experts may prefer using the UI first workflows for convenience, whereas AI teams and engineers may prefer to use the API for greater control and customisation.", "hierarchy": { "h2": { - "id": "managed-ui-or-code-first-", - "title": "Managed UI or code first " + "id": "managed-ui-or-code-first", + "title": "Managed UI or code first" }, "h3": { - "id": "managed-ui-or-code-first-", - "title": "Managed UI or code first " + "id": "managed-ui-or-code-first", + "title": "Managed UI or code first" } }, "level": "h3", "level_title": "Managed UI or code first" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-are-strictly-version-controlled-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-are-strictly-version-controlled", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/overview", @@ -407,23 +403,23 @@ ], "authed": false, "type": "markdown", - "hash": "#are-strictly-version-controlled-", + "hash": "#are-strictly-version-controlled", "content": "Files have immutable versions that are uniquely determined by\ntheir parameters that characterise the behaviour of the system. 
For example, a Prompt version is determined by the prompt template, base model and hyperparameters chosen.\nWithin the Humanloop Editor and via the API, you can commit new versions of a file, view the history of changes and revert to a previous version.", "hierarchy": { "h2": { - "id": "are-strictly-version-controlled-", - "title": "Are strictly version controlled " + "id": "are-strictly-version-controlled", + "title": "Are strictly version controlled" }, "h3": { - "id": "are-strictly-version-controlled-", - "title": "Are strictly version controlled " + "id": "are-strictly-version-controlled", + "title": "Are strictly version controlled" } }, "level": "h3", "level_title": "Are strictly version controlled" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-have-a-flexible-runtime-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-have-a-flexible-runtime", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/overview", @@ -450,23 +446,23 @@ ], "authed": false, "type": "markdown", - "hash": "#have-a-flexible-runtime-", + "hash": "#have-a-flexible-runtime", "content": "All files can be called (if you use the Humanloop runtime) or logged to (where you manage the runtime yourself). For example,\nwith Prompts, Humanloop integrates to all the major model providers. You can choose to call a Prompt, where Humanloop acts as a proxy to the model provider. Alternatively, you can choose to manage the model calls yourself and log the results to the Prompt on Humanloop.\nUsing the Humanloop runtime is generally the simpler option and allows you to call the file natively within the Humanloop UI, whereas owning the runtime yourself and logging allows you to have more fine-grained control.", "hierarchy": { "h2": { - "id": "have-a-flexible-runtime-", - "title": "Have a flexible runtime " + "id": "have-a-flexible-runtime", + "title": "Have a flexible runtime" }, "h3": { - "id": "have-a-flexible-runtime-", - "title": "Have a flexible runtime " + "id": "have-a-flexible-runtime", + "title": "Have a flexible runtime" } }, "level": "h3", "level_title": "Have a flexible runtime" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-are-composable-with-sessions-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-are-composable-with-sessions", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/overview", @@ -493,23 +489,23 @@ ], "authed": false, "type": "markdown", - "hash": "#are-composable-with-sessions-", + "hash": "#are-composable-with-sessions", "content": "Files can be combined with other files to create more complex systems like chains and agents. 
For example, a Prompt can call a Tool, which can then be evaluated by an Evaluator.\nThe orchestration of more complex systems is best done in code using the API and the full trace of execution is accessible in the Humanloop UI for debugging and evaluation purposes.", "hierarchy": { "h2": { - "id": "are-composable-with-sessions-", - "title": "Are composable with sessions " + "id": "are-composable-with-sessions", + "title": "Are composable with sessions" }, "h3": { - "id": "are-composable-with-sessions-", - "title": "Are composable with sessions " + "id": "are-composable-with-sessions", + "title": "Are composable with sessions" } }, "level": "h3", "level_title": "Are composable with sessions" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-have-a-serialized-form-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-have-a-serialized-form", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/overview", @@ -536,23 +532,23 @@ ], "authed": false, "type": "markdown", - "hash": "#have-a-serialized-form-", + "hash": "#have-a-serialized-form", "content": "All files can be exported and imported in a serialized form. For example, Prompts are serialized to our .prompt format. This provides a useful medium for more technical teams that wish to maintain the source of truth in their existing version control system like git.", "hierarchy": { "h2": { - "id": "have-a-serialized-form-", - "title": "Have a serialized form " + "id": "have-a-serialized-form", + "title": "Have a serialized form" }, "h3": { - "id": "have-a-serialized-form-", - "title": "Have a serialized form " + "id": "have-a-serialized-form", + "title": "Have a serialized form" } }, "level": "h3", "level_title": "Have a serialized form" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-support-deployments-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.overview-support-deployments", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/overview", @@ -579,16 +575,16 @@ ], "authed": false, "type": "markdown", - "hash": "#support-deployments-", + "hash": "#support-deployments", "content": "You can tag file versions with specific environments and target these environments via the UI and API to facilitate robust deployment workflows.\n\n\nHumanloop also has the concept of Datasets that are used within Evaluation workflows. 
Datasets share all the same properties, except they do not have a runtime consideration.", "hierarchy": { "h2": { - "id": "support-deployments-", - "title": "Support deployments " + "id": "support-deployments", + "title": "Support deployments" }, "h3": { - "id": "support-deployments-", - "title": "Support deployments " + "id": "support-deployments", + "title": "Support deployments" } }, "level": "h3", @@ -636,7 +632,7 @@ ] }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-versioning-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-versioning", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/prompts", @@ -663,19 +659,19 @@ ], "authed": false, "type": "markdown", - "hash": "#versioning-", + "hash": "#versioning", "content": "A Prompt file will have multiple versions as you try out different models, params or templates, but they should all be doing the same task, and in general should be swappable with one-another.\nBy versioning your Prompts, you can track how adjustments to the template or parameters influence the LLM's responses. This is crucial for iterative development, as you can pinpoint which versions produce the most relevant or accurate outputs for your specific use case.", "hierarchy": { "h2": { - "id": "versioning-", - "title": "Versioning " + "id": "versioning", + "title": "Versioning" } }, "level": "h2", "level_title": "Versioning" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-when-to-create-a-new-prompt-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-when-to-create-a-new-prompt", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/prompts", @@ -702,23 +698,23 @@ ], "authed": false, "type": "markdown", - "hash": "#when-to-create-a-new-prompt-", + "hash": "#when-to-create-a-new-prompt", "content": "You should create a new Prompt for every different ‘task to be done’ with the LLM. For example each of these tasks are things that can be done by an LLM and should be a separate Prompt File: Writing Copilot, Personal Assistant, Summariser, etc.\nWe've seen people find it useful to also create a Prompt called 'Playground' where they can free form experiment without concern of breaking anything or making a mess of their other Prompts.", "hierarchy": { "h2": { - "id": "when-to-create-a-new-prompt-", - "title": "When to create a new Prompt " + "id": "when-to-create-a-new-prompt", + "title": "When to create a new Prompt" }, "h3": { - "id": "when-to-create-a-new-prompt-", - "title": "When to create a new Prompt " + "id": "when-to-create-a-new-prompt", + "title": "When to create a new Prompt" } }, "level": "h3", "level_title": "When to create a new Prompt" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-using-prompts-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-using-prompts", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/prompts", @@ -745,19 +741,19 @@ ], "authed": false, "type": "markdown", - "hash": "#using-prompts-", + "hash": "#using-prompts", "content": "Prompts are callable as an API. 
You supply and query-time data such as input values or user messages, and the model will respond with its text output.\n\n\nYou can also use Prompts without proxying through Humanloop to the model provider and instead call the model yourself and explicitly log the results to your Prompt.", "hierarchy": { "h2": { - "id": "using-prompts-", - "title": "Using Prompts " + "id": "using-prompts", + "title": "Using Prompts" } }, "level": "h2", "level_title": "Using Prompts" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-serialization-prompt-file-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-serialization-prompt-file", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/prompts", @@ -784,19 +780,19 @@ ], "authed": false, "type": "markdown", - "hash": "#serialization-prompt-file-", + "hash": "#serialization-prompt-file", "content": "Our .prompt file format is a serialized version of a model config that is designed to be human-readable and suitable for checking into your version control systems alongside your code. See the .prompt files reference reference for more details.", "hierarchy": { "h2": { - "id": "serialization-prompt-file-", - "title": "Serialization (.prompt file) " + "id": "serialization-prompt-file", + "title": "Serialization (.prompt file)" } }, "level": "h2", "level_title": "Serialization (.prompt file)" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-format-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-format", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/prompts", @@ -823,23 +819,23 @@ ], "authed": false, "type": "markdown", - "hash": "#format-", + "hash": "#format", "content": "The .prompt file is heavily inspired by MDX, with model and hyperparameters specified in a YAML header alongside a JSX-inspired format for your Chat Template.", "hierarchy": { "h2": { - "id": "format-", - "title": "Format " + "id": "format", + "title": "Format" }, "h3": { - "id": "format-", - "title": "Format " + "id": "format", + "title": "Format" } }, "level": "h3", "level_title": "Format" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-basic-examples-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.prompts-basic-examples", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/prompts", @@ -866,8 +862,7 @@ ], "authed": false, "type": "markdown", - "hash": "#basic-examples-", - "content": "", + "hash": "#basic-examples", "code_snippets": [ { "lang": "jsx", @@ -895,12 +890,12 @@ ], "hierarchy": { "h2": { - "id": "basic-examples-", - "title": "Basic examples " + "id": "basic-examples", + "title": "Basic examples" }, "h3": { - "id": "basic-examples-", - "title": "Basic examples " + "id": "basic-examples", + "title": "Basic examples" } }, "level": "h3", @@ -935,11 +930,10 @@ "authed": false, "type": "markdown", "description": "Discover how Humanloop manages tools for use with large language models (LLMs) with version control and rigorous evaluation for better performance.\nTools are functions that can extend your LLMs with access to external data sources and enabling them to take actions.", - "content": "Humanloop Tools can be used in multiple ways:\nby the LLM by OpenAI function calling)\n\nwithin the Prompt template\n\nas part 
of a chain of events such as a Retrieval Tool in a RAG pipeline\n\n\nSome Tools are executable within Humanloop, and these offer the greatest utility and convenience. For example, Humanloop has pre-built integrations for Google search and Pinecone have and so these Tools can be executed and the results inserted into the API or Editor automatically.", - "code_snippets": [] + "content": "Humanloop Tools can be used in multiple ways:\nby the LLM by OpenAI function calling)\n\nwithin the Prompt template\n\nas part of a chain of events such as a Retrieval Tool in a RAG pipeline\n\n\nSome Tools are executable within Humanloop, and these offer the greatest utility and convenience. For example, Humanloop has pre-built integrations for Google search and Pinecone have and so these Tools can be executed and the results inserted into the API or Editor automatically." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.tools-tool-use-function-calling-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.tools-tool-use-function-calling", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/tools", @@ -966,19 +960,19 @@ ], "authed": false, "type": "markdown", - "hash": "#tool-use-function-calling-", + "hash": "#tool-use-function-calling", "content": "Certain large language models support tool use or \"function calling\". For these models, you can supply the description of functions and the model can choose to call one or more of them by providing the values to call the functions with.\n\n\n\n\nTools all have a functional interface that can be supplied as the JSONSchema needed for function calling. Additionally, if the Tool is executable on Humanloop, the result of any tool will automatically be inserted into the response in the API and in the Editor.\nTools for function calling can be defined inline in our Editor or centrally managed for an organization.", "hierarchy": { "h3": { - "id": "tool-use-function-calling-", - "title": "Tool Use (Function Calling) " + "id": "tool-use-function-calling", + "title": "Tool Use (Function Calling)" } }, "level": "h3", "level_title": "Tool Use (Function Calling)" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.tools-tools-in-a-prompt-template-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.tools-tools-in-a-prompt-template", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/tools", @@ -1005,19 +999,19 @@ ], "authed": false, "type": "markdown", - "hash": "#tools-in-a-prompt-template-", + "hash": "#tools-in-a-prompt-template", "content": "You can add a tool call in a prompt template and the result will be inserted into the prompt sent to the model. This allows you to insert retrieved information into your LLMs calls.\nFor example, if you have {{ google(\"population of india\") }} in your template, this Google tool will get executed and replaced with the resulting text “1.42 billion (2024)” before the prompt is sent to the model. Additionally, if your template contains a Tool call that uses an input variable e.g. {{ google(query) }} this will take the value of the input supplied in the request, compute the output of the Google tool, and insert that result into the resulting prompt that is sent to the model.\n\n\nExample of a Tool being used within a Prompt template. 
This example will mean that this Prompt needs two inputs to be supplied (query, and top_k)", "hierarchy": { "h3": { - "id": "tools-in-a-prompt-template-", - "title": "Tools in a Prompt template " + "id": "tools-in-a-prompt-template", + "title": "Tools in a Prompt template" } }, "level": "h3", "level_title": "Tools in a Prompt template" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.tools-tools-within-a-chain-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.tools-tools-within-a-chain", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/tools", @@ -1044,19 +1038,19 @@ ], "authed": false, "type": "markdown", - "hash": "#tools-within-a-chain-", + "hash": "#tools-within-a-chain", "content": "You can call a Tool within a session of events and post the result to Humanloop. For example in a RAG pipeline, instrumenting your retrieval function as a Tool, enables you to be able to trace through the full sequence of events. The retrieval Tool will be versioned and the logs will be available in the Humanloop UI, enabling you to independently improve that step in the pipeline.", "hierarchy": { "h2": { - "id": "tools-within-a-chain-", - "title": "Tools within a chain " + "id": "tools-within-a-chain", + "title": "Tools within a chain" } }, "level": "h2", "level_title": "Tools within a chain" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.tools-third-party-integrations-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.tools-third-party-integrations", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/tools", @@ -1083,23 +1077,23 @@ ], "authed": false, "type": "markdown", - "hash": "#third-party-integrations-", + "hash": "#third-party-integrations", "content": "Pinecone Search - Vector similarity search using Pinecone vector DB and OpenAI embeddings.\n\nGoogle Search - API for searching Google: https://serpapi.com/.\n\nGET API - Send a GET request to an external API.", "hierarchy": { "h2": { - "id": "third-party-integrations-", - "title": "Third-party integrations " + "id": "third-party-integrations", + "title": "Third-party integrations" }, "h3": { - "id": "third-party-integrations-", - "title": "Third-party integrations " + "id": "third-party-integrations", + "title": "Third-party integrations" } }, "level": "h3", "level_title": "Third-party integrations" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.tools-humanloop-tools-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.tools-humanloop-tools", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/tools", @@ -1126,16 +1120,16 @@ ], "authed": false, "type": "markdown", - "hash": "#humanloop-tools-", + "hash": "#humanloop-tools", "content": "Snippet Tool - Create reusable key/value pairs for use in prompts - see how to use the Snippet Tool.\n\nJSON Schema - JSON schema that can be used across multiple Prompts - see how to link a JSON Schema Tool.", "hierarchy": { "h2": { - "id": "humanloop-tools-", - "title": "Humanloop tools " + "id": "humanloop-tools", + "title": "Humanloop tools" }, "h3": { - "id": "humanloop-tools-", - "title": "Humanloop tools " + "id": "humanloop-tools", + "title": "Humanloop tools" } }, "level": "h3", @@ -1170,11 +1164,10 @@ "authed": false, "type": "markdown", "description": "Discover how Humanloop 
manages datasets, with version control and collaboration to enable you to evaluate and fine-tune your models.\nDatasets are collections of Datapoints, which are input-output pairs, that you can use within Humanloop for evaluations and fine-tuning.", - "content": "Datasets are primarily used for evaluation purposes on Humanloop. You can think of a Dataset as a collection of testcases for your AI applications. Each testcase is represented by a Datapoint, which contains the following fields:\nInputs: a collection of prompt variable values which are interpolated into the prompt template at generation time (i.e. they replace the {{ variables }} you define in your prompt template).\n\nMessages: for chat models, as well as the prompt template, you can optionally have a history of chat messages that are fed into amodel when generating a response.\n\nTarget: certain types of test cases can benefit from comparing the out your application to an expected or desired behaviour. In the simplest case, this can simply be a string representing the exact output you hope the model produces for the inputs and messages represented by the Datapoint.\nIn more complex cases, you can define an arbitrary JSON object for target with whatever fields are necessary to help you specify the intended behaviour.", - "code_snippets": [] + "content": "Datasets are primarily used for evaluation purposes on Humanloop. You can think of a Dataset as a collection of testcases for your AI applications. Each testcase is represented by a Datapoint, which contains the following fields:\nInputs: a collection of prompt variable values which are interpolated into the prompt template at generation time (i.e. they replace the {{ variables }} you define in your prompt template).\n\nMessages: for chat models, as well as the prompt template, you can optionally have a history of chat messages that are fed into amodel when generating a response.\n\nTarget: certain types of test cases can benefit from comparing the out your application to an expected or desired behaviour. In the simplest case, this can simply be a string representing the exact output you hope the model produces for the inputs and messages represented by the Datapoint.\nIn more complex cases, you can define an arbitrary JSON object for target with whatever fields are necessary to help you specify the intended behaviour." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.datasets-versioning-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.datasets-versioning", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/datasets", @@ -1201,19 +1194,19 @@ ], "authed": false, "type": "markdown", - "hash": "#versioning-", + "hash": "#versioning", "content": "A Dataset will have multiple versions as you iterate on refining your test cases for your task. This tends to be an evolving process as you learn more about how your Prompts behave and how users are interacting with your AI application in the wild.\nDataset versions are immutable and are uniquely defined by the contents of the Datapoints. If you change, or add additional, or remove existing Datapoints, this will constitute a new version.\nWhen running Evaluations you always reference a specific version of the Dataset. 
This allows you to have confidence in your Evaluations because they are always tied transparently to a specific set of test cases.", "hierarchy": { "h2": { - "id": "versioning-", - "title": "Versioning " + "id": "versioning", + "title": "Versioning" } }, "level": "h2", "level_title": "Versioning" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.datasets-creating-datasets-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.datasets-creating-datasets", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/datasets", @@ -1240,19 +1233,19 @@ ], "authed": false, "type": "markdown", - "hash": "#creating-datasets-", + "hash": "#creating-datasets", "content": "Datasets can be created in the following ways:\nvia CSV upload in the UI.\n\nconverting from existing Logs you've stored on Humanloop. These can be Prompt or Tool Logs depending on your Evaluation goals.\n\nvia API requests.\n\n\nSee our detailed guide for more details.", "hierarchy": { "h2": { - "id": "creating-datasets-", - "title": "Creating Datasets " + "id": "creating-datasets", + "title": "Creating Datasets" } }, "level": "h2", "level_title": "Creating Datasets" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.datasets-evaluations-use-case-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.datasets-evaluations-use-case", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/datasets", @@ -1279,12 +1272,12 @@ ], "authed": false, "type": "markdown", - "hash": "#evaluations-use-case-", + "hash": "#evaluations-use-case", "content": "Evaluations are run on Humanloop by iterating over the Datapoints in a Dataset and generating output for the different versions of your AI application that you wish to compare.\nFor example, you may wish to test out how Claude Opus compares to GPT-4 and Google Gemini on cost and accuracy for a specific set of testcases that describe the expected behaviour of your application.\nEvaluators are then run against the logs generated by the AI applications for each Datapoint to provide a judgement on how well the model performed and can reference the target field in the Datapoint to determine the expected behaviour.", "hierarchy": { "h2": { - "id": "evaluations-use-case-", - "title": "Evaluations use case " + "id": "evaluations-use-case", + "title": "Evaluations use case" } }, "level": "h2", @@ -1319,11 +1312,10 @@ "authed": false, "type": "markdown", "description": "Learn about LLM Evaluation using Evaluators. 
Evaluators are functions that can be used to judge the output of Prompts, Tools or other Evaluators.\nEvaluators on Humanloop are functions that can be used to judge the output of Prompts, Tools or other Evaluators.", - "content": "The core entity in the Humanloop evaluation framework is an Evaluator - a function you define which takes an LLM-generated log as an argument and returns a judgment.\nThe judgment is typically either a boolean or a number, indicating how well the model performed according to criteria you determine based on your use case.\nEvaluators can be leveraged for Monitoring your live AI application, as well as for Evaluations to benchmark different version of your AI application against each other pre-deployment.", - "code_snippets": [] + "content": "The core entity in the Humanloop evaluation framework is an Evaluator - a function you define which takes an LLM-generated log as an argument and returns a judgment.\nThe judgment is typically either a boolean or a number, indicating how well the model performed according to criteria you determine based on your use case.\nEvaluators can be leveraged for Monitoring your live AI application, as well as for Evaluations to benchmark different version of your AI application against each other pre-deployment." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-sources-of-judgement-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-sources-of-judgement", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/evaluators", @@ -1350,19 +1342,19 @@ ], "authed": false, "type": "markdown", - "hash": "#sources-of-judgement-", + "hash": "#sources-of-judgement", "content": "Currently, you can define three different Evaluator sources on Humanloop:\nCode - using simple deterministic rules based judgments against attributes like cost, token usage, latency, regex rules on the output, etc. These are generally fast and cheap to run at scale.\n\nAI - using other foundation models to provide judgments on the output. This allows for more qualitative and nuanced judgments for a fraction of the cost of human judgments.\n\nHuman - getting gold standard judgments from either end users of your application, or internal domain experts. 
This can be the most expensive and slowest option, but also the most reliable.", "hierarchy": { "h2": { - "id": "sources-of-judgement-", - "title": "Sources of Judgement " + "id": "sources-of-judgement", + "title": "Sources of Judgement" } }, "level": "h2", "level_title": "Sources of Judgement" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-online-monitoring-versus-offline-evaluation-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-online-monitoring-versus-offline-evaluation", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/evaluators", @@ -1389,19 +1381,19 @@ ], "authed": false, "type": "markdown", - "hash": "#online-monitoring-versus-offline-evaluation-", + "hash": "#online-monitoring-versus-offline-evaluation", "content": "Evaluators can be deployed on Humanloop to support both testing new versions of your Prompts and Tools during development and for monitoring live apps that are already in production.", "hierarchy": { "h2": { - "id": "online-monitoring-versus-offline-evaluation-", - "title": "Online Monitoring versus Offline Evaluation " + "id": "online-monitoring-versus-offline-evaluation", + "title": "Online Monitoring versus Offline Evaluation" } }, "level": "h2", "level_title": "Online Monitoring versus Offline Evaluation" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-online-monitoring-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-online-monitoring", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/evaluators", @@ -1428,23 +1420,23 @@ ], "authed": false, "type": "markdown", - "hash": "#online-monitoring-", + "hash": "#online-monitoring", "content": "Evaluators are run against the Logs generated by your AI applications. Typically, they are used to monitor deployed model performance over time and check for drift or degradation in performance.\nThe Evaluator in this case only takes a single argument - the log generated by the model. The Evaluator is expected to return a judgment based on the Log,\nwhich can be used to trigger alerts or other actions in your monitoring system.\nSee our Monitoring guides for more details.", "hierarchy": { "h2": { - "id": "online-monitoring-", - "title": "Online Monitoring " + "id": "online-monitoring", + "title": "Online Monitoring" }, "h3": { - "id": "online-monitoring-", - "title": "Online Monitoring " + "id": "online-monitoring", + "title": "Online Monitoring" } }, "level": "h3", "level_title": "Online Monitoring" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-offline-evaluations-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-offline-evaluations", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/evaluators", @@ -1471,23 +1463,23 @@ ], "authed": false, "type": "markdown", - "hash": "#offline-evaluations-", + "hash": "#offline-evaluations", "content": "Offline Evaluators are combined with predefined Datasets in order to evaluate your application as you iterate in your prompt engineering workflow, or to test for regressions in a CI environment.\nA test Dataset is a collection of Datapoints, which are roughly analogous to unit tests or test cases in traditional programming. 
Each datapoint specifies inputs to your model and (optionally) some target data.\nWhen you run an offline evaluation, a Log needs to be generated using the inputs of each Datapoint and the version of the application being evaluated. Evaluators then need to be run against each Log to provide judgements,\nwhich are then aggregated to provide an overall score for the application. Evaluators in this case take the generated Log and the testcase datapoint that gave rise to it as arguments.\nSee our guides on creating Datasets and running Evaluations for more details.", "hierarchy": { "h2": { - "id": "offline-evaluations-", - "title": "Offline Evaluations " + "id": "offline-evaluations", + "title": "Offline Evaluations" }, "h3": { - "id": "offline-evaluations-", - "title": "Offline Evaluations " + "id": "offline-evaluations", + "title": "Offline Evaluations" } }, "level": "h3", "level_title": "Offline Evaluations" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-humanloop-runtime-versus-your-runtime-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-humanloop-runtime-versus-your-runtime", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/evaluators", @@ -1514,19 +1506,19 @@ ], "authed": false, "type": "markdown", - "hash": "#humanloop-runtime-versus-your-runtime-", + "hash": "#humanloop-runtime-versus-your-runtime", "content": "Evaluations require the following to be generated:\nLogs for the datapoints.\n\nEvaluator results for those generated logs.\n\n\nEvaluators which are defined within the Humanloop UI can be executed in the Humanloop runtime, whereas Evaluators defined in your code can be executed in your runtime and the results posted back to Humanloop.\nThis provides flexibility for supporting more complex evaluation workflows.", "hierarchy": { "h2": { - "id": "humanloop-runtime-versus-your-runtime-", - "title": "Humanloop runtime versus your runtime " + "id": "humanloop-runtime-versus-your-runtime", + "title": "Humanloop runtime versus your runtime" } }, "level": "h2", "level_title": "Humanloop runtime versus your runtime" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-return-types-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.evaluators-return-types", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/evaluators", @@ -1553,12 +1545,12 @@ ], "authed": false, "type": "markdown", - "hash": "#return-types-", + "hash": "#return-types", "content": "Evaluators apply judgment to Logs. This judgment can be of the following types:\nBoolean - A true/false judgment.\n\nNumber - A numerical judgment, which can act as a rating or score.\n\nSelect - One of a predefined set of options. One option must be selected.\n\nMulti-select - Any number of a predefined set of options. 
None, one, or many options can be selected.\n\nText - A free-form text judgment.\n\n\nCode and AI Evaluators can return either Boolean or Number judgments.\nHuman Evaluators can return Number, Select, Multi-select, or Text judgments.", "hierarchy": { "h2": { - "id": "return-types-", - "title": "Return types " + "id": "return-types", + "title": "Return types" } }, "level": "h2", @@ -1593,8 +1585,7 @@ "authed": false, "type": "markdown", "description": "Logs contain the inputs and outputs of each time a Prompt, Tool or Evaluator is called.\nLogs contain the inputs and outputs of each time a Prompt, Tool or Evaluator is called.", - "content": "All Prompts, Tools and Evaluators produce Logs. A Log contains the inputs and the outputs and tracks which version of Prompt/Tool/Evaluator was used.\nFor the example of a Prompt above, the Log would have one input called ‘topic’ and the output will be the completion.\n\n\nA Log which contains an input query", - "code_snippets": [] + "content": "All Prompts, Tools and Evaluators produce Logs. A Log contains the inputs and the outputs and tracks which version of Prompt/Tool/Evaluator was used.\nFor the example of a Prompt above, the Log would have one input called ‘topic’ and the output will be the completion.\n\n\nA Log which contains an input query" }, { "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.environments", @@ -1625,11 +1616,10 @@ "authed": false, "type": "markdown", "description": "Deployment environments enable you to control the deployment lifecycle of your Prompts and other files between development and production environments.\nDeployment environments enable you to control the deployment lifecycle of your Prompts and other files between development and production environments.", - "content": "Environments enable you to deploy different versions of your files to specific environments, allowing you to separately manage the deployment workflow between testing and production. With environments, you have the control required to manage the full LLM deployment lifecycle.", - "code_snippets": [] + "content": "Environments enable you to deploy different versions of your files to specific environments, allowing you to separately manage the deployment workflow between testing and production. With environments, you have the control required to manage the full LLM deployment lifecycle." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.environments-managing-your-environments-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.environments-managing-your-environments", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/environments", @@ -1656,19 +1646,19 @@ ], "authed": false, "type": "markdown", - "hash": "#managing-your-environments-", + "hash": "#managing-your-environments", "content": "Every organisation automatically receives a default production environment. 
You can create additional environments with custom names by visiting your organisation's environments page.\n\n\nOnly Enterprise customers can create more than one environment\nThe environments you define for your organisation will be available for each file and can be viewed in the file's dashboard once created.", "hierarchy": { "h3": { - "id": "managing-your-environments-", - "title": "Managing your environments " + "id": "managing-your-environments", + "title": "Managing your environments" } }, "level": "h3", "level_title": "Managing your environments" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.environments-the-default-environment-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.environments-the-default-environment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/environments", @@ -1695,23 +1685,23 @@ ], "authed": false, "type": "markdown", - "hash": "#the-default-environment-", + "hash": "#the-default-environment", "content": "By default, the production environment is marked as the Default environment. This means that all API calls that don't explicitly target a specific environment will use this environment. You can rename the default environment on the organisation's environments page.\n\n\nRenaming the environments will take immediate effect, so ensure that this\nchange is planned and does not disrupt your production workflows.", "hierarchy": { "h3": { - "id": "the-default-environment-", - "title": "The default environment " + "id": "the-default-environment", + "title": "The default environment" }, "h4": { - "id": "the-default-environment-", - "title": "The default environment " + "id": "the-default-environment", + "title": "The default environment" } }, "level": "h4", "level_title": "The default environment" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.environments-using-environments-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.environments-using-environments", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/environments", @@ -1738,19 +1728,19 @@ ], "authed": false, "type": "markdown", - "hash": "#using-environments-", + "hash": "#using-environments", "content": "Once created on the environments page, environments can be used for each file and are visible in the respective dashboards.\nYou can deploy directly to a specific environment by selecting it in the Deployments section.\n\nAlternatively, you can deploy to multiple environments simultaneously by deploying a version from either the Editor or the Versions table.", "hierarchy": { "h3": { - "id": "using-environments-", - "title": "Using environments " + "id": "using-environments", + "title": "Using environments" } }, "level": "h3", "level_title": "Using environments" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.environments-using-environments-via-api-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.getting-started.concepts.environments-using-environments-via-api", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/concepts/environments", @@ -1777,12 +1767,12 @@ ], "authed": false, "type": "markdown", - "hash": "#using-environments-via-api-", + "hash": "#using-environments-via-api", "content": "You can now call the version deployed in a specific environment by including an optional additional environment 
field. An exmaple of this field can be seen in the v5 Prompt Call documentation.", "hierarchy": { "h3": { - "id": "using-environments-via-api-", - "title": "Using environments via API " + "id": "using-environments-via-api", + "title": "Using environments via API" } }, "level": "h3", @@ -1817,8 +1807,7 @@ "authed": false, "type": "markdown", "description": "Directories can be used to group together related files. This is useful for organizing your work as part of prompt engineering and collaboration.\nDirectories can be used to group together related files.", - "content": "Directories in Humanloop serve as an organizational tool, allowing users to group related files and structure their work logically. They function similarly to folders in a traditional file system, providing a hierarchical structure for managing Prompts, Tools, Datasets, and other resources.\n\n\nDirectories are primarily for organizational needs but they can have\nfunctional impacts if you are referencing Prompts, Tools etc. by path.\nWe recommend to always refer to Prompts, Tools etc. by their id as this will\nmake your workflows more robust and avoid issues if the files are moved.\nFor more information on how to create and manage directories, see our Create a Directory guide.", - "code_snippets": [] + "content": "Directories in Humanloop serve as an organizational tool, allowing users to group related files and structure their work logically. They function similarly to folders in a traditional file system, providing a hierarchical structure for managing Prompts, Tools, Datasets, and other resources.\n\n\nDirectories are primarily for organizational needs but they can have\nfunctional impacts if you are referencing Prompts, Tools etc. by path.\nWe recommend to always refer to Prompts, Tools etc. by their id as this will\nmake your workflows more robust and avoid issues if the files are moved.\nFor more information on how to create and manage directories, see our Create a Directory guide." }, { "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview", @@ -1845,11 +1834,10 @@ "authed": false, "type": "markdown", "description": "Discover how Humanloop manages prompts, with version control and rigorous evaluation for better performance.\nHow to develop and manage your Prompt and Tools on Humanloop", - "content": "Your AI application can be broken down into Prompts, Tools, and Evaluators. Humanloop versions and manages each of these artifacts to enable team collaboration and evaluation of each component of your AI system.\nThis overview will explain the basics of prompt development, versioning, and management, and how to best integrate your LLM calls with Humanloop.", - "code_snippets": [] + "content": "Your AI application can be broken down into Prompts, Tools, and Evaluators. Humanloop versions and manages each of these artifacts to enable team collaboration and evaluation of each component of your AI system.\nThis overview will explain the basics of prompt development, versioning, and management, and how to best integrate your LLM calls with Humanloop." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-prompt-management-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-prompt-management", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -1872,7 +1860,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prompt-management-", + "hash": "#prompt-management", "content": "Prompts are a fundamental part of interacting with large language models (LLMs). They define the instructions and parameters that guide the model's responses. In Humanloop, Prompts are managed with version control, allowing you to track changes and improvements over time.\n\n\nA Prompt on Humanloop encapsulates the instructions and other configuration for how a large language model should perform a specific task. Each change in any of the following properties creates a new version of the Prompt:\nthe template such as Write a song about {{topic}}. For chat models, your template will contain an array of messages.\n\nthe model e.g. gpt-4o\n\nall the parameters to the model such as temperature, max_tokens, top_p etc.\n\nany tools available to the model", "code_snippets": [ { @@ -1886,15 +1874,15 @@ ], "hierarchy": { "h1": { - "id": "prompt-management-", - "title": "Prompt Management " + "id": "prompt-management", + "title": "Prompt Management" } }, "level": "h1", "level_title": "Prompt Management" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-creating-a-prompt-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-creating-a-prompt", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -1917,23 +1905,23 @@ ], "authed": false, "type": "markdown", - "hash": "#creating-a-prompt-", + "hash": "#creating-a-prompt", "content": "You can create a Prompt explicitly in the Prompt Editor or via the API.\nNew prompts can also be created automatically via the API if you specify the Prompt's path (its name and directory) while supplying the Prompt's parameters and template. This is useful if you are developing your prompts in code and want to be able to version them as you make changes to the code.", "hierarchy": { "h1": { - "id": "creating-a-prompt-", - "title": "Creating a Prompt " + "id": "creating-a-prompt", + "title": "Creating a Prompt" }, "h3": { - "id": "creating-a-prompt-", - "title": "Creating a Prompt " + "id": "creating-a-prompt", + "title": "Creating a Prompt" } }, "level": "h3", "level_title": "Creating a Prompt" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-versioning-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-versioning", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -1956,23 +1944,23 @@ ], "authed": false, "type": "markdown", - "hash": "#versioning-", + "hash": "#versioning", "content": "A Prompt will have multiple versions as you experiment with different models, parameters, or templates. However, all versions should perform the same task and generally be interchangeable with one another.\nBy versioning your Prompts, you can track how adjustments to the template or parameters influence the LLM's responses. 
This is crucial for iterative development, as you can pinpoint which versions produce the most relevant or accurate outputs for your specific use case.\nAs you edit your prompt, new versions of the Prompt are created automatically. Each version is timestamped and given a unique version ID which is deterministically based on the Prompt's contents. For every version that you want to \"save\", you commit that version and it will be recorded as a new committed version of the Prompt with a commit message.", "hierarchy": { "h1": { - "id": "versioning-", - "title": "Versioning " + "id": "versioning", + "title": "Versioning" }, "h3": { - "id": "versioning-", - "title": "Versioning " + "id": "versioning", + "title": "Versioning" } }, "level": "h3", "level_title": "Versioning" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-when-to-create-a-new-prompt-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-when-to-create-a-new-prompt", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -1995,23 +1983,23 @@ ], "authed": false, "type": "markdown", - "hash": "#when-to-create-a-new-prompt-", + "hash": "#when-to-create-a-new-prompt", "content": "You should create a new Prompt for every different 'task to be done' with the LLM. For example each of these tasks are things that can be done by an LLM and should be a separate Prompt File: Writing Copilot, Personal Assistant, Summariser, etc.\nWe've seen people find it useful to also create a Prompt called 'Playground' where they can free form experiment without concern of breaking anything or making a mess of their other Prompts.", "hierarchy": { "h1": { - "id": "when-to-create-a-new-prompt-", - "title": "When to create a new Prompt " + "id": "when-to-create-a-new-prompt", + "title": "When to create a new Prompt" }, "h4": { - "id": "when-to-create-a-new-prompt-", - "title": "When to create a new Prompt " + "id": "when-to-create-a-new-prompt", + "title": "When to create a new Prompt" } }, "level": "h4", "level_title": "When to create a new Prompt" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-prompt-engineering-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-prompt-engineering", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -2034,19 +2022,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prompt-engineering-", + "hash": "#prompt-engineering", "content": "Understanding the best practices for working with large language models can significantly enhance your application's performance. Each model has its own failure modes, and the methods to address or mitigate these issues are not always straightforward. 
The field of \"prompt engineering\" has evolved beyond just crafting prompts to encompass designing systems that incorporate model queries as integral components.\nFor a start, read our Prompt Engineering 101 guide which covers techniques to improve model reasoning, reduce the chances of model hallucinations, and more.", "hierarchy": { "h1": { - "id": "prompt-engineering-", - "title": "Prompt Engineering " + "id": "prompt-engineering", + "title": "Prompt Engineering" } }, "level": "h1", "level_title": "Prompt Engineering" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-prompt-templates-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-prompt-templates", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -2069,7 +2057,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prompt-templates-", + "hash": "#prompt-templates", "content": "Inputs are defined in the template through the double-curly bracket syntax e.g. {{topic}} and the value of the variable will need to be supplied when you call the Prompt to create a generation.\nThis separation of concerns, keeping configuration separate from the query time data, is crucial for enabling you to experiment with different configurations and evaluate any changes.\nThe Prompt stores the configuration and the query time data in Logs, which can then be used to create Datasets for evaluation purposes.", "code_snippets": [ { @@ -2079,19 +2067,19 @@ ], "hierarchy": { "h1": { - "id": "prompt-templates-", - "title": "Prompt templates " + "id": "prompt-templates", + "title": "Prompt templates" }, "h3": { - "id": "prompt-templates-", - "title": "Prompt templates " + "id": "prompt-templates", + "title": "Prompt templates" } }, "level": "h3", "level_title": "Prompt templates" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-tool-use-function-calling-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-tool-use-function-calling", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -2114,23 +2102,23 @@ ], "authed": false, "type": "markdown", - "hash": "#tool-use-function-calling-", + "hash": "#tool-use-function-calling", "content": "Certain large language models support tool use or \"function calling\". For these models, you can supply the description of functions and the model can choose to call one or more of them by providing the values to call the functions with.\nFunction calling enables the model to perform various tasks:\n1. Call external APIs: The model can translate natural language into API calls, allowing it to interact with external services and retrieve information.\n2. Take actions: The model can exhibit agentic behavior, making decisions and taking actions based on the given context.\n3. Provide structured output: The model's responses can be constrained to a specific structured format, ensuring consistency and ease of parsing in downstream applications.\n\n\nTools for function calling can be defined inline in the Prompt editor in which case they form part of the Prompt version. Alternatively, they can be pulled out in a Tool file which is then referenced in the Prompt.\nEach Tool has functional interface that can be supplied as the JSON Schema needed for function calling. 
Additionally, if the Tool is executable on Humanloop, the result of any tool will automatically be inserted into the response in the API and in the Editor.", "hierarchy": { "h1": { - "id": "tool-use-function-calling-", - "title": "Tool Use (Function Calling) " + "id": "tool-use-function-calling", + "title": "Tool Use (Function Calling)" }, "h3": { - "id": "tool-use-function-calling-", - "title": "Tool Use (Function Calling) " + "id": "tool-use-function-calling", + "title": "Tool Use (Function Calling)" } }, "level": "h3", "level_title": "Tool Use (Function Calling)" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-using-prompts-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-using-prompts", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -2153,23 +2141,23 @@ ], "authed": false, "type": "markdown", - "hash": "#using-prompts-", + "hash": "#using-prompts", "content": "Prompts are callable as an API. You supply and query-time data such as input values or user messages, and the model will respond with its text output.\n\n\nA Prompt is callable in that if you supply the necessary inputs, it will return a response from the model.\nOnce you have created and versioned your Prompt, you can call it as an API to generate responses from the large language model directly. You can also fetch the log the data from your LLM calls, enabling you to evaluate and improve your models.", "hierarchy": { "h1": { - "id": "using-prompts-", - "title": "Using Prompts " + "id": "using-prompts", + "title": "Using Prompts" }, "h2": { - "id": "using-prompts-", - "title": "Using Prompts " + "id": "using-prompts", + "title": "Using Prompts" } }, "level": "h2", "level_title": "Using Prompts" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-proxying-your-llm-calls-vs-async-logging-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-proxying-your-llm-calls-vs-async-logging", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -2192,23 +2180,23 @@ ], "authed": false, "type": "markdown", - "hash": "#proxying-your-llm-calls-vs-async-logging-", + "hash": "#proxying-your-llm-calls-vs-async-logging", "content": "The easiest way to both call the large language model with your Prompt and to log the data is to use the Prompt.call() method (see the guide on Calling a Prompt) which will do both in a single API request. However, there are two main reasons why you may wish to log the data seperately from generation:\nYou are using your own model that is not natively supported in the Humanloop runtime.\n\nYou wish to avoid relying on Humanloop runtime as the proxied calls adds a small additional latency, or\n\n\nThe prompt.call() Api encapsulates the LLM provider calls (for example openai.Completions.create()), the model-config selection and logging steps in a single unified interface. There may be scenarios that you wish to manage the LLM provider calls directly in your own code instead of relying on Humanloop.\nHumanloop provides a comprehensive platform for developing, managing, and versioning Prompts, Tools and your other artifacts of you AI systems. 
This explainer will show you how to create, version and manage your Prompts, Tools and other artifacts.\nYou can also use Prompts without proxying through Humanloop to the model provider and instead call the model yourself and explicitly log the results to your Prompt.", "hierarchy": { "h1": { - "id": "proxying-your-llm-calls-vs-async-logging-", - "title": "Proxying your LLM calls vs async logging " + "id": "proxying-your-llm-calls-vs-async-logging", + "title": "Proxying your LLM calls vs async logging" }, "h2": { - "id": "proxying-your-llm-calls-vs-async-logging-", - "title": "Proxying your LLM calls vs async logging " + "id": "proxying-your-llm-calls-vs-async-logging", + "title": "Proxying your LLM calls vs async logging" } }, "level": "h2", "level_title": "Proxying your LLM calls vs async logging" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-serialization-prompt-file-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-serialization-prompt-file", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -2231,23 +2219,23 @@ ], "authed": false, "type": "markdown", - "hash": "#serialization-prompt-file-", + "hash": "#serialization-prompt-file", "content": "Our .prompt file format is a serialized version of a model config that is designed to be human-readable and suitable for checking into your version control systems alongside your code. See the .prompt files reference reference for more details.", "hierarchy": { "h1": { - "id": "serialization-prompt-file-", - "title": "Serialization (.prompt file) " + "id": "serialization-prompt-file", + "title": "Serialization (.prompt file)" }, "h2": { - "id": "serialization-prompt-file-", - "title": "Serialization (.prompt file) " + "id": "serialization-prompt-file", + "title": "Serialization (.prompt file)" } }, "level": "h2", "level_title": "Serialization (.prompt file)" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-format-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-format", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -2270,23 +2258,23 @@ ], "authed": false, "type": "markdown", - "hash": "#format-", + "hash": "#format", "content": "The .prompt file is heavily inspired by MDX, with model and hyperparameters specified in a YAML header alongside a JSX-inspired format for your Chat Template.", "hierarchy": { "h1": { - "id": "format-", - "title": "Format " + "id": "format", + "title": "Format" }, "h3": { - "id": "format-", - "title": "Format " + "id": "format", + "title": "Format" } }, "level": "h3", "level_title": "Format" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-basic-examples-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-basic-examples", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -2309,8 +2297,7 @@ ], "authed": false, "type": "markdown", - "hash": "#basic-examples-", - "content": "", + "hash": "#basic-examples", "code_snippets": [ { "lang": "jsx", @@ -2335,19 +2322,19 @@ ], "hierarchy": { "h1": { - "id": "basic-examples-", - "title": "Basic examples " + "id": "basic-examples", + "title": "Basic examples" }, "h3": { - "id": "basic-examples-", - "title": "Basic examples " + "id": "basic-examples", + "title": "Basic examples" } }, "level": "h3", "level_title": "Basic 
examples" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-dealing-with-sensitive-data-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.overview-dealing-with-sensitive-data", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/overview", @@ -2370,16 +2357,16 @@ ], "authed": false, "type": "markdown", - "hash": "#dealing-with-sensitive-data-", + "hash": "#dealing-with-sensitive-data", "content": "When working with sensitive data in your AI applications, it's crucial to handle it securely. Humanloop provides options to help you manage sensitive information while still benefiting from our platform's features.\nIf you need to process sensitive data without storing it in Humanloop, you can use the save: false parameter when making calls to the API or logging data. This ensures that only metadata about the request is stored, while the actual sensitive content is not persisted in our systems.\nFor PII detection, you can set up Guardrails to detect and prevent the generation of sensitive information.", "hierarchy": { "h1": { - "id": "dealing-with-sensitive-data-", - "title": "Dealing with sensitive data " + "id": "dealing-with-sensitive-data", + "title": "Dealing with sensitive data" }, "h2": { - "id": "dealing-with-sensitive-data-", - "title": "Dealing with sensitive data " + "id": "dealing-with-sensitive-data", + "title": "Dealing with sensitive data" } }, "level": "h2", @@ -2414,11 +2401,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create a Prompt in Humanloop using the UI or SDK, version it, and use it to generate responses from your AI models. Prompt management is a key part of the Humanloop platform.\nHow to create, version and use a Prompt in Humanloop", - "content": "Humanloop acts as a registry of your Prompts so you can centrally manage all their versions and Logs, and evaluate and improve your AI systems.\nThis guide will show you how to create a Prompt in the UI or via the SDK/API.\n\n\nPrerequisite: A Humanloop account.\nYou can create an account now by going to the Sign up page.", - "code_snippets": [] + "content": "Humanloop acts as a registry of your Prompts so you can centrally manage all their versions and Logs, and evaluate and improve your AI systems.\nThis guide will show you how to create a Prompt in the UI or via the SDK/API.\n\n\nPrerequisite: A Humanloop account.\nYou can create an account now by going to the Sign up page." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-prompt-create-a-prompt-in-the-ui-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-prompt-create-a-prompt-in-the-ui", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/create-prompt", @@ -2445,7 +2431,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-prompt-in-the-ui-", + "hash": "#create-a-prompt-in-the-ui", "content": "Create a Prompt File\nWhen you first open Humanloop you’ll see your File navigation on the left. Click ‘+ New’ and create a Prompt.\n\n\nIn the sidebar, rename this file to \"Comedian Bot\" now or later.\nCreate the Prompt template in the Editor\nThe left hand side of the screen defines your Prompt – the parameters such as model, temperature and template. 
The right hand side is a single chat session with this Prompt.\n\n\nClick the \"+ Message\" button within the chat template to add a system message to the chat template.\n\n\nAdd the following templated message to the chat template.\nThis message forms the chat template. It has an input slot called topic (surrounded by two curly brackets) for an input value that is provided each time you call this Prompt.\nOn the right hand side of the page, you’ll now see a box in the Inputs section for topic.\nAdd a value fortopic e.g. music, jogging, whatever.\n\nClick Run in the bottom right of the page.\n\n\nThis will call OpenAI’s model and return the assistant response. Feel free to try other values, the model is very funny.\nYou now have a first version of your prompt that you can use.\nCommit your first version of this Prompt\nClick the Commit button\n\nPut “initial version” in the commit message field\n\nClick Commit\n\n\n\n\nView the logs\nUnder the Prompt File click ‘Logs’ to view all the generations from this Prompt\nClick on a row to see the details of what version of the prompt generated it. From here you can give feedback to that generation, see performance metrics, open up this example in the Editor, or add this log to a dataset.", "code_snippets": [ { @@ -2457,15 +2443,15 @@ ], "hierarchy": { "h2": { - "id": "create-a-prompt-in-the-ui-", - "title": "Create a Prompt in the UI " + "id": "create-a-prompt-in-the-ui", + "title": "Create a Prompt in the UI" } }, "level": "h2", "level_title": "Create a Prompt in the UI" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-prompt-create-a-prompt-using-the-sdk-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-prompt-create-a-prompt-using-the-sdk", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/create-prompt", @@ -2492,7 +2478,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-prompt-using-the-sdk-", + "hash": "#create-a-prompt-using-the-sdk", "content": "The Humanloop Python SDK allows you to programmatically create and version your Prompts in Humanloop, and log generations from your models. This guide will show you how to create a Prompt using the SDK.\nNote that you can also version your prompts dynamically with every Prompt\n\n\nPrerequisite: A Humanloop SDK Key.\nYou can get this from your Organisation Settings page if you have the right permissions.\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)\n\n\nAfter initializing the SDK client, you can call the Prompt creation endpoint.\n\n\nCreate the Prompt\n\n\nGo to the App\nGo to the Humanloop app and you will see your new project as a Prompt with the model config you just created.\nYou now have a Prompt in Humanloop that contains your initial version. 
You can call the Prompt in Editor and invite team members by going to your organization's members page.", "code_snippets": [ { @@ -2518,15 +2504,15 @@ ], "hierarchy": { "h2": { - "id": "create-a-prompt-using-the-sdk-", - "title": "Create a Prompt using the SDK " + "id": "create-a-prompt-using-the-sdk", + "title": "Create a Prompt using the SDK" } }, "level": "h2", "level_title": "Create a Prompt using the SDK" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-prompt-next-steps-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-prompt-next-steps", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/create-prompt", @@ -2553,12 +2539,12 @@ ], "authed": false, "type": "markdown", - "hash": "#next-steps-", + "hash": "#next-steps", "content": "With the Prompt set up, you can now integrate it into your app by following the Call a Prompt Guide.", "hierarchy": { "h2": { - "id": "next-steps-", - "title": "Next Steps " + "id": "next-steps", + "title": "Next Steps" } }, "level": "h2", @@ -2593,11 +2579,10 @@ "authed": false, "type": "markdown", "description": "Learn how to call your Prompts that are managed on Humanloop.\nA guide on how to call your Prompts that are managed on Humanloop.", - "content": "This guide will show you how to call your Prompts as an API, enabling you to generate responses from the large language model that uses the versioned template and parameters. If you want to call an LLM with a prompt that you're defining in code follow the guide on Calling a LLM through the Humanloop Proxy.", - "code_snippets": [] + "content": "This guide will show you how to call your Prompts as an API, enabling you to generate responses from the large language model that uses the versioned template and parameters. If you want to call an LLM with a prompt that you're defining in code follow the guide on Calling a LLM through the Humanloop Proxy." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.call-prompt-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.call-prompt-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/call-prompt", @@ -2624,7 +2609,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "Before you can use the new prompt.call() method, you need to have a Prompt. If you don't have one, please follow our Prompt creation guide first.\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. 
Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)\n\n\n\n\nGet the Prompt ID\nIn Humanloop, navigate to the Prompt and copy the Prompt ID by clicking on the ID in the top right corner of the screen.\n\n\nUse the SDK to call your model\nNow you can use the SDK to generate completions and log the results to your Prompt using the new prompt.call() method:\n\n\n\n\nNavigate to the Logs tab of the Prompt\nAnd you'll be able to see the recorded inputs, messages and responses of your chat.", "code_snippets": [ { @@ -2650,19 +2635,19 @@ ], "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.call-prompt-call-the-llm-with-a-prompt-that-youre-defining-in-code-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.call-prompt-call-the-llm-with-a-prompt-that-youre-defining-in-code", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/call-prompt", @@ -2689,12 +2674,12 @@ ], "authed": false, "type": "markdown", - "hash": "#call-the-llm-with-a-prompt-that-youre-defining-in-code-", + "hash": "#call-the-llm-with-a-prompt-that-youre-defining-in-code", "content": "🎉 Now that you have chat messages flowing through your Prompt you can start to log your end user feedback to evaluate and improve your models.", "hierarchy": { "h2": { - "id": "call-the-llm-with-a-prompt-that-youre-defining-in-code-", - "title": "Call the LLM with a prompt that you're defining in code " + "id": "call-the-llm-with-a-prompt-that-youre-defining-in-code", + "title": "Call the LLM with a prompt that you're defining in code" } }, "level": "h2", @@ -2729,11 +2714,10 @@ "authed": false, "type": "markdown", "description": "Learn how to leverage the Humanloop proxy to call various AI models from different providers using a unified interface\nA guide on calling large language model providers (OpenAI, Anthropic, Google etc.) through the Humanloop API", - "content": "This guide walks you through how to call various models through the Humanloop API. This is the same as calling a Prompt but instead of using a version of the Prompt that is defined in Humanloop, you're setting the template and parameters directly in code.\nThe benefits of using the Humanloop proxy are:\nconsistent interface across different AI providers: OpenAI, Anthropic, Google and more – see the full list of supported models\n\nall your requests are logged automatically\n\ncreates versions of your Prompts automatically, so you can track performance over time\n\ncan call multiple providers while managing API keys centrally (you can also supply keys at runtime)\n\n\nIn this guide, we'll cover how to call LLMs using the Humanloop proxy.", - "code_snippets": [] + "content": "This guide walks you through how to call various models through the Humanloop API. 
This is the same as calling a Prompt but instead of using a version of the Prompt that is defined in Humanloop, you're setting the template and parameters directly in code.\nThe benefits of using the Humanloop proxy are:\nconsistent interface across different AI providers: OpenAI, Anthropic, Google and more – see the full list of supported models\n\nall your requests are logged automatically\n\ncreates versions of your Prompts automatically, so you can track performance over time\n\ncan call multiple providers while managing API keys centrally (you can also supply keys at runtime)\n\n\nIn this guide, we'll cover how to call LLMs using the Humanloop proxy." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.proxy-model-calls-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.proxy-model-calls-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/proxy-model-calls", @@ -2760,7 +2744,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "First you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)\n\n\n\n\nUse the SDK to call your model\nNow you can use the SDK to generate completions and log the results to your Prompt using the new prompt.call() method:\n\n\n\n\nNavigate to the Logs tab of the Prompt\nAnd you'll be able to see the recorded inputs, messages and responses of your chat.\n🎉 Now that you have chat messages flowing through your Prompt you can start to log your end user feedback to evaluate and improve your models.", "code_snippets": [ { @@ -2786,12 +2770,12 @@ ], "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", @@ -2826,11 +2810,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create a Prompt in Humanloop using the UI or SDK, version it, and use it to generate responses from your AI models. Prompt management is a key part of the Humanloop platform.\nHow to log generations from any large language model (LLM) to Humanloop", - "content": "This guide will show you how to capture the Logs of your LLM calls into Humanloop.\nThe easiest way to log LLM generations to Humanloop is to use the Prompt.call() method (see the guide on Calling a Prompt). You will only need to supply prompt ID and the inputs needed by the prompt template, and the endpoint will handle fetching the latest template, making the LLM call and logging the result.\nHowever, there may be scenarios that you wish to manage the LLM provider calls directly in your own code instead of relying on Humanloop. 
For example, you may be using an LLM provider that is not directly supported by Humanloop such as a custom self-hosted model, or you may want to avoid adding Humanloop to the critical path of the LLM API calls.", - "code_snippets": [] + "content": "This guide will show you how to capture the Logs of your LLM calls into Humanloop.\nThe easiest way to log LLM generations to Humanloop is to use the Prompt.call() method (see the guide on Calling a Prompt). You will only need to supply prompt ID and the inputs needed by the prompt template, and the endpoint will handle fetching the latest template, making the LLM call and logging the result.\nHowever, there may be scenarios that you wish to manage the LLM provider calls directly in your own code instead of relying on Humanloop. For example, you may be using an LLM provider that is not directly supported by Humanloop such as a custom self-hosted model, or you may want to avoid adding Humanloop to the critical path of the LLM API calls." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.log-to-a-prompt-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.log-to-a-prompt-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/log-to-a-prompt", @@ -2857,7 +2840,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)", "code_snippets": [ { @@ -2883,15 +2866,15 @@ ], "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.log-to-a-prompt-log-data-to-your-prompt-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.log-to-a-prompt-log-data-to-your-prompt", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/log-to-a-prompt", @@ -2918,7 +2901,7 @@ ], "authed": false, "type": "markdown", - "hash": "#log-data-to-your-prompt-", + "hash": "#log-data-to-your-prompt", "content": "To log LLM generations to Humanloop, you will need to make a call to the /prompts/log endpoint.\nNote that you can either specify a version of the Prompt you are logging against - in which case you will need to take care that you are supplying the correct version ID and inputs. Or you can supply the full prompt and a new version will be created if it has not been seen before.\n\n\nGet your Prompt\nFetch a Prompt from Humanloop by specifying the ID. You can ignore this step if your prompts are created dynamically in code.\n\n\n\n\nHere's how to do this in code:\n\n\n\n\n\n\nCall your Prompt\nThis can be your own model, or any other LLM provider. 
Here is an example of calling OpenAI:\n\n\n\n\n\n\nLog the result\nFinally, log the result to your project:", "code_snippets": [ { @@ -2972,8 +2955,8 @@ ], "hierarchy": { "h2": { - "id": "log-data-to-your-prompt-", - "title": "Log data to your Prompt " + "id": "log-data-to-your-prompt", + "title": "Log data to your Prompt" } }, "level": "h2", @@ -3008,11 +2991,10 @@ "authed": false, "type": "markdown", "description": "Learn how to use tool calling in your large language models and intract with it in the Humanloop Prompt Editor.\nHow to use Tool Calling to have the model interact with external functions.", - "content": "Humanloop's Prompt Editor supports for Tool Calling functionality, enabling models to interact with external functions. This feature, akin to OpenAI's function calling, is implemented through JSON Schema tools in Humanloop. These Tools adhere to the widely-used JSON Schema syntax, providing a standardized way to define data structures.\nWithin the editor, you have the flexibility to create inline JSON Schema tools as part of your model configuration. This capability allows you to establish a structured framework for the model's responses, enhancing control and predictability. Throughout this guide, we'll explore the process of leveraging these tools within the editor environment.", - "code_snippets": [] + "content": "Humanloop's Prompt Editor supports for Tool Calling functionality, enabling models to interact with external functions. This feature, akin to OpenAI's function calling, is implemented through JSON Schema tools in Humanloop. These Tools adhere to the widely-used JSON Schema syntax, providing a standardized way to define data structures.\nWithin the editor, you have the flexibility to create inline JSON Schema tools as part of your model configuration. This capability allows you to establish a structured framework for the model's responses, enhancing control and predictability. Throughout this guide, we'll explore the process of leveraging these tools within the editor environment." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.tool-calling-editor-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.tool-calling-editor-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/tool-calling-editor", @@ -3039,19 +3021,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.tool-calling-editor-create-and-use-a-tool-in-the-prompt-editor-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.tool-calling-editor-create-and-use-a-tool-in-the-prompt-editor", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/tool-calling-editor", @@ -3078,7 +3060,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-and-use-a-tool-in-the-prompt-editor-", + "hash": "#create-and-use-a-tool-in-the-prompt-editor", "content": "To create and use a tool, follow these steps:\n\n\nOpen the editor\nGo to a Prompt and open the Editor.\nSelect a model that supports Tool Calling\n\n\nTo view the list of models that support Tool calling, see the Models\npage.\nIn the editor, you'll see an option to select the model. Choose a model like gpt-4o which supports Tool Calling.\nDefine the Tool\nTo get started with tool definition, it's recommended to begin with one of our preloaded example tools. For this guide, we'll use the get_current_weather tool. Select this from the dropdown menu of preloaded examples.\nIf you choose to edit or create your own tool, you'll need to use the universal JSON Schema syntax. When creating a custom tool, it should correspond to a function you have defined in your own code. The JSON Schema you define here specifies the parameters and structure you want the AI model to use when interacting with your function.\n\n\nTest it out\nNow, let's test our tool by inputting a relevant query. Since we're working with a weather-related tool, try typing: What's the weather in Boston?. This should prompt OpenAI to respond using the parameters we've defined.\n\n\nKeep in mind that the model's use of the tool depends on the relevance of the user's input. For instance, a question like 'how are you today?' is unlikely to trigger a weather-related tool response.\nCheck assistant response for a tool call\nUpon successful setup, the assistant should respond by invoking the tool, providing both the tool's name and the required data. For our get_current_weather tool, the response might look like this:\nInput tool response\nAfter the tool call, the editor will automatically add a partially filled tool message for you to complete.\nYou can paste in the exact response that the Tool would respond with. For prototyping purposes, you can also just simulate the response yourself (LLMs can handle it!). Provide a mock response:\nTo input the tool response:\nFind the tool response field in the editor.\n\nEnter the response matching the expected format, such as:\n\n\nRemember, the goal is to simulate the tool's output as if it were actually fetching real-time weather data.
This allows you to test and refine your prompt and tool interaction without needing to implement the actual weather API.\nSubmit tool response\nAfter entering the simulated tool response, click on the 'Run' button to send the Tool message to the AI model.\nReview assistant response\nThe assistant should now respond using the information provided in your simulated tool response. For example, if you input that the weather in London was drizzling at 12°C, the assistant might say:\nBased on the current weather data, it's drizzling in London with a temperature of 12 degrees Celsius.\nThis response demonstrates how the AI model incorporates the tool's output into its reply, providing a more contextual and data-driven answer.\n\n\nIterate and refine\nFeel free to experiment with different queries and simulated tool responses. This iterative process helps you fine-tune your prompt and understand how the AI model interacts with the tool, ultimately leading to more effective and accurate responses in your application.\nSave your Prompt\nBy saving your prompt, you're creating a new version that includes the tool configuration.\nCongratulations! You've successfully learned how to use tool calling in the Humanloop editor. This powerful feature allows you to simulate and test tool interactions, helping you create more dynamic and context-aware AI applications.\nKeep experimenting with different scenarios and tool responses to fully explore the capabilities of your AI model and create even more impressive applications!", "code_snippets": [ { @@ -3094,15 +3076,15 @@ ], "hierarchy": { "h2": { - "id": "create-and-use-a-tool-in-the-prompt-editor-", - "title": "Create and use a tool in the Prompt Editor " + "id": "create-and-use-a-tool-in-the-prompt-editor", + "title": "Create and use a tool in the Prompt Editor" } }, "level": "h2", "level_title": "Create and use a tool in the Prompt Editor" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.tool-calling-editor-next-steps-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.tool-calling-editor-next-steps", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/tool-calling-editor", @@ -3129,12 +3111,12 @@ ], "authed": false, "type": "markdown", - "hash": "#next-steps-", + "hash": "#next-steps", "content": "After you've created and tested your tool configuration, you might want to reuse it across multiple prompts. Humanloop allows you to link a tool, making it easier to share and manage tool configurations.\nFor more detailed instructions on how to link and manage tools, check out our guide on Linking a JSON Schema Tool.", "hierarchy": { "h2": { - "id": "next-steps-", - "title": "Next steps " + "id": "next-steps", + "title": "Next steps" } }, "level": "h2", @@ -3169,11 +3151,10 @@ "authed": false, "type": "markdown", "description": "Learn how to use the Snippet tool to manage common text snippets that you want to reuse across your different prompts.\nHow to re-use common text snippets in your Prompt templates with the Snippet Tool", - "content": "The Snippet Tool supports managing common text 'snippets' that you want to reuse across your different prompts. A Snippet tool acts as a simple key/value store, where the key is the name of the common re-usable text snippet and the value is the corresponding text.\nFor example, you may have some common persona descriptions that you found to be effective across a range of your LLM features. 
Or maybe you have some specific formatting instructions that you find yourself re-using again and again in your prompts.\nInstead of needing to copy and paste between your editor sessions and keep track of which projects you edited, you can instead inject the text into your prompt using the Snippet tool.", - "code_snippets": [] + "content": "The Snippet Tool supports managing common text 'snippets' that you want to reuse across your different prompts. A Snippet tool acts as a simple key/value store, where the key is the name of the common re-usable text snippet and the value is the corresponding text.\nFor example, you may have some common persona descriptions that you found to be effective across a range of your LLM features. Or maybe you have some specific formatting instructions that you find yourself re-using again and again in your prompts.\nInstead of needing to copy and paste between your editor sessions and keep track of which projects you edited, you can instead inject the text into your prompt using the Snippet tool." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.reusable-snippets-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.reusable-snippets-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/reusable-snippets", @@ -3200,16 +3181,16 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\n\n\nThis feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan\nTo create and use a snippet tool, follow the following steps:\n\n\nCreate a new Snippet Tool\n\n\nName the Tool\nName it assistant-personalities and give it a description Useful assistant personalities.\nAdd a key called \"helpful-assistant\"\nIn the initial box add helpful-assistant and give it a value of You are a helpful assistant. You like to tell jokes and if anyone asks your name is Sam.\nAdd another key called \"grumpy-assistant\"\nLet's add another key-value pair, so press the Add a key/value pair button and add a new key of grumpy-assistant and give it a value of You are a grumpy assistant. You rarely try to help people and if anyone asks your name is Freddy..\n\n\nPress Create Tool.\nNow your Snippets are set up, you can use it to populate strings in your prompt templates across your projects.\nNavigate to the Editor\nGo to the Editor of your previously created project.\nAdd {{ assistant-personalities(key) }} to your prompt\nDelete the existing prompt template and add {{ assistant-personalities(key) }} to your prompt.\n\n\nDouble curly bracket syntax is used to call a tool in the editor. Inside the curly brackets you put the tool name, e.g. {{ my-tool-name(key) }}.\nEnter the key as an input\nIn the input area set the value to helpful-assistant. The tool requires an input value to be provided for the key. 
When adding the tool an inputs field will appear in the top right of the editor where you can specify your key.\nPress the Run button\nStart the chat with the LLM and you can see the response of the LLM, as well as, see the key you previously defined add in the Chat on the right.\n\n\nChange the key to grumpy-assistant.\n\n\nIf you want to see the corresponding snippet to the key you either need to\nfirst run the conversation to fetch the string and see it in the preview.\nPlay with the LLM\nAsk the LLM, I'm a customer and need help solving this issue. Can you help?'. You should see a grumpy response from \"Freddy\" now.\nIf you have a specific key you would like to hardcode in the prompt, you can define it using the literal key value: {{ (\"key\") }}, so in this case it would be {{ assistant-personalities(\"grumpy-assistant\") }}. Delete the grumpy-assistant field and add it into your chat template.\nSave your Prompt.\nIf you're happy with you're grumpy assistant, save this new version of your Prompt.\n\n\nThe Snippet tool is particularly useful because you can define passages of text once in a Snippet tool and reuse them across multiple prompts, without needing to copy/paste them and manually keep them all in sync. Editing the values in your tool allows the changes to automatically propagate to the Prompts when you update them, as long as the key is the same.\n\n\nSince the values for a Snippet are saved on the Tool, not the Prompt, changing\nthe values (or keys) defined in your Snippet tools can affect the Prompt's\nbehaviour in way that won't be captured by the Prompt's version.\nThis could be exactly what you intend, however caution should still be used make sure the\nchanges are expected.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", @@ -3244,11 +3225,10 @@ "authed": false, "type": "markdown", "description": "Environments are a tagging system for deploying Prompts. They enable you to deploy maintain a streamlined deployment workflow and keep track of different versions of Prompts.\nHow to create and use environments to manage the deployment lifecycle of Prompts", - "content": "Environments are a tagging system for deploying Prompts. They enable you to deploy maintain a streamlined deployment workflow and keep track of different versions of Prompts.\nThe default environment is your production environment. Everytime you fetch a Prompt, Tool, Dataset etc. without specifying an alternative environment or specific version, the version that is tagged with the default environment is returned.", - "code_snippets": [] + "content": "Environments are a tagging system for deploying Prompts. They enable you to deploy maintain a streamlined deployment workflow and keep track of different versions of Prompts.\nThe default environment is your production environment. Everytime you fetch a Prompt, Tool, Dataset etc. without specifying an alternative environment or specific version, the version that is tagged with the default environment is returned." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-deployment-environments-create-an-environment-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-deployment-environments-create-an-environment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/create-deployment-environments", @@ -3275,19 +3255,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-an-environment-", + "hash": "#create-an-environment", "content": "Go to your Environments tab in your Organization's settings.\nClick the '+ Environment' button to open the new environment dialog\nAssign a custom name to the environment\nWe recommend something short. For example, you could use staging, prod, qa, dev, testing, etc. This name is used to identify the environment in the UI and in the API.\nClick Create.", "hierarchy": { "h2": { - "id": "create-an-environment-", - "title": "Create an environment " + "id": "create-an-environment", + "title": "Create an environment" } }, "level": "h2", "level_title": "Create an environment" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-deployment-environments-updating-the-default-environment-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-deployment-environments-updating-the-default-environment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/create-deployment-environments", @@ -3314,19 +3294,19 @@ ], "authed": false, "type": "markdown", - "hash": "#updating-the-default-environment-", + "hash": "#updating-the-default-environment", "content": "Only Enterprise customers can update their default environment", "hierarchy": { "h2": { - "id": "updating-the-default-environment-", - "title": "Updating the default environment " + "id": "updating-the-default-environment", + "title": "Updating the default environment" } }, "level": "h2", "level_title": "Updating the default environment" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-deployment-environments-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-deployment-environments-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/create-deployment-environments", @@ -3353,16 +3333,16 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You have multiple environments - if not, first go through the Create an\nenvironment section.\n\n\nEvery organization will have a default environment. This can be updated by the following:\n\n\nGo to your Organization's Environments page.\nClick on the dropdown menu of an environment that is not already the default.\nClick the Make default option\nA dialog will open asking you if you are certain this is a change you want to make.
If so, click the Make default button.\nVerify the default tag has moved to the environment you selected.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", @@ -3397,11 +3377,10 @@ "authed": false, "type": "markdown", "description": "Environments enable you to deploy model configurations and experiments, making them accessible via API, while also maintaining a streamlined production workflow.\nIn this guide we will demonstrate how to create and use environments.", - "content": "Environments are a tagging system for deploying Prompts. They enable you to deploy maintain a streamlined deployment workflow and keep track of different versions of Prompts.", - "code_snippets": [] + "content": "Environments are a tagging system for deploying Prompts. They enable you to deploy maintain a streamlined deployment workflow and keep track of different versions of Prompts." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.deploy-to-environment-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.deploy-to-environment-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/deploy-to-environment", @@ -3428,12 +3407,12 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\nTo deploy a model config to an environment:\n\n\nNavigate to the Dashboard of your Prompt\nClick the dropdown menu of the environment.\n\n\nClick the Change deployment button\nSelect a version\nChoose the version you want to deploy from the list of available versions.\n\n\nClick the Deploy button.", "hierarchy": { "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", @@ -3468,11 +3447,10 @@ "authed": false, "type": "markdown", "description": "Directories can be used to group together related files. This is useful for organizing your work.\nDirectories group together related files", - "content": "This guide will show you how to create a Directory in the UI. A directory is a collection of files and other directories.\n\n\nPrerequisite: A Humanloop account.\nYou can create an account now by going to the Sign up page.", - "code_snippets": [] + "content": "This guide will show you how to create a Directory in the UI. A directory is a collection of files and other directories.\n\n\nPrerequisite: A Humanloop account.\nYou can create an account now by going to the Sign up page." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-directory-create-a-directory-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.create-directory-create-a-directory", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/create-directory", @@ -3499,12 +3477,12 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-directory-", + "hash": "#create-a-directory", "content": "Create a Directory\nOpen Humanloop and navigate to the File navigation on the left.\n\nClick '+ New' and select Directory.\n\nName your new directory, for example, \"Summarization App\".\n\n\n\n\nYou can call files and directories anything you want. Capital letters, spaces\nare all ok!\n\n\n(Optional) Move a File into the Directory\nIn the File navigation sidebar, right-click on the file in the sidebar and select \"Move\" from the context menu\n\nChoose the destination directory\n\n\n\n\nYou have now successfully created a directory and moved a file into it. This organization can help you manage your AI applications more efficiently within Humanloop.", "hierarchy": { "h2": { - "id": "create-a-directory-", - "title": "Create a Directory " + "id": "create-a-directory", + "title": "Create a Directory" } }, "level": "h2", @@ -3539,11 +3517,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create a JSON Schema tool that can be reused across multiple Prompts.\nManaging and versioning a Tool seperately from your Prompts", - "content": "It's possible to re-use tool definitions them across multiple Prompts. You achieve this by having a Prompt file which defines a JSON schema, and linking them to your Prompt.\nYou achieve this by creating a JSON Schema Tool and linking that to as many Prompts as you need.\nImportantly, updates to this Tool defined here will then propagate automatically to all the Prompts you've linked it to, without having to deploy new versions of the Prompt.", - "code_snippets": [] + "content": "It's possible to re-use tool definitions them across multiple Prompts. You achieve this by having a Prompt file which defines a JSON schema, and linking them to your Prompt.\nYou achieve this by creating a JSON Schema Tool and linking that to as many Prompts as you need.\nImportantly, updates to this Tool defined here will then propagate automatically to all the Prompts you've linked it to, without having to deploy new versions of the Prompt." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.link-tool-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.link-tool-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/link-tool", @@ -3570,19 +3547,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.link-tool-creating-and-linking-a-json-schema-tool-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.link-tool-creating-and-linking-a-json-schema-tool", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/link-tool", @@ -3609,7 +3586,7 @@ ], "authed": false, "type": "markdown", - "hash": "#creating-and-linking-a-json-schema-tool-", + "hash": "#creating-and-linking-a-json-schema-tool", "content": "To create a reusable JSON Schema tool for your organization, follow these steps:\n\n\nCreate a new Tool file\nNavigate to the homepage or sidebar and click the 'New File' button.\nChoose the JSON Schema Tool type\nFrom the available options, select Json Schema as the Tool type.\nDefine your tool's structure\nPaste the following JSON into the provided dialog to define your tool's structure:\nIf you choose to edit or create your own tool, you'll need to use the universal JSON Schema syntax. When creating a custom tool, it should correspond to a function you have defined in your own code. The JSON Schema you define here specifies the parameters and structure you want the AI model to use when interacting with your function.\nCommit this version of the Tool\nPress the Commit button to commit this version of the Tool, and set it as the default version by deploying it.\nNavigate to the Editor of a Prompt\nSwitch to a model that supports tool calling, such as gpt-4o.\n\n\nTo view the list of models that support Tool calling, see the Models\npage.\nAdd Tool to the Prompt definition.\nSelect 'Link existing Tool'\nIn the dropdown, go to the Link existing tool option. You should see your get_current_weather tool, click on it to link it to your editor.\n\n\nTest that the Prompt is working with the tool\nNow that your Tool is linked you can start using it. In the Chat section, in the User input, enter \"what is the weather in london?\"\nPress the Run button.\nYou should see the Assistant respond with the tool response and a new Tool field inserted to allow you to insert an answer. In this case, put in 22 into the tool response and press Run.\n\n\nThe model will respond with The current weather in London is 22 degrees.\nCommit the Prompt\nYou've linked a Tool to your Prompt, now let's save it. Press the Save button and name your Prompt weather-model-config.\n(Optional) Update the Tool\nNow that's we've linked your get_current_weather tool to your Prompt, let's try updating the base tool and see how it propagates the changes down into your saved weather-model-config config. Navigate back to the Tool in the sidebar and go to the Editor.\nUpdate the Tool\nLet's update both the name, as well as the required fields. 
For the name, update it to get_current_weather_updated and for the required fields, add unit as a required field. The should look like this now:\nCommit and deploy the Tool\nPress the Commmmit button and then follow the steps to deloy this version of the Tool.\nYour Tool is now updated.\nTry the Prompt again\nNavigate back to your previous project, and open the editor. You should see the weather-model-config loaded as the active config. You should also be able to see the name of your previously linked tool in the Tools section now says get_current_weather_updated.\nIn the Chat section enter in again, What is the weather in london?, and press Run again.\nCheck the response\nYou should see the updated tool response, and how it now contains the unit field. Congratulations, you've successfully linked a JSON Schema tool to your Prompt.\n\n\n\n\nWhen updating your Tool, remember that the change will affect all the Prompts\nthat link to it. Be careful when making updates to not inadvertently change\nsomething you didn't intend.", "code_snippets": [ { @@ -3631,8 +3608,8 @@ ], "hierarchy": { "h2": { - "id": "creating-and-linking-a-json-schema-tool-", - "title": "Creating and linking a JSON Schema Tool " + "id": "creating-and-linking-a-json-schema-tool", + "title": "Creating and linking a JSON Schema Tool" } }, "level": "h2", @@ -3667,11 +3644,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create a JSON Schema tool that can be reused across multiple Prompts.\nManaging and versioning a Tool seperately from your Prompts", - "content": "It's possible to re-use tool definitions them across multiple Prompts. You achieve this by having a Prompt file which defines a JSON schema, and linking them to your Prompt.\nYou can achieve this by first defining an instance of a JSON Schema tool in your global Tools tab. Here you can define a tool once, such as get_current_weather(location: string, unit: 'celsius' | 'fahrenheit'), and then link that to as many model configs as you need within the Editor as shown below.\nImportantly, updates to the get_current_weather JSON Schema tool defined here will then propagate automatically to all the model configs you've linked it to, without having to publish new versions of the prompt.", - "code_snippets": [] + "content": "It's possible to re-use tool definitions them across multiple Prompts. You achieve this by having a Prompt file which defines a JSON schema, and linking them to your Prompt.\nYou can achieve this by first defining an instance of a JSON Schema tool in your global Tools tab. Here you can define a tool once, such as get_current_weather(location: string, unit: 'celsius' | 'fahrenheit'), and then link that to as many model configs as you need within the Editor as shown below.\nImportantly, updates to the get_current_weather JSON Schema tool defined here will then propagate automatically to all the model configs you've linked it to, without having to publish new versions of the prompt." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.link-json-schema-tool-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.link-json-schema-tool-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/link-json-schema-tool", @@ -3698,19 +3674,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "A Humanloop account - you can create one by going to our sign up page.\n\nBe on a paid plan - your organization has been upgraded from the Free tier.\n\nYou already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\nTo create a JSON Schema tool that can be reusable across your organization, follow the following steps:", "hierarchy": { "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.link-json-schema-tool-creating-and-linking-a-json-schema-tool-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.development.guides.link-json-schema-tool-creating-and-linking-a-json-schema-tool", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/development/guides/link-json-schema-tool", @@ -3737,7 +3713,7 @@ ], "authed": false, "type": "markdown", - "hash": "#creating-and-linking-a-json-schema-tool-", + "hash": "#creating-and-linking-a-json-schema-tool", "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan\n\n\nCreate a Tool file\nClick the 'New File' button on the homepage or in the sidebar.\nSelect the Json Schema Tool type\nDefine your tool\nSet the name, description, and parameters values. Our guide for using Tool Calling in the Prompt Editor can be a useful reference in this case. We can use the get_current_weather schema in this case. Paste the following into the dialog:\nPress the Create button.\nNavigate to the Editor\nMake sure you are using a model that supports tool calling, such as gpt-4o.\n\n\nSee the Models page for a list of models that support tool calling.\nAdd Tool to the Prompt definition.\nSelect 'Link existing Tool'\nIn the dropdown, go to the Link existing tool option. You should see your get_current_weather tool, click on it to link it to your editor.\n\n\nTest that the Prompt is working with the tool\nNow that your tool is linked you can start using it as you would normally use an inline tool. In the Chat section, in the User input, enter \"What is the weather in london?\"\nPress the Run button.\nYou should see the Assistant respond with the tool response and a new Tool field inserted to allow you to insert an answer. In this case, put in 22 into the tool response and press Run.\n\n\nThe model will respond with The current weather in London is 22 degrees.\nSave the Prompt\nYou've linked a tool to your model config, now let's save it. Press the Save button and name your model config weather-model-config.\n(Optional) Update the Tool\nNow that's we've linked your get_current_weather tool to your model config, let's try updating the base tool and see how it propagates the changes down into your saved weather-model-config config. Navigate back to the Tools in the sidebar and go to the Editor.\nChange the tool.\nLet's update both the name, as well as the required fields. 
For the name, update it to get_current_weather_updated and for the required fields, add unit as a required field. The should look like this now:\nSave the Tool\nPress the Save button, then the following Continue button to confirm.\nYour tool is now updated.\nTry the Prompt again\nNavigate back to your previous project, and open the editor. You should see the weather-model-config loaded as the active config. You should also be able to see the name of your previously linked tool in the Tools section now says get_current_weather_updated.\nIn the Chat section enter in again, What is the weather in london?, and press Run again.\nCheck the response\nYou should see the updated tool response, and how it now contains the unit field. Congratulations, you've successfully linked a JSON Schema tool to your model config.\n\n\n\n\nWhen updating your organization-level JSON Schema tools, remember that the\nchange will affect all the places you've previously linked the tool. Be\ncareful when making updates to not inadvertently change something you didn't\nintend.", "code_snippets": [ { @@ -3759,8 +3735,8 @@ ], "hierarchy": { "h2": { - "id": "creating-and-linking-a-json-schema-tool-", - "title": "Creating and linking a JSON Schema Tool " + "id": "creating-and-linking-a-json-schema-tool", + "title": "Creating and linking a JSON Schema Tool" } }, "level": "h2", @@ -3791,11 +3767,10 @@ "authed": false, "type": "markdown", "description": "Learn how to set up and use Humanloop's evaluation framework to test and track the performance of your AI apps.\nHumanloop's evaluation framework allows you to test and track the performance of your LLM apps in a rigorous way.", - "content": "A key part of successful prompt engineering and deployment for LLMs is a robust evaluation framework. In this section we provide guides for how to set up Humanloop's evaluation framework for your Prompts and Tools.\nThe core entity in the Humanloop evaluation framework is an Evaluator - a function you define which takes an LLM-generated log as an argument and returns a judgment.\nThe judgment is typically either a boolean or a number, indicating how well the model performed according to criteria you determine based on your use case.", - "code_snippets": [] + "content": "A key part of successful prompt engineering and deployment for LLMs is a robust evaluation framework. In this section we provide guides for how to set up Humanloop's evaluation framework for your Prompts and Tools.\nThe core entity in the Humanloop evaluation framework is an Evaluator - a function you define which takes an LLM-generated log as an argument and returns a judgment.\nThe judgment is typically either a boolean or a number, indicating how well the model performed according to criteria you determine based on your use case." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-sources-of-judgement-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-sources-of-judgement", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/overview", @@ -3818,19 +3793,19 @@ ], "authed": false, "type": "markdown", - "hash": "#sources-of-judgement-", + "hash": "#sources-of-judgement", "content": "Currently, you can define three different Evaluator sources on Humanloop:\nCode - using simple deterministic rules based judgments against attributes like cost, token usage, latency, regex rules on the output, etc. 
These are generally fast and cheap to run at scale.\n\nAI - using other foundation models to provide judgments on the output. This allows for more qualitative and nuanced judgments for a fraction of the cost of human judgments.\n\nHuman - getting gold standard judgments from either end users of your application, or internal domain experts. This can be the most expensive and slowest option, but also the most reliable.", "hierarchy": { "h2": { - "id": "sources-of-judgement-", - "title": "Sources of Judgement " + "id": "sources-of-judgement", + "title": "Sources of Judgement" } }, "level": "h2", "level_title": "Sources of Judgement" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-online-monitoring-vs-offline-evaluation-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-online-monitoring-vs-offline-evaluation", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/overview", @@ -3853,19 +3828,19 @@ ], "authed": false, "type": "markdown", - "hash": "#online-monitoring-vs-offline-evaluation-", + "hash": "#online-monitoring-vs-offline-evaluation", "content": "Evaluators can be deployed on Humanloop to support both testing new versions of your Prompts and Tools during development and for monitoring live apps that are already in production.", "hierarchy": { "h2": { - "id": "online-monitoring-vs-offline-evaluation-", - "title": "Online Monitoring vs. Offline Evaluation " + "id": "online-monitoring-vs-offline-evaluation", + "title": "Online Monitoring vs. Offline Evaluation" } }, "level": "h2", "level_title": "Online Monitoring vs. Offline Evaluation" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-online-monitoring-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-online-monitoring", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/overview", @@ -3888,23 +3863,23 @@ ], "authed": false, "type": "markdown", - "hash": "#online-monitoring-", + "hash": "#online-monitoring", "content": "Evaluators are run against the Logs generated by your AI applications. Typically, they are used to monitor deployed model performance over time and check for drift or degradation in performance.\nThe Evaluator in this case only takes a single argument - the log generated by the model. 
The Evaluator is expected to return a judgment based on the Log,\nwhich can be used to trigger alerts or other actions in your monitoring system.\nSee our Monitoring guides for more details.", "hierarchy": { "h2": { - "id": "online-monitoring-", - "title": "Online Monitoring " + "id": "online-monitoring", + "title": "Online Monitoring" }, "h3": { - "id": "online-monitoring-", - "title": "Online Monitoring " + "id": "online-monitoring", + "title": "Online Monitoring" } }, "level": "h3", "level_title": "Online Monitoring" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-offline-evaluations-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-offline-evaluations", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/overview", @@ -3927,23 +3902,23 @@ ], "authed": false, "type": "markdown", - "hash": "#offline-evaluations-", + "hash": "#offline-evaluations", "content": "Offline Evaluators are combined with predefined Datasets in order to evaluate your application as you iterate in your prompt engineering workflow, or to test for regressions in a CI environment.\nA test Dataset is a collection of Datapoints, which are roughly analogous to unit tests or test cases in traditional programming. Each datapoint specifies inputs to your model and (optionally) some target data.\nWhen you run an offline evaluation, a Log needs to be generated using the inputs of each Datapoint and the version of the application being evaluated. Evaluators then need to be run against each Log to provide judgements,\nwhich are then aggregated to provide an overall score for the application. Evaluators in this case take the generated Log and the testcase datapoint that gave rise to it as arguments.\nSee our guides on creating Datasets and running Evaluations for more details.", "hierarchy": { "h2": { - "id": "offline-evaluations-", - "title": "Offline Evaluations " + "id": "offline-evaluations", + "title": "Offline Evaluations" }, "h3": { - "id": "offline-evaluations-", - "title": "Offline Evaluations " + "id": "offline-evaluations", + "title": "Offline Evaluations" } }, "level": "h3", "level_title": "Offline Evaluations" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-humanloop-runtime-vs-your-runtime-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-humanloop-runtime-vs-your-runtime", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/overview", @@ -3966,19 +3941,19 @@ ], "authed": false, "type": "markdown", - "hash": "#humanloop-runtime-vs-your-runtime-", + "hash": "#humanloop-runtime-vs-your-runtime", "content": "Evaluations require the following to be generated:\nLogs for the datapoints.\n\nEvaluator results for those generated logs.\n\n\nUsing the Evaluations API, Humanloop offers the ability to generate logs either within the Humanloop runtime, or within your own runtime.\nSimilarly, Evaluators which are defined within the Humanloop UI can be executed in the Humanloop runtime, whereas Evaluators defined in your code can be executed in your runtime and the results posted back to Humanloop.\nThis provides flexibility for supporting more complex evaluation workflows.", "hierarchy": { "h2": { - "id": "humanloop-runtime-vs-your-runtime-", - "title": "Humanloop runtime vs. your runtime " + "id": "humanloop-runtime-vs-your-runtime", + "title": "Humanloop runtime vs. 
your runtime" } }, "level": "h2", "level_title": "Humanloop runtime vs. your runtime" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-cicd-integration-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.overview-cicd-integration", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/overview", @@ -4001,12 +3976,12 @@ ], "authed": false, "type": "markdown", - "hash": "#cicd-integration-", + "hash": "#cicd-integration", "content": "Humanloop's evaluation framework can be integrated into your CI/CD pipeline, allowing you to automatically test your AI applications as part of your development workflow. This integration enables you to catch potential regressions or performance issues before they make it to production.\nOne powerful way to leverage this integration is by triggering evaluation runs in GitHub Actions and having the results commented directly on your Pull Requests. This provides immediate feedback to developers and reviewers about the impact of changes on your AI application's performance.\nTo set up CI/CD evaluation follow the guide on CI/CD Integration.", "hierarchy": { "h2": { - "id": "cicd-integration-", - "title": "CI/CD Integration " + "id": "cicd-integration", + "title": "CI/CD Integration" } }, "level": "h2", @@ -4041,11 +4016,10 @@ "authed": false, "type": "markdown", "description": "In this guide, we will walk through comparing the outputs from multiple Prompts side-by-side using the Humanloop Editor environment and using diffs to help debugging.", - "content": "You can compare Prompt versions interactively side-by-side to get a sense for how their behaviour differs; before then triggering more systematic Evaluations.\nAll the interactions in Editor are stored as Logs within your Prompt and can be inspected further and added to a Dataset for Evaluations.", - "code_snippets": [] + "content": "You can compare Prompt versions interactively side-by-side to get a sense for how their behaviour differs; before then triggering more systematic Evaluations.\nAll the interactions in Editor are stored as Logs within your Prompt and can be inspected further and added to a Dataset for Evaluations." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.comparing-prompt-editor-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.comparing-prompt-editor-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/comparing-prompt-editor", @@ -4072,19 +4046,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.comparing-prompt-editor-compare-prompt-versions-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.comparing-prompt-editor-compare-prompt-versions", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/comparing-prompt-editor", @@ -4111,7 +4085,7 @@ ], "authed": false, "type": "markdown", - "hash": "#compare-prompt-versions-", + "hash": "#compare-prompt-versions", "content": "In this example we will use a simple Support Agent Prompt that answers user queries about Humanloop's product and docs.\n\n\n\n\nCreate a new version of your Prompt\nOpen your Prompt in the Editor and expand Parameters and change some details such as the choice of Model.\nIn this example, we change from gpt-4o to gpt-4o-mini.\nThis will create a new uncommitted version of the Prompt.\n\n\nNow commit the new version of your Prompt by selecting the blue Commit button over Parameters and providing a helpful commit message like:\nLoad up two versions of your Prompt in the Editor\nTo load up the previous version side-by-side, select the menu beside the Load button and select the New panel option (depending on your screen real-estate, you can add more than 2 panels).\n\n\nThen select the Load button in the new panel and select another version of your Prompt to compare.\n\n\nCompare the outputs of both versions\nNow you can run the same user messages through both models to compare their behaviours live side-by-side.", "code_snippets": [ { @@ -4125,15 +4099,15 @@ ], "hierarchy": { "h2": { - "id": "compare-prompt-versions-", - "title": "Compare Prompt versions " + "id": "compare-prompt-versions", + "title": "Compare Prompt versions" } }, "level": "h2", "level_title": "Compare Prompt versions" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.comparing-prompt-editor-view-prompt-diff-for-debugging-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.comparing-prompt-editor-view-prompt-diff-for-debugging", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/comparing-prompt-editor", @@ -4160,12 +4134,12 @@ ], "authed": false, "type": "markdown", - "hash": "#view-prompt-diff-for-debugging-", + "hash": "#view-prompt-diff-for-debugging", "content": "When debugging more complex Prompts, it's important to understand what changes were made between different versions. 
Humanloop provides a diff view to support this.\n\n\nNavigate to your Prompt dashboard\nIn the sidebar, select the Dashboard section under your Prompt file, where you will find a table of all your historic Prompt versions.\n\n\nSelect the versions to compare\nIn the table, select two rows you would like to understand the changes between. Then select the Compare Versions button above the table.\n\n\nWhile in the Compare tab, look for the Diff section.\n\nThis section will highlight the changes made between the selected versions, showing additions, deletions, and modifications.\n\nUse this diff view to understand how specific changes in your prompt configuration affect the output.\n\n\nBy following these steps, you can effectively compare different versions of your Prompts and iterate on your instructions to improve performance.", "hierarchy": { "h2": { - "id": "view-prompt-diff-for-debugging-", - "title": "View Prompt diff for debugging " + "id": "view-prompt-diff-for-debugging", + "title": "View Prompt diff for debugging" } }, "level": "h2", @@ -4200,11 +4174,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create Datasets in Humanloop to define fixed examples for your projects, and build up a collection of input-output pairs for evaluation and fine-tuning.\nIn this guide, we will walk through the different ways to create Datasets on Humanloop.", - "content": "Datasets are a collection of input-output pairs that can be used to evaluate your Prompts, Tools or even Evaluators.\nThis guide will show you how to create Datasets in Humanloop in three different ways:\nCreate a Dataset from existing Logs - useful for curating Datasets based on how your AI application has been behaving in the wild.\n\nUpload data from CSV - useful for quickly uploading existing tabular data you've collected outside of Humanloop.\n\nUpload via API - useful for uploading more complex Datasets that may have nested JSON structures, which are difficult to represent in tabular .CSV format, and for integrating with your existing data pipelines.", - "code_snippets": [] + "content": "Datasets are a collection of input-output pairs that can be used to evaluate your Prompts, Tools or even Evaluators.\nThis guide will show you how to create Datasets in Humanloop in three different ways:\nCreate a Dataset from existing Logs - useful for curating Datasets based on how your AI application has been behaving in the wild.\n\nUpload data from CSV - useful for quickly uploading existing tabular data you've collected outside of Humanloop.\n\nUpload via API - useful for uploading more complex Datasets that may have nested JSON structures, which are difficult to represent in tabular .CSV format, and for integrating with your existing data pipelines." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.create-dataset-create-a-dataset-from-logs-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.create-dataset-create-a-dataset-from-logs", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/create-dataset", @@ -4231,19 +4204,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-dataset-from-logs-", + "hash": "#create-a-dataset-from-logs", "content": "Prerequisites\nYou should have an existing Prompt on Humanloop and already generated some Logs.\nFollow our guide on creating a Prompt.\nSteps\nTo create a Dataset from existing Logs:\n\n\nNavigate to the Logs of your Prompt\nOur Prompt in this example is a Support Agent that answers user queries about Humanloop's product and docs:\n\n\nSelect a subset of the Logs to add\nFilter logs on a criterion of interest, such as the version of the Prompt used, then multi-select Logs.\nIn the menu in the top right of the page, select Add to dataset.\n\n\nAdd to a new Dataset\nProvide a name of the new Dataset and click Create (or you can click add to existing Dataset to append the selection to an existing Dataset).\nThen provide a suitable commit message describing the datapoints you've added.\n\n\nYou will then see the new Dataset appear at the same level in the filesystem as your Prompt.", "hierarchy": { "h2": { - "id": "create-a-dataset-from-logs-", - "title": "Create a Dataset from Logs " + "id": "create-a-dataset-from-logs", + "title": "Create a Dataset from Logs" } }, "level": "h2", "level_title": "Create a Dataset from Logs" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.create-dataset-upload-a-dataset-from-csv-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.create-dataset-upload-a-dataset-from-csv", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/create-dataset", @@ -4270,19 +4243,19 @@ ], "authed": false, "type": "markdown", - "hash": "#upload-a-dataset-from-csv-", + "hash": "#upload-a-dataset-from-csv", "content": "Prerequisites\nYou should have an existing Prompt on Humanloop with a variable defined with our double curly bracket syntax {{variable}}. If not, first follow our guide on creating a Prompt.\nIn this example, we'll use a Prompt that categorises user queries about Humanloop's product and docs by which feature they relate to.\n\n\nSteps\nTo create a dataset from a CSV file, we'll first create a CSV in Google Sheets that contains values for our Prompt variable {{query}} and then upload it to a Dataset on Humanloop.\n\n\nCreate a CSV file.\nIn our Google Sheets example below, we have a column called query which contains possible values for our Prompt variable {{query}}. You can include as many columns as you have variables in your Prompt template.\n\nThere is additionally a column called target which will populate the target output for the classifier Prompt. In this case, we use simple strings to define the target.\n\nMore complex Datapoints that contain messages and structured objects for targets are supported, but are harder to incorporate into a CSV file as they tend to be hard-to-read JSON. 
If you need more complex Datapoints, use the API instead.\n\n\n\n\nExport the Google Sheet to CSV\nIn Google Sheets, choose File → Download → Comma-separated values (.csv)\nCreate a new Dataset File\nOn Humanloop, select New at the bottom of the left hand sidebar, then select Dataset.\n\n\nClick Upload CSV\nFirst name your dataset when prompted in the sidebar, then select the Upload CSV button and drag and drop the CSV file you created above using the file explorer.\nYou will then be prompted to provide a commit message to describe the initial state of the dataset.\n\n\nFollow the link in the pop-up to inspect the Dataset created\nYou'll see the input-output pairs that were included in the CSV file and you can select the rows to inspect and edit the individual Datapoints.", "hierarchy": { "h2": { - "id": "upload-a-dataset-from-csv-", - "title": "Upload a Dataset from CSV " + "id": "upload-a-dataset-from-csv", + "title": "Upload a Dataset from CSV" } }, "level": "h2", "level_title": "Upload a Dataset from CSV" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.create-dataset-upload-a-dataset-via-api-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.create-dataset-upload-a-dataset-via-api", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/create-dataset", @@ -4309,7 +4282,7 @@ ], "authed": false, "type": "markdown", - "hash": "#upload-a-dataset-via-api-", + "hash": "#upload-a-dataset-via-api", "content": "Prerequisites\nIf you are using the SDK, the only prerequisite is to have the SDK installed and configured. If you are using the API directly, you will need to have an API key.\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. 
Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)\n\n\nSteps\nUsing the API is a great way to integrate Humanloop with your existing data pipeline or just to once-off upload a more complex Dataset that is hard to represent in a CSV file, such as one that contains an array of messages and JSON targets.\n\n\nPost data to the Datasets API\nWe first define some sample data that contains user messages and desired responses from our Support Agent Prompt and call the POST /datasets endpoint to upload it as follows:\n\n\nInspect the uploaded Dataset\nAfter running this code, in your Humanloop workspace you will now see a Dataset called Support Query Ground Truth (or whatever value was in path) with your sample data.", "code_snippets": [ { @@ -4335,15 +4308,15 @@ ], "hierarchy": { "h2": { - "id": "upload-a-dataset-via-api-", - "title": "Upload a Dataset via API " + "id": "upload-a-dataset-via-api", + "title": "Upload a Dataset via API" } }, "level": "h2", "level_title": "Upload a Dataset via API" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.create-dataset-next-steps-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.create-dataset-next-steps", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/create-dataset", @@ -4370,12 +4343,12 @@ ], "authed": false, "type": "markdown", - "hash": "#next-steps-", + "hash": "#next-steps", "content": "🎉 Now that you have Datasets defined in Humanloop, you can leverage our Evaluations feature to systematically measure and improve the performance of your AI applications.\nSee our guides on setting up Evaluators and Running an Evaluation to get started.", "hierarchy": { "h1": { - "id": "next-steps-", - "title": "Next steps " + "id": "next-steps", + "title": "Next steps" } }, "level": "h1", @@ -4410,11 +4383,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create a code Evaluators in Humanloop to assess the performance of your AI applications. 
This guide covers setting up an offline evaluator, writing evaluation logic, and using the debug console.\nIn this guide we will show how to create and use a code Evaluator in Humanloop", - "content": "A code Evaluator is a Python function that takes a generated Log (and optionally a testcase Datapoint if comparing to expected results) as input and returns a judgement.\nThe judgement is in the form of a boolean or number that measures some criteria of the generated Log defined within the code.\nCode Evaluators provide a flexible way to evaluate the performance of your AI applications, allowing you to re-use existing evaluation packages as well as define custom evaluation heuristics.\nWe support a fully featured Python environment; details on the supported packages can be found in the environment reference", - "code_snippets": [] + "content": "A code Evaluator is a Python function that takes a generated Log (and optionally a testcase Datapoint if comparing to expected results) as input and returns a judgement.\nThe judgement is in the form of a boolean or number that measures some criteria of the generated Log defined within the code.\nCode Evaluators provide a flexible way to evaluate the performance of your AI applications, allowing you to re-use existing evaluation packages as well as define custom evaluation heuristics.\nWe support a fully featured Python environment; details on the supported packages can be found in the environment reference" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.code-based-evaluator-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.code-based-evaluator-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/code-based-evaluator", @@ -4441,19 +4413,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You should have an existing Prompt to evaluate and already generated some Logs.\nFollow our guide on creating a Prompt.\nIn this example, we'll reference a Prompt that categorises a user query about Humanloop's product and docs by which feature it relates to.", "hierarchy": { "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.code-based-evaluator-create-a-code-evaluator-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.code-based-evaluator-create-a-code-evaluator", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/code-based-evaluator", @@ -4480,7 +4452,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-code-evaluator-", + "hash": "#create-a-code-evaluator", "content": "Create a new Evaluator\nClick the New button at the bottom of the left-hand sidebar, select Evaluator, then select Code.\n\n\n\n\nGive the Evaluator a name when prompted in the sidebar, for example Category Validator.\n\n\nDefine the Evaluator code\nAfter creating the Evaluator, you will automatically be taken to the code editor.\nFor this example, our Evaluator will check that the feature category returned by the Prompt is from the list of allowed feature categories. 
We want to ensure our categoriser isn't hallucinating new features.\nMake sure the Mode of the Evaluator is set to Online in the options on the left.\n\nCopy and paste the following code into the code editor:\n\n\n\n\nYou can define multiple functions in the code Editor to organize your\nevaluation logic. The final function defined is used as the main Evaluator\nentry point that takes the Log argument and returns a valid judgement.\nDebug the code with Prompt Logs\nIn the debug console beneath where you pasted the code, click Select Prompt or Dataset and find and select the Prompt you're evaluating.\nThe debug console will load a sample of Logs from that Prompt.\n\n\n\n\nClick the Run button at the far right of one of the loaded Logs to trigger a debug run. This causes the code to be executed with the selected Log as input and populates the Result column.\n\nInspect the output of the executed code by selecting the arrow to the right of Result.\n\n\n\n\nCommit the code\nNow that you've validated the behaviour, commit the code by selecting the Commit button at the top right of the Editor and provide a suitable commit message describing your changes.\nInspect Evaluator logs\nNavigate to the Logs tab of the Evaluator to see and debug all the historic usages of this Evaluator.", "code_snippets": [ { @@ -4496,15 +4468,15 @@ ], "hierarchy": { "h2": { - "id": "create-a-code-evaluator-", - "title": "Create a code Evaluator " + "id": "create-a-code-evaluator", + "title": "Create a code Evaluator" } }, "level": "h2", "level_title": "Create a code Evaluator" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.code-based-evaluator-monitor-a-prompt-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.code-based-evaluator-monitor-a-prompt", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/code-based-evaluator", @@ -4531,19 +4503,19 @@ ], "authed": false, "type": "markdown", - "hash": "#monitor-a-prompt-", + "hash": "#monitor-a-prompt", "content": "Now that you have an Evaluator, you can use it to monitor the performance of your Prompt by linking it so that it is automatically run on new Logs.\n\n\nLink the Evaluator to the Prompt\nNavigate to the Dashboard of your Prompt\n\nSelect the Monitoring button above the graph and select Connect Evaluators.\n\nFind and select the Evaluator you just created and click Choose.\n\n\n\n\n\n\nYou can link to a deployed version of the Evaluator by choosing the\nenvironment such as production, or you can link to a specific version of the\nEvaluator. If you want changes deployed to your Evaluator to be automatically\nreflected in Monitoring, link to the environment, otherwise link to a specific\nversion.\nThis linking results in: - An additional graph on your Prompt dashboard showing the Evaluator results over time. - An additional column in your Prompt Versions table showing the aggregated Evaluator results for each version. - An additional column in your Logs table showing the Evaluator results for each Log.\nGenerate new Logs\nNavigate to the Editor tab of your Prompt and generate a new Log by entering a query and clicking Run.\nInspect the Monitoring results\nNavigate to the Logs tab of your Prompt and see the result of the linked Evaluator against the new Log. 
You can filter on this value in order to create a Dataset of interesting examples.", "hierarchy": { "h2": { - "id": "monitor-a-prompt-", - "title": "Monitor a Prompt " + "id": "monitor-a-prompt", + "title": "Monitor a Prompt" } }, "level": "h2", "level_title": "Monitor a Prompt" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.code-based-evaluator-next-steps-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.code-based-evaluator-next-steps", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/code-based-evaluator", @@ -4570,12 +4542,12 @@ ], "authed": false, "type": "markdown", - "hash": "#next-steps-", + "hash": "#next-steps", "content": "Explore AI Evaluators and Human Evaluators to complement your code-based judgements for more qualitative and subjective criteria.\n\nCombine your Evaluator with a Dataset to run Evaluations to systematically compare the performance of different versions of your AI application.", "hierarchy": { "h2": { - "id": "next-steps-", - "title": "Next steps " + "id": "next-steps", + "title": "Next steps" } }, "level": "h2", @@ -4610,11 +4582,10 @@ "authed": false, "type": "markdown", "description": "Learn how to use LLM as a judge to check for PII in Logs.\nIn this guide, we will set up an LLM evaluator to check for PII (Personally Identifiable Information) in Logs.", - "content": "LLMs can be used for evaluating the quality and characteristics of other AI-generated outputs. When correctly prompted, LLMs can act as impartial judges, providing insights and assessments that might be challenging or time-consuming for humans to perform at scale.\nIn this guide, we'll explore how to setup an LLM as an AI Evaluator in Humanloop, demonstrating their effectiveness in assessing various aspects of AI-generated content, such as checking for the presence of Personally Identifiable Information (PII).\nAn AI Evaluator is a Prompt that takes attributes from a generated Log (and optionally from a testcase Datapoint if comparing to expected results) as context and returns a judgement.\nThe judgement is in the form of a boolean or number that measures some criteria of the generated Log defined within the Prompt instructions.", - "code_snippets": [] + "content": "LLMs can be used for evaluating the quality and characteristics of other AI-generated outputs. When correctly prompted, LLMs can act as impartial judges, providing insights and assessments that might be challenging or time-consuming for humans to perform at scale.\nIn this guide, we'll explore how to setup an LLM as an AI Evaluator in Humanloop, demonstrating their effectiveness in assessing various aspects of AI-generated content, such as checking for the presence of Personally Identifiable Information (PII).\nAn AI Evaluator is a Prompt that takes attributes from a generated Log (and optionally from a testcase Datapoint if comparing to expected results) as context and returns a judgement.\nThe judgement is in the form of a boolean or number that measures some criteria of the generated Log defined within the Prompt instructions." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.llm-as-a-judge-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.llm-as-a-judge-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/llm-as-a-judge", @@ -4641,19 +4612,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You should have an existing Prompt to evaluate and already generated some Logs.\nFollow our guide on creating a Prompt.\nIn this example we will use a simple Support Agent Prompt that answers user queries about Humanloop's product and docs.", "hierarchy": { "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.llm-as-a-judge-create-an-llm-evaluator-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.llm-as-a-judge-create-an-llm-evaluator", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/llm-as-a-judge", @@ -4680,7 +4651,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-an-llm-evaluator-", + "hash": "#create-an-llm-evaluator", "content": "Create a new Evaluator\nClick the New button at the bottom of the left-hand sidebar, select Evaluator, then select AI.\n\nGive the Evaluator a name when prompted in the sidebar, for example PII Identifier.\n\n\nDefine the Evaluator Prompt\nAfter creating the Evaluator, you will automatically be taken to the Evaluator editor.\nFor this example, our Evaluator will check whether the request to, or response from, our support agent contains PII. We want to understand whether this is a potential issue that we wish to mitigate with additional Guardrails in our agent workflow.\nMake sure the Mode of the Evaluator is set to Online in the options on the left.\n\nCopy and paste the following Prompt into the Editor:\n\n\n\n\nIn the Prompt Editor for an LLM evaluator, you have access to the underlying log you are evaluating as well as the testcase Datapoint that gave rise to it if you are using a Dataset for offline Evaluations.\nThese are accessed with the standard {{ variable }} syntax, enhanced with a familiar dot notation to pick out specific values from inside the log and testcase objects.\nFor example, suppose you are evaluating a Log object like this.\nIn the LLM Evaluator Prompt, {{ log.inputs.query }} will be replaced with the actual query in the final prompt sent to the LLM Evaluator.\nIn order to get access to the fully populated Prompt that was sent in the underlying Log, you can use the special variable {{ log_prompt }}.\nDebug the code with Prompt Logs\nIn the debug console beneath where you pasted the code, click Select Prompt or Dataset and find and select the Prompt you're evaluating.\nThe debug console will load a sample of Logs from that Prompt.\n\n\n\n\nClick the Run button at the far right of one of the loaded Logs to trigger a debug run. 
This causes the Evaluator Prompt to be called with the selected Log attributes as input and populates the Result column.\n\nInspect the output of the executed code by selecting the arrow to the right of Result.\n\n\n\n\nCommit the code\nNow that you've validated the behaviour, commit the Evaluator Prompt by selecting the Commit button at the top right of the Editor and provide a suitable commit message describing your changes.\nInspect Evaluator logs\nNavigate to the Logs tab of the Evaluator to see and debug all the historic usages of this Evaluator.", "code_snippets": [ { @@ -4702,15 +4673,15 @@ ], "hierarchy": { "h2": { - "id": "create-an-llm-evaluator-", - "title": "Create an LLM Evaluator " + "id": "create-an-llm-evaluator", + "title": "Create an LLM Evaluator" } }, "level": "h2", "level_title": "Create an LLM Evaluator" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.llm-as-a-judge-next-steps-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.llm-as-a-judge-next-steps", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/llm-as-a-judge", @@ -4737,12 +4708,12 @@ ], "authed": false, "type": "markdown", - "hash": "#next-steps-", + "hash": "#next-steps", "content": "Explore Code Evaluators and Human Evaluators to complement your AI judgements.\n\nCombine your Evaluator with a Dataset to run Evaluations to systematically compare the performance of different versions of your AI application.", "hierarchy": { "h2": { - "id": "next-steps-", - "title": "Next steps " + "id": "next-steps", + "title": "Next steps" } }, "level": "h2", @@ -4777,11 +4748,10 @@ "authed": false, "type": "markdown", "description": "Learn how to set up a Human Evaluator in Humanloop. Human Evaluators allow your subject-matter experts and end-users to provide feedback on Prompt Logs.\nIn this guide we will show how to create and use a Human Evaluator in Humanloop", - "content": "Human Evaluators allow your subject-matter experts and end-users to provide feedback on Prompt Logs.\nThese Evaluators can be attached to Prompts and Evaluations.", - "code_snippets": [] + "content": "Human Evaluators allow your subject-matter experts and end-users to provide feedback on Prompt Logs.\nThese Evaluators can be attached to Prompts and Evaluations." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.human-evaluators-creating-a-human-evaluator-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.human-evaluators-creating-a-human-evaluator", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/human-evaluators", @@ -4808,19 +4778,19 @@ ], "authed": false, "type": "markdown", - "hash": "#creating-a-human-evaluator-", + "hash": "#creating-a-human-evaluator", "content": "This section will bring you through creating and setting up a Human Evaluator.\nAs an example, we'll use a \"Tone\" Evaluator that allows feedback to be provided by\nselecting from a list of options.\n\n\nCreate a new Evaluator\nClick the New button at the bottom of the left-hand sidebar, select Evaluator, then select Human.\n\n\nNew Evaluator dialog\nGive the Evaluator a name when prompted in the sidebar, for example \"Tone\".\n\n\nCreated Human Evaluator being renamed to \"Tone\"\nDefine the Judgment Schema\nAfter creating the Evaluator, you will automatically be taken to the Editor.\nHere, you can define the schema detailing the kinds of judgments to be applied for the Evaluator.\nThe Evaluator will be initialized to a 5-point rating scale by default.\nIn this example, we'll set up a feedback schema for a \"Tone\" Evaluator.\nSee the Return types documentation for more information on return types.\nSelect Multi-select within the Return type dropdown. \"Multi-select\" allows you to apply multiple options to a single Log.\n\nAdd the following options, and set the valence for each:\nEnthusiastic [positive]\n\nInformative [positive]\n\nRepetitive [negative]\n\nTechnical [negative]\n\n\n\nUpdate the instructions to \"Select all options that apply to the output.\"\n\n\nTone evaluator set up with options and instructions\nCommit and deploy the Evaluator\nClick Commit in the top-right corner.\n\nEnter \"Added initial tone options\" as a commit message. 
Click Commit.\n\n\nCommit dialog over the \"Tone\" Evaluator\nIn the \"Version committed\" dialog, click Deploy.\n\nSelect the checkbox for your default Environment (usually named \"production\"), and confirm your deployment.\n\n\nDialog deploying the \"Tone\" Evaluator to the \"production\" Environment\n:tada: You've now created a Human Evaluator that can be used to collect feedback on Prompt Logs.", "hierarchy": { "h2": { - "id": "creating-a-human-evaluator-", - "title": "Creating a Human Evaluator " + "id": "creating-a-human-evaluator", + "title": "Creating a Human Evaluator" } }, "level": "h2", "level_title": "Creating a Human Evaluator" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.human-evaluators-next-steps-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.human-evaluators-next-steps", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/human-evaluators", @@ -4847,12 +4817,12 @@ ], "authed": false, "type": "markdown", - "hash": "#next-steps-", + "hash": "#next-steps", "content": "Use Human Evaluators in Evaluations to collect annotations on Prompt Logs from subject-matter experts.\n\nAttach Human Evaluators to Prompts to collect end-user feedback", "hierarchy": { "h2": { - "id": "next-steps-", - "title": "Next steps " + "id": "next-steps", + "title": "Next steps" } }, "level": "h2", @@ -4887,11 +4857,10 @@ "authed": false, "type": "markdown", "description": "How to use Humanloop to Evaluate multiple different Prompts across a Dataset.\nIn this guide, we will walk through how to run an Evaluation to compare multiple different Prompts across a Dataset when Prompts and Evaluators are run on Humanloop.", - "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan\nAn Evaluation on Humanloop leverages a Dataset, a set of Evaluators and different versions of a Prompt to compare.\nThe Dataset contains testcases describing the inputs (and optionally the expected results) for a given task. The Evaluators define the criteria for judging the performance of the Prompts when executed using these inputs.\nEach of the Prompt versions you want to compare are run against the same Dataset producing Logs; judgements are then provided by Evaluators.\nThe Evaluation then uses these judgements to provide a summary report of the performance allowing you to systematically compare the performance of the different Prompt versions.", - "code_snippets": [] + "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan\nAn Evaluation on Humanloop leverages a Dataset, a set of Evaluators and different versions of a Prompt to compare.\nThe Dataset contains testcases describing the inputs (and optionally the expected results) for a given task. The Evaluators define the criteria for judging the performance of the Prompts when executed using these inputs.\nEach of the Prompt versions you want to compare are run against the same Dataset producing Logs; judgements are then provided by Evaluators.\nThe Evaluation then uses these judgements to provide a summary report of the performance allowing you to systematically compare the performance of the different Prompt versions." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-evaluation-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-evaluation-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/run-evaluation", @@ -4918,19 +4887,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "A set of Prompt versions you want to compare - see the guide on creating Prompts.\n\nA Dataset containing testcases for the task - see the guide on creating a Dataset.\n\nAt least one Evaluator to judge the performance of the Prompts - see the guides on creating Code, AI and Human Evaluators.\n\n\n\n\nYou can combine multiple different types of Evaluator in a single Evaluation.\nFor example, you might use an AI Evaluator to judge the quality of the output\nof the Prompt and a code Evaluator to check the output is below some latency\nand cost threshold.\nFor this example, we're going to evaluate the performance of a Support Agent that responds to user queries about Humanloop's product and documentation.\nOur goal is to understand which base model between gpt-4o, gpt-4o-mini and claude-3-5-sonnet-20240620 is most appropriate for this task.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-evaluation-run-an-evaluation-via-ui-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-evaluation-run-an-evaluation-via-ui", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/run-evaluation", @@ -4957,19 +4926,19 @@ ], "authed": false, "type": "markdown", - "hash": "#run-an-evaluation-via-ui-", + "hash": "#run-an-evaluation-via-ui", "content": "For Product and AI teams, the ability to trigger Evaluations against a Dataset within the Humanloop UI allows them to systematically compare the performance to make informed decisions on which to deploy.\n\n\nNavigate to the Evaluations tab of your Prompt\nOn the left-hand sidebar, click on the Evaluations tab beneath your Prompt.\n\nClick the Evaluate button top right, which presents the setup panel for the Evaluation.\n\n\n\n\nSetup the Evaluation\nSelect a Dataset using +Dataset.\n\nAdd the Prompt versions you want to compare using +Version - note you can multi-select versions in the modal resulting in multiple columns.\n\nAdd the Evaluators you want to use to judge the performance of the Prompts using +Evaluator. By default, Cost, Tokens and Latency Evaluators are pre-selected.\n\n\n\n\nBy default the system will re-use Logs if they exist for the chosen Dataset, Prompts and Evaluators. This makes it easy to extend reports without paying the cost of re-running your Prompts and Evaluators.\nIf you want to force the system to re-run the Prompts against the Dataset producing a new batch of Logs, you can select the Manage button in the setup panel and choose +New Batch.\nSelect Save to trigger the Evaluation report. You will see the report below the setup panel populate with a progress bar and status pending as the Logs are generated on Humanloop.\n\n\n\n\n\n\nThis guide assumes both the Prompt and Evaluator Logs are generated using the\nHumanloop runtime. 
For certain use cases where more flexibility is required,\nthe runtime for producing Logs instead lives in your code - see our guide on\nLogging, which also works with our\nEvaluations feature. We have a guide for how to run Evaluations with Logs\ngenerated in your code coming soon!\nReview the results\nIt will generally take at least a couple of minutes before the Evaluation report is marked as completed as the system generates all the required Prompt and Evaluator Logs.\nOnce the report is completed, you can review the performance of the different Prompt versions using the Evaluators you selected.\nThe top spider plot provides you with a summary of the average Evaluator performance across all the Prompt versions.\nIn our case, gpt-4o, although slightly slower and more expensive on average, is significantly better when it comes to User Satisfaction.\n\n\n\n\nBelow the spider plot, you can see the breakdown of performance per Evaluator.\n\n\n\n\nTo drill into and debug the Logs that were generated, select the Logs button top right of the Evaluation report.\nThis brings you to the Evaluation Logs table and you can filter and review logs to understand the performance better and replay Logs in our Prompt Editor.", "hierarchy": { "h2": { - "id": "run-an-evaluation-via-ui-", - "title": "Run an Evaluation via UI " + "id": "run-an-evaluation-via-ui", + "title": "Run an Evaluation via UI" } }, "level": "h2", "level_title": "Run an Evaluation via UI" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-evaluation-run-an-evaluation-via-api-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-evaluation-run-an-evaluation-via-api", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/run-evaluation", @@ -4996,19 +4965,19 @@ ], "authed": false, "type": "markdown", - "hash": "#run-an-evaluation-via-api-", + "hash": "#run-an-evaluation-via-api", "content": "For Engineering teams, the ability to trigger Evaluations via the API allows them to integrate the Evaluation process into their existing pipelines.\n\n\nThis content is currently under development. 
Please refer to our V4\ndocumentation for the current docs.", "hierarchy": { "h2": { - "id": "run-an-evaluation-via-api-", - "title": "Run an Evaluation via API " + "id": "run-an-evaluation-via-api", + "title": "Run an Evaluation via API" } }, "level": "h2", "level_title": "Run an Evaluation via API" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-evaluation-next-steps-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-evaluation-next-steps", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/run-evaluation", @@ -5035,16 +5004,16 @@ ], "authed": false, "type": "markdown", - "hash": "#next-steps-", + "hash": "#next-steps", "content": "Incorporate this Evaluation process into your Prompt engineering and deployment workflow.\n\nSetup Evaluations where the runtime for producing Logs lives in your code - see our guide on Logging.\n\nUtilise Evaluations as part of your CI/CD pipeline", "hierarchy": { "h2": { - "id": "next-steps-", - "title": "Next Steps " + "id": "next-steps", + "title": "Next Steps" }, "h3": { - "id": "next-steps-", - "title": "Next Steps " + "id": "next-steps", + "title": "Next Steps" } }, "level": "h3", @@ -5079,11 +5048,10 @@ "authed": false, "type": "markdown", "description": "Learn how to set up an Evaluation that uses Human Evaluators to collect annotations from your subject-matter experts.\nA walkthrough for setting up Human Evaluators in Evaluations to allow subject-matter experts to evaluate your LLM outputs.", - "content": "By attaching Human Evaluators to your Evaluations, you can collect annotations from your subject-matter experts\nto evaluate the quality of your Prompts' outputs.", - "code_snippets": [] + "content": "By attaching Human Evaluators to your Evaluations, you can collect annotations from your subject-matter experts\nto evaluate the quality of your Prompts' outputs." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-human-evaluation-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-human-evaluation-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/run-human-evaluation", @@ -5110,19 +5078,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You have set up a Human Evaluator appropriate for your use-case. If not, follow our guide to create a Human Evaluator.\n\nYou are familiar with setting up Evaluations in Humanloop. 
See our guide to creating Evaluations.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-human-evaluation-using-a-human-evaluator-in-an-evaluation-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.run-human-evaluation-using-a-human-evaluator-in-an-evaluation", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/run-human-evaluation", @@ -5149,12 +5117,12 @@ ], "authed": false, "type": "markdown", - "hash": "#using-a-human-evaluator-in-an-evaluation-", + "hash": "#using-a-human-evaluator-in-an-evaluation", "content": "Create a new Evaluation\nGo to the Evaluations tab of a Prompt.\n\nClick Evaluate in the top-right corner.\n\nSet up your Evaluation by selecting a Dataset and some Prompt versions to evaluate. See our guide to Running an Evaluation in the UI for more details.\n\nClick the + Evaluator button to add a Human Evaluator to the Evaluation. This will bring up a dialog where you can select the\nHuman Evaluator you created earlier. Within this dialog, select the \"Tone\" Evaluator, and then select its latest version which should be at the top.\n\nClick + Choose to add the Evaluator to the Evaluation.\n\n\nEvaluation set up with \"Tone\" Evaluator\nClick Save/Run to create the Evaluation and start generating Logs to evaluate.\n\n\nApply judgments to generated Logs\nWhen you save an Evaluation, Humanloop will automatically generate Logs using the specified Prompt versions and Dataset.\nWhen the required Logs are generated, a \"Human Evaluations incomplete\" message will be displayed in a toolbar at the top of the Evaluation.\nGo to the Logs tab of the Evaluation to view the generated Logs.\n\n\nEvaluation Logs tab\nExpand the drawer for a Log by clicking on the row to view the Log details. Here, you can view the generated output and apply judgments to the Log.\n\n\nEvaluation Log drawer\nWhen you've completed applying judgments, click on Mark as complete in the toolbar at the top of the page. This will update the Evaluation's status.\n\n\nCompleted Evaluation\nReview judgments stats\nGo to the Overview tab of the Evaluation to view the aggregate stats of the judgments applied to the Logs.\nOn this page, an aggregate view of the judgments provided to each Prompt version is displayed in a table, allowing you to compare the performance of different Prompt versions.\nEvaluation Overview tab", "hierarchy": { "h2": { - "id": "using-a-human-evaluator-in-an-evaluation-", - "title": "Using a Human Evaluator in an Evaluation " + "id": "using-a-human-evaluator-in-an-evaluation", + "title": "Using a Human Evaluator in an Evaluation" } }, "level": "h2", @@ -5189,11 +5157,10 @@ "authed": false, "type": "markdown", "description": "Learn how to automate LLM evaluations as part of your CI/CD pipeline using Humanloop and GitHub Actions.\nIn this guide, we will walk through setting up CI/CD integration for Humanloop evaluations using GitHub Actions.", - "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan", - "code_snippets": [] + "content": "This feature is not available for the Free tier. 
Please contact us if you wish\nto learn more about our Enterprise plan" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.cicd-integration-setting-up-cicd-integration-with-github-actions-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.cicd-integration-setting-up-cicd-integration-with-github-actions", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/cicd-integration", @@ -5220,19 +5187,19 @@ ], "authed": false, "type": "markdown", - "hash": "#setting-up-cicd-integration-with-github-actions-", + "hash": "#setting-up-cicd-integration-with-github-actions", "content": "Integrating Humanloop evaluations into your CI/CD pipeline allows you to automatically test your AI applications as part of your development workflow. This guide will walk you through setting up this integration using GitHub Actions.", "hierarchy": { "h2": { - "id": "setting-up-cicd-integration-with-github-actions-", - "title": "Setting up CI/CD Integration with GitHub Actions " + "id": "setting-up-cicd-integration-with-github-actions", + "title": "Setting up CI/CD Integration with GitHub Actions" } }, "level": "h2", "level_title": "Setting up CI/CD Integration with GitHub Actions" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.cicd-integration-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.cicd-integration-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/cicd-integration", @@ -5259,23 +5226,23 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "A GitHub repository for your project\n\nA Humanloop account with access to Evaluations\n\nA Prompt and Dataset set up in Humanloop\n\nAn Evaluator configured in Humanloop", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.cicd-integration-steps-to-set-up-cicd-integration-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.evaluation.guides.cicd-integration-steps-to-set-up-cicd-integration", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/evaluation/guides/cicd-integration", @@ -5302,7 +5269,7 @@ ], "authed": false, "type": "markdown", - "hash": "#steps-to-set-up-cicd-integration-", + "hash": "#steps-to-set-up-cicd-integration", "content": "Create a GitHub Actions Workflow\nIn your GitHub repository, create a new file .github/workflows/humanloop-eval.yml with the following content:\n\n\nThis content is currently under development. 
Please refer to our V4\ndocumentation for the current docs.", "code_snippets": [ { @@ -5316,8 +5283,8 @@ ], "hierarchy": { "h2": { - "id": "steps-to-set-up-cicd-integration-", - "title": "Steps to Set Up CI/CD Integration " + "id": "steps-to-set-up-cicd-integration", + "title": "Steps to Set Up CI/CD Integration" } }, "level": "h2", @@ -5348,11 +5315,10 @@ "authed": false, "type": "markdown", "description": "Discover how to implement Humanloop's advanced LLM monitoring system for real-time performance tracking, evaluation, and optimization of your AI models in production environments.\nHumanloop allows you to monitor LLMs which extends beyond simple logging but also allows you to track and police the high-level behavior of your LLMs", - "content": "At the core of Humanloop's monitoring system are evaluators - functions you define that analyze LLM-generated logs and produce evaluations. These evaluations can be boolean flags or numerical scores, providing insights into how well your model is performing based on criteria specific to your use case.\nEvaluators in the monitoring context act as continuous checks on your deployed models, helping you maintain quality, detect anomalies, and ensure your LLMs are behaving as expected in the production environment.", - "code_snippets": [] + "content": "At the core of Humanloop's monitoring system are evaluators - functions you define that analyze LLM-generated logs and produce evaluations. These evaluations can be boolean flags or numerical scores, providing insights into how well your model is performing based on criteria specific to your use case.\nEvaluators in the monitoring context act as continuous checks on your deployed models, helping you maintain quality, detect anomalies, and ensure your LLMs are behaving as expected in the production environment." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.overview-types-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.overview-types", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/overview", @@ -5375,19 +5341,19 @@ ], "authed": false, "type": "markdown", - "hash": "#types-", + "hash": "#types", "content": "Humanloop supports three types of evaluators for monitoring:\nCode based - Using our in-browser editor, define simple Python functions to act as evaluators. These run automatically on your logs.\n\nLLM as judge - Use LLMs to evaluate the outputs of other Prompts or Tools. Our editor lets you create prompts that pass log data to a model for assessment. This is ideal for subjective evaluations like tone and factual accuracy. These also run automatically.\n\nHuman evaluators - Collect feedback from human evaluators using our feedback API. 
This allows you to incorporate human judgment or in-app actions into your monitoring process.\n\n\nBoth code-based and LLM-based evaluators run automatically on your logs, while human evaluators provide a way to incorporate manual feedback when needed.", "hierarchy": { "h2": { - "id": "types-", - "title": "Types " + "id": "types", + "title": "Types" } }, "level": "h2", "level_title": "Types" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.overview-monitoring-vs-evaluation-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.overview-monitoring-vs-evaluation", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/overview", @@ -5410,12 +5376,12 @@ ], "authed": false, "type": "markdown", - "hash": "#monitoring-vs-evaluation-", + "hash": "#monitoring-vs-evaluation", "content": "While monitoring and evaluation are closely related, they serve different purposes in the lifecycle of your LLM-powered applications:\nMonitoring is the continuous assessment of your deployed models in production environments. It involves real-time analysis of logs generated by your live system, providing immediate insights into performance and behavior.\n\nEvaluation, on the other hand, typically refers to offline testing and assessment during the development phase or for periodic performance checks.\n\n\nHumanloop's monitoring capabilities allow you to set up evaluators that automatically run on logs from your production environment, giving you real-time insights into your model's performance.\nFor detailed information on offline evaluation and testing during development, please refer to our Evaluation guide.", "hierarchy": { "h2": { - "id": "monitoring-vs-evaluation-", - "title": "Monitoring vs Evaluation " + "id": "monitoring-vs-evaluation", + "title": "Monitoring vs Evaluation" } }, "level": "h2", @@ -5446,11 +5412,10 @@ "authed": false, "type": "markdown", "description": "This guide demonstrates how to configure automated alerts for your AI system's performance using Humanloop's monitoring capabilities.\nLearn how to set up alerts in Humanloop using monitoring evaluators and webhooks.", - "content": "Monitoring your AI system's performance in production is crucial for maintaining quality and catching issues early. Humanloop provides tools to set up automated alerts based on your custom evaluation criteria, and guardrails to ensure that issues are prevented from happening.", - "code_snippets": [] + "content": "Monitoring your AI system's performance in production is crucial for maintaining quality and catching issues early. Humanloop provides tools to set up automated alerts based on your custom evaluation criteria, and guardrails to ensure that issues are prevented from happening." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-alerting-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-alerting", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/alerts-and-guardrails", @@ -5473,19 +5438,19 @@ ], "authed": false, "type": "markdown", - "hash": "#alerting-", + "hash": "#alerting", "content": "Alerting is a critical component of any robust monitoring system. It allows you to be promptly notified of important events or issues in your Humanloop environment. 
By setting up alerts, you can proactively respond to potential problems and maintain the health and performance of your AI system.\nAlerting in Humanloop takes advantage of the Evaluators you have enabled, and uses webhooks to send alerts to your preferred communication channels.", "hierarchy": { "h2": { - "id": "alerting-", - "title": "Alerting " + "id": "alerting", + "title": "Alerting" } }, "level": "h2", "level_title": "Alerting" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-overview-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-overview", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/alerts-and-guardrails", @@ -5508,23 +5473,23 @@ ], "authed": false, "type": "markdown", - "hash": "#overview-", + "hash": "#overview", "content": "Alerts are triggered when certain predefined conditions are met in your system. These conditions are typically monitored using log evaluators, which continuously analyze system logs and metrics.", "hierarchy": { "h2": { - "id": "overview-", - "title": "Overview " + "id": "overview", + "title": "Overview" }, "h3": { - "id": "overview-", - "title": "Overview " + "id": "overview", + "title": "Overview" } }, "level": "h3", "level_title": "Overview" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-use-cases-for-alerting-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-use-cases-for-alerting", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/alerts-and-guardrails", @@ -5547,23 +5512,23 @@ ], "authed": false, "type": "markdown", - "hash": "#use-cases-for-alerting-", + "hash": "#use-cases-for-alerting", "content": "Performance Issues\nUse Case: Alert when API response times exceed a certain threshold.\n\nBenefit: Quickly identify and address performance bottlenecks.\n\n\n\nError Rate Spikes\nUse Case: Notify when the error rate for a specific service surpasses normal levels.\n\nBenefit: Detect and investigate unusual error patterns promptly.\n\n\n\nResource Utilization\nUse Case: Alert when CPU or memory usage approaches capacity limits.\n\nBenefit: Prevent system crashes and maintain optimal performance.\n\n\n\nSecurity Incidents\nUse Case: Notify on multiple failed login attempts or unusual access patterns.\n\nBenefit: Rapidly respond to potential security breaches.\n\n\n\nData Quality Issues\nUse Case: Alert when incoming data doesn't meet predefined quality standards.\n\nBenefit: Maintain data integrity and prevent propagation of bad data.\n\n\n\nSLA Violations\nUse Case: Notify when service level agreements are at risk of being breached.\n\nBenefit: Proactively manage client expectations and service quality.", "hierarchy": { "h2": { - "id": "use-cases-for-alerting-", - "title": "Use Cases for Alerting " + "id": "use-cases-for-alerting", + "title": "Use Cases for Alerting" }, "h3": { - "id": "use-cases-for-alerting-", - "title": "Use Cases for Alerting " + "id": "use-cases-for-alerting", + "title": "Use Cases for Alerting" } }, "level": "h3", "level_title": "Use Cases for Alerting" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-best-practices-for-alerting-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-best-practices-for-alerting", "org_id": "humanloop", "domain": 
"humanloop.com", "pathname": "/docs/v5/observability/alerts-and-guardrails", @@ -5586,23 +5551,23 @@ ], "authed": false, "type": "markdown", - "hash": "#best-practices-for-alerting-", + "hash": "#best-practices-for-alerting", "content": "Define Clear Thresholds: Establish meaningful thresholds based on historical data and business requirements.\n\nPrioritize Alerts: Categorize alerts by severity to ensure critical issues receive immediate attention.\n\nProvide Context: Include relevant information in alerts to aid in quick diagnosis and resolution.\n\nAvoid Alert Fatigue: Regularly review and refine alert conditions to minimize false positives.\n\nEstablish Escalation Procedures: Define clear processes for handling and escalating different types of alerts.", "hierarchy": { "h2": { - "id": "best-practices-for-alerting-", - "title": "Best Practices for Alerting " + "id": "best-practices-for-alerting", + "title": "Best Practices for Alerting" }, "h3": { - "id": "best-practices-for-alerting-", - "title": "Best Practices for Alerting " + "id": "best-practices-for-alerting", + "title": "Best Practices for Alerting" } }, "level": "h3", "level_title": "Best Practices for Alerting" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-webhooks-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-webhooks", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/alerts-and-guardrails", @@ -5625,23 +5590,23 @@ ], "authed": false, "type": "markdown", - "hash": "#webhooks-", + "hash": "#webhooks", "content": "Webhooks are a crucial component of Humanloop's alerting system, allowing you to integrate alerts into your existing workflows and communication channels. By leveraging webhooks, you can:\nReceive real-time notifications when alert conditions are met\n\nIntegrate alerts with your preferred messaging platforms (e.g., Slack, Microsoft Teams)\n\nTrigger automated responses or workflows in external systems\n\nCentralize alert management in your existing incident response tools\n\n\nSetting up webhooks enables you to respond quickly to critical events, maintain system health, and streamline your MLOps processes. Many Humanloop users find webhooks invaluable for managing their AI systems effectively at scale.\nFor detailed instructions on setting up webhooks, please refer to our Set up Webhooks guide.", "hierarchy": { "h2": { - "id": "webhooks-", - "title": "Webhooks " + "id": "webhooks", + "title": "Webhooks" }, "h3": { - "id": "webhooks-", - "title": "Webhooks " + "id": "webhooks", + "title": "Webhooks" } }, "level": "h3", "level_title": "Webhooks" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-guardrails-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-guardrails", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/alerts-and-guardrails", @@ -5664,19 +5629,19 @@ ], "authed": false, "type": "markdown", - "hash": "#guardrails-", + "hash": "#guardrails", "content": "Guardrails are protective measures implemented to prevent undesired actions or states in your Humanloop environment. 
They act as a safety net, automatically enforcing rules and limits to maintain system integrity.", "hierarchy": { "h1": { - "id": "guardrails-", - "title": "Guardrails " + "id": "guardrails", + "title": "Guardrails" } }, "level": "h1", "level_title": "Guardrails" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-overview--1", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-overview-1", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/alerts-and-guardrails", @@ -5699,23 +5664,23 @@ ], "authed": false, "type": "markdown", - "hash": "#overview--1", + "hash": "#overview-1", "content": "Guardrails typically work by setting boundaries on various system parameters and automatically taking action when these boundaries are approached or exceeded.", "hierarchy": { "h1": { - "id": "overview--1", - "title": "Overview " + "id": "overview-1", + "title": "Overview" }, "h3": { - "id": "overview--1", - "title": "Overview " + "id": "overview-1", + "title": "Overview" } }, "level": "h3", "level_title": "Overview" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-how-guardrails-works-in-humanloop-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-how-guardrails-works-in-humanloop", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/alerts-and-guardrails", @@ -5738,19 +5703,19 @@ ], "authed": false, "type": "markdown", - "hash": "#how-guardrails-works-in-humanloop-", + "hash": "#how-guardrails-works-in-humanloop", "content": "set up evaluators\n\nconfigure them as a guardrail\nspecify the type of guardrail (e.g. 
rate limiting, content moderation, etc.)\n\nspecify the threshold for the guardrail\n\nspecify the action to take when the guardrail is violated", "hierarchy": { "h1": { - "id": "how-guardrails-works-in-humanloop-", - "title": "How Guardrails works in Humanloop " + "id": "how-guardrails-works-in-humanloop", + "title": "How Guardrails works in Humanloop" } }, "level": "h1", "level_title": "How Guardrails works in Humanloop" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-use-cases-for-guardrails-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-use-cases-for-guardrails", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/alerts-and-guardrails", @@ -5773,23 +5738,23 @@ ], "authed": false, "type": "markdown", - "hash": "#use-cases-for-guardrails-", + "hash": "#use-cases-for-guardrails", "content": "Content Moderation\nUse Case: Automatically filter or flag inappropriate, offensive, or harmful content generated by LLMs.\n\nBenefit: Maintain a safe and respectful environment for users, comply with content policies.\n\n\n\nPII Protection\nUse Case: Detect and redact personally identifiable information (PII) in LLM outputs.\n\nBenefit: Ensure data privacy, comply with regulations like GDPR and CCPA.\n\n\n\nBias Detection\nUse Case: Identify and mitigate biased language or unfair treatment in LLM responses.\n\nBenefit: Promote fairness and inclusivity, reduce discriminatory outputs.\n\n\n\nFairness Assurance\nUse Case: Ensure equal treatment and representation across different demographic groups in LLM interactions.\n\nBenefit: Maintain ethical AI practices, avoid reinforcing societal biases.\n\n\n\nToxicity Filtering\nUse Case: Detect and prevent the generation of toxic, abusive, or hateful content.\n\nBenefit: Create a positive user experience, protect brand reputation.\n\n\n\nHallucination Protections\nUse Case: Detect and prevent the generation of false or fabricated information by the LLM.\n\nBenefit: Ensure output reliability, maintain user trust, and avoid potential misinformation spread.", "hierarchy": { "h1": { - "id": "use-cases-for-guardrails-", - "title": "Use Cases for Guardrails " + "id": "use-cases-for-guardrails", + "title": "Use Cases for Guardrails" }, "h3": { - "id": "use-cases-for-guardrails-", - "title": "Use Cases for Guardrails " + "id": "use-cases-for-guardrails", + "title": "Use Cases for Guardrails" } }, "level": "h3", "level_title": "Use Cases for Guardrails" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-best-practices-for-implementing-guardrails-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.alerts-and-guardrails-best-practices-for-implementing-guardrails", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/alerts-and-guardrails", @@ -5812,16 +5777,16 @@ ], "authed": false, "type": "markdown", - "hash": "#best-practices-for-implementing-guardrails-", + "hash": "#best-practices-for-implementing-guardrails", "content": "Start Conservative: Begin with more restrictive guardrails and loosen them as you gain confidence.\n\nMonitor Guardrail Actions: Keep track of when and why guardrails are triggered to identify patterns.\n\nRegular Reviews: Periodically assess the effectiveness of your guardrails and adjust as needed.\n\nProvide Override Mechanisms: Allow authorized personnel to bypass guardrails in controlled 
situations.\n\nDocument Thoroughly: Maintain clear documentation of all implemented guardrails for team awareness.", "hierarchy": { "h1": { - "id": "best-practices-for-implementing-guardrails-", - "title": "Best Practices for Implementing Guardrails " + "id": "best-practices-for-implementing-guardrails", + "title": "Best Practices for Implementing Guardrails" }, "h3": { - "id": "best-practices-for-implementing-guardrails-", - "title": "Best Practices for Implementing Guardrails " + "id": "best-practices-for-implementing-guardrails", + "title": "Best Practices for Implementing Guardrails" } }, "level": "h3", @@ -5856,11 +5821,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create and use online evaluators to observe the performance of your models.\nIn this guide, we will demonstrate how to create and use online evaluators to observe the performance of your models.", - "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan", - "code_snippets": [] + "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-monitoring-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-monitoring-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/set-up-monitoring", @@ -5887,23 +5851,23 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You need to have access to evaluations.\n\nYou also need to have a Prompt – if not, please follow our Prompt creation guide.\n\nFinally, you need at least a few logs in your project. Use the Editor to generate some logs if you don't have any yet.\n\n\nTo set up an online Python evaluator:\n\n\nGo to the Evaluations page in one of your projects and select the Evaluators tab\nSelect + New Evaluator and choose Code Evaluator in the dialog\n\n\nFrom the library of presets on the left-hand side, we'll choose Valid JSON for this guide. You'll see a pre-populated evaluator with Python code that checks the output of our model is valid JSON grammar.\n\n\nIn the debug console at the bottom of the dialog, click Random logs from project. The console will be populated with five datapoints from your project.\n\n\nClick the Run button at the far right of one of the log rows. 
After a moment, you'll see the Result column populated with a True or False.\n\n\nExplore the log dictionary in the table to help understand what is available on the Python object passed into the evaluator.\nClick Create on the left side of the page.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-monitoring-activate-an-evaluator-for-a-project-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-monitoring-activate-an-evaluator-for-a-project", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/set-up-monitoring", @@ -5930,19 +5894,19 @@ ], "authed": false, "type": "markdown", - "hash": "#activate-an-evaluator-for-a-project-", + "hash": "#activate-an-evaluator-for-a-project", "content": "On the new **Valid JSON ** evaluator in the Evaluations tab, toggle the switch to on - the evaluator is now activated for the current project.\n\n\nGo to the Editor, and generate some fresh logs with your model.\nOver in the Logs tab you'll see the new logs. The Valid JSON evaluator runs automatically on these new logs, and the results are displayed in the table.", "hierarchy": { "h2": { - "id": "activate-an-evaluator-for-a-project-", - "title": "Activate an evaluator for a project " + "id": "activate-an-evaluator-for-a-project", + "title": "Activate an evaluator for a project" } }, "level": "h2", "level_title": "Activate an evaluator for a project" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-monitoring-prerequisites--1", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-monitoring-prerequisites-1", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/set-up-monitoring", @@ -5969,16 +5933,16 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites--1", + "hash": "#prerequisites-1", "content": "A Humanloop project with a reasonable amount of data.\n\nAn Evaluator activated in that project.\n\n\nTo track the performance of different model configs in your project:\n\n\nGo to the Dashboard tab.\nIn the table of model configs at the\nbottom, choose a subset of the project's model configs.\nUse the graph controls\nAt the top of the page to select the date range and time granularity\nof interest.\nReview the relative performance\nFor each activated Evaluator shown in the graphs, you can see the relative performance of the model configs you selected.\n\n\n\n\nThe following Python modules are available to be imported in your code evaluators:\nre\n\nmath\n\nrandom\n\ndatetime\n\njson (useful for validating JSON grammar as per the example above)\n\njsonschema (useful for more fine-grained validation of JSON output - see the in-app example)\n\nsqlglot (useful for validating SQL query grammar)\n\nrequests (useful to make further LLM calls as part of your evaluation - see the in-app example for a suggestion of how to get started).", "hierarchy": { "h2": { - "id": "prerequisites--1", - "title": "Prerequisites " + "id": "prerequisites-1", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites--1", - "title": "Prerequisites " + "id": "prerequisites-1", + 
"title": "Prerequisites" } }, "level": "h3", @@ -6013,11 +5977,10 @@ "authed": false, "type": "markdown", "description": "Learn how to set up webhooks via API for alerting on your monitoring evaluators.\nIn this guide, we will demonstrate how to set up webhooks via API for alerting on your monitoring evaluators.", - "content": "This content is currently under development. Please refer to our V4\ndocumentation for the current docs.\n\n\nThis feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan\n\n\nIn this guide, we'll walk you through the process of setting up webhooks using the Humanloop API to notify you in Slack when certain events occur with your monitoring evaluators.", - "code_snippets": [] + "content": "This content is currently under development. Please refer to our V4\ndocumentation for the current docs.\n\n\nThis feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan\n\n\nIn this guide, we'll walk you through the process of setting up webhooks using the Humanloop API to notify you in Slack when certain events occur with your monitoring evaluators." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-webhooks-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-webhooks-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/set-up-webhooks", @@ -6044,7 +6007,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "Before you begin, make sure you have:\nA Humanloop account with API access\n\nA Slack workspace where you have permissions to add webhooks\n\nA Humanloop project with at least one LLM model and monitoring evaluator set up\n\n\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)", "code_snippets": [ { @@ -6070,15 +6033,15 @@ ], "hierarchy": { "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-webhooks-setting-up-a-webhook-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-webhooks-setting-up-a-webhook", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/set-up-webhooks", @@ -6105,7 +6068,7 @@ ], "authed": false, "type": "markdown", - "hash": "#setting-up-a-webhook-", + "hash": "#setting-up-a-webhook", "content": "To set up a webhook, you'll use the hl.webhook.create() method from the Humanloop Python SDK. 
Here's a step-by-step guide:\n\n\nCreate a Slack incoming webhook\nGo to your Slack workspace and create a new Slack app (or use an existing one).\n\nUnder \"Add features and functionality\", choose \"Incoming Webhooks\" and activate them.\n\nClick \"Add New Webhook to Workspace\" and choose the channel where you want to receive notifications.\n\nCopy the webhook URL provided by Slack.\n\n\nImport the Humanloop SDK and initialize the client\nReplace \"your-api-key\" with your actual Humanloop API key.\nCreate a webhook\nReplace the following:\n\"https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK\" with your Slack webhook URL\n\n\"your-model-name\" with the name of the model you want to monitor\n\n\"your-shared-secret\" with a secret string of your choice for added security\n\n\nTest the webhook\nTo test if your webhook is working correctly, you can trigger an evaluation:\nReplace \"your-project-id\" and \"your-model-name\" with your actual project ID and model name.", "code_snippets": [ { @@ -6135,15 +6098,15 @@ ], "hierarchy": { "h3": { - "id": "setting-up-a-webhook-", - "title": "Setting up a webhook " + "id": "setting-up-a-webhook", + "title": "Setting up a webhook" } }, "level": "h3", "level_title": "Setting up a webhook" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-webhooks-verifying-the-webhook-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-webhooks-verifying-the-webhook", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/set-up-webhooks", @@ -6170,7 +6133,7 @@ ], "authed": false, "type": "markdown", - "hash": "#verifying-the-webhook-", + "hash": "#verifying-the-webhook", "content": "After setting up the webhook and triggering an evaluation, you should see a message in your specified Slack channel. 
The message will contain details about the evaluation event, such as:", "code_snippets": [ { @@ -6179,15 +6142,15 @@ ], "hierarchy": { "h3": { - "id": "verifying-the-webhook-", - "title": "Verifying the webhook " + "id": "verifying-the-webhook", + "title": "Verifying the webhook" } }, "level": "h3", "level_title": "Verifying the webhook" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-webhooks-managing-webhooks-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-webhooks-managing-webhooks", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/set-up-webhooks", @@ -6214,7 +6177,7 @@ ], "authed": false, "type": "markdown", - "hash": "#managing-webhooks-", + "hash": "#managing-webhooks", "content": "You can list, update, or delete webhooks using the following methods:\nReplace \"webhook-id\" with the ID of the webhook you want to manage.", "code_snippets": [ { @@ -6224,15 +6187,15 @@ ], "hierarchy": { "h3": { - "id": "managing-webhooks-", - "title": "Managing webhooks " + "id": "managing-webhooks", + "title": "Managing webhooks" } }, "level": "h3", "level_title": "Managing webhooks" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-webhooks-conclusion-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.set-up-webhooks-conclusion", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/set-up-webhooks", @@ -6259,12 +6222,12 @@ ], "authed": false, "type": "markdown", - "hash": "#conclusion-", + "hash": "#conclusion", "content": "You've now set up a webhook to receive notifications in Slack when your monitoring evaluators complete evaluations or detect drift. This will help you stay informed about the performance and behavior of your LLM models in real-time.", "hierarchy": { "h3": { - "id": "conclusion-", - "title": "Conclusion " + "id": "conclusion", + "title": "Conclusion" } }, "level": "h3", @@ -6299,11 +6262,10 @@ "authed": false, "type": "markdown", "description": "Learn how to record user feedback on your generated Prompt Logs using the Humanloop SDK.\nIn this guide, we show how to record end-user feedback using the Humanloop Python SDK. This allows you to monitor how your generations perform with your users.", - "content": "This guide shows how to use the Humanloop SDK to record end-user feedback on Logs.\n\n\nDifferent use-cases and user interfaces may require different kinds of feedback that need to be mapped to the appropriate end user interaction.\nThere are broadly 3 important kinds of feedback:\nExplicit feedback: these are purposeful actions to review the generations. 
For example, ‘thumbs up/down’ button presses.\n\nImplicit feedback: indirect actions taken by your users may signal whether the generation was good or bad, for example, whether the user ‘copied’ the generation, ‘saved it’ or ‘dismissed it’ (which is negative feedback).\n\nFree-form feedback: Corrections and explanations provided by the end-user on the generation.\n\n\nYou should create Human Evaluators structured to capture the feedback you need.\nFor example, a Human Evaluator with return type \"text\" can be used to capture free-form feedback, while a Human Evaluator with return type \"multi_select\" can be used to capture user actions\nthat provide implicit feedback.\nIf you have not done so, you can follow our guide to create a Human Evaluator to set up the appropriate feedback schema.", - "code_snippets": [] + "content": "This guide shows how to use the Humanloop SDK to record end-user feedback on Logs.\n\n\nDifferent use-cases and user interfaces may require different kinds of feedback that need to be mapped to the appropriate end user interaction.\nThere are broadly 3 important kinds of feedback:\nExplicit feedback: these are purposeful actions to review the generations. For example, ‘thumbs up/down’ button presses.\n\nImplicit feedback: indirect actions taken by your users may signal whether the generation was good or bad, for example, whether the user ‘copied’ the generation, ‘saved it’ or ‘dismissed it’ (which is negative feedback).\n\nFree-form feedback: Corrections and explanations provided by the end-user on the generation.\n\n\nYou should create Human Evaluators structured to capture the feedback you need.\nFor example, a Human Evaluator with return type \"text\" can be used to capture free-form feedback, while a Human Evaluator with return type \"multi_select\" can be used to capture user actions\nthat provide implicit feedback.\nIf you have not done so, you can follow our guide to create a Human Evaluator to set up the appropriate feedback schema." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/capture-user-feedback", @@ -6330,7 +6292,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\nYou have created a Human Evaluator. This can be done by following the steps in our guide to Human Evaluator creation.\n\n\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. 
Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)", "code_snippets": [ { @@ -6356,15 +6318,15 @@ ], "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-attach-human-evaluator-to-enable-feedback-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-attach-human-evaluator-to-enable-feedback", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/capture-user-feedback", @@ -6391,19 +6353,19 @@ ], "authed": false, "type": "markdown", - "hash": "#attach-human-evaluator-to-enable-feedback-", + "hash": "#attach-human-evaluator-to-enable-feedback", "content": "In this example, we'll be attaching a \"Tweet Issues\" Human Evaluator to an \"Impersonator\" Prompt.\nThe specifics of the \"Tweet Issues\" Evaluator are not important for this guide, but for completeness, it is a Human Evaluator with the return type \"multi_select\" and options like \"Inappropriate\", \"Too many emojis\", \"Too long\", etc.\n\n\nGo to the Prompt's Dashboard\nClick Monitoring in the top right to open the Monitoring Dialog\nPrompt dashboard showing Monitoring dialog\nClick Connect Evaluators and select the Human Evaluator you created.\nDialog connecting the \"Tweet Issues\" Evaluator as a Monitoring Evaluator\nYou should now see the selected Human Evaluator attached to the Prompt in the Monitoring dialog.\nMonitoring dialog showing the \"Tweet Issues\" Evaluator attached to the Prompt", "hierarchy": { "h2": { - "id": "attach-human-evaluator-to-enable-feedback-", - "title": "Attach Human Evaluator to enable feedback " + "id": "attach-human-evaluator-to-enable-feedback", + "title": "Attach Human Evaluator to enable feedback" } }, "level": "h2", "level_title": "Attach Human Evaluator to enable feedback" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-record-feedback-against-a-log-by-its-id-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-record-feedback-against-a-log-by-its-id", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/capture-user-feedback", @@ -6430,7 +6392,7 @@ ], "authed": false, "type": "markdown", - "hash": "#record-feedback-against-a-log-by-its-id-", + "hash": "#record-feedback-against-a-log-by-its-id", "content": "With the Human Evaluator attached to the Prompt, you can now record judgments against the Prompt's Logs.\nTo make API calls to record feedback, you will need the Log ID of the Log you want to record feedback against.\nThe steps below illustrate a typical workflow for recording feedback against a Log generated in your code.\n\n\nRetrieve the Log ID from the client.prompts.call() response.\nCall client.evaluators.log(...) referencing the above Log ID as parent_id to record user feedback.\n\n\nThe \"rating\" and \"correction\" Evaluators are attached to all Prompts by default.\nYou can record feedback using these Evaluators as well.\nThe \"rating\" Evaluator can be used to record explicit feedback (e.g. 
from a 👍/👎 button).\nThe \"correction\" Evaluator can be used to record user-provided corrections to the generations (e.g. If the user edits the generation before copying it).\nIf the user removes their feedback (e.g. if the user deselects a previous 👎 feedback), you can record this by passing judgment=None.", "code_snippets": [ { @@ -6476,15 +6438,15 @@ ], "hierarchy": { "h2": { - "id": "record-feedback-against-a-log-by-its-id-", - "title": "Record feedback against a Log by its ID " + "id": "record-feedback-against-a-log-by-its-id", + "title": "Record feedback against a Log by its ID" } }, "level": "h2", "level_title": "Record feedback against a Log by its ID" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-viewing-feedback-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-viewing-feedback", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/capture-user-feedback", @@ -6511,19 +6473,19 @@ ], "authed": false, "type": "markdown", - "hash": "#viewing-feedback-", + "hash": "#viewing-feedback", "content": "You can view the applied feedback in two main ways: through the Logs that the feedback was applied to, and through the Human Evaluator itself.", "hierarchy": { "h2": { - "id": "viewing-feedback-", - "title": "Viewing feedback " + "id": "viewing-feedback", + "title": "Viewing feedback" } }, "level": "h2", "level_title": "Viewing feedback" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-viewing-feedback-applied-to-logs-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-viewing-feedback-applied-to-logs", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/capture-user-feedback", @@ -6550,23 +6512,23 @@ ], "authed": false, "type": "markdown", - "hash": "#viewing-feedback-applied-to-logs-", + "hash": "#viewing-feedback-applied-to-logs", "content": "The feedback recorded for each Log can be viewed in the Logs table of your Prompt.\nLogs table showing feedback applied to Logs\nYour internal users can also apply feedback to the Logs directly through the Humanloop app.\nLog drawer showing feedback section", "hierarchy": { "h2": { - "id": "viewing-feedback-applied-to-logs-", - "title": "Viewing Feedback applied to Logs " + "id": "viewing-feedback-applied-to-logs", + "title": "Viewing Feedback applied to Logs" }, "h3": { - "id": "viewing-feedback-applied-to-logs-", - "title": "Viewing Feedback applied to Logs " + "id": "viewing-feedback-applied-to-logs", + "title": "Viewing Feedback applied to Logs" } }, "level": "h3", "level_title": "Viewing Feedback applied to Logs" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-viewing-feedback-through-its-human-evaluator-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.observability.guides.capture-user-feedback-viewing-feedback-through-its-human-evaluator", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/observability/guides/capture-user-feedback", @@ -6593,16 +6555,16 @@ ], "authed": false, "type": "markdown", - "hash": "#viewing-feedback-through-its-human-evaluator-", + "hash": "#viewing-feedback-through-its-human-evaluator", "content": "Alternatively, you can view all feedback recorded for a specific Evaluator in the Logs tab of the Evaluator.\nThis will
display all feedback recorded for the Evaluator across all other Files.\nLogs table for \"Tweet Issues\" Evaluator showing feedback", "hierarchy": { "h2": { - "id": "viewing-feedback-through-its-human-evaluator-", - "title": "Viewing Feedback through its Human Evaluator " + "id": "viewing-feedback-through-its-human-evaluator", + "title": "Viewing Feedback through its Human Evaluator" }, "h3": { - "id": "viewing-feedback-through-its-human-evaluator-", - "title": "Viewing Feedback through its Human Evaluator " + "id": "viewing-feedback-through-its-human-evaluator", + "title": "Viewing Feedback through its Human Evaluator" } }, "level": "h3", @@ -6633,11 +6595,10 @@ "authed": false, "type": "markdown", "description": "Learn about the different roles and permissions in Humanloop to help you with prompt and data management for large language models.", - "content": "Everyone invited to the organization can access all projects currently (controlling project access coming soon).\nA user can be one of the following roles:\nAdmin: The highest level of control. They can manage, modify, and oversee the Organization's settings and have full functionality across all projects.\nDeveloper: (Enterprise tier only) Can deploy Files, manage environments, create and add API keys, but lacks the ability to access billing or invite others.\nMember: (Enterprise tier only) The basic level of access. Can create and save Files, run Evaluations, but not deploy. Cannot see any org-wide API keys.", - "code_snippets": [] + "content": "Everyone invited to the organization can access all projects currently (controlling project access coming soon).\nA user can be one of the following roles:\nAdmin: The highest level of control. They can manage, modify, and oversee the Organization's settings and have full functionality across all projects.\nDeveloper: (Enterprise tier only) Can deploy Files, manage environments, create and add API keys, but lacks the ability to access billing or invite others.\nMember: (Enterprise tier only) The basic level of access. Can create and save Files, run Evaluations, but not deploy. Cannot see any org-wide API keys." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.access-roles-rbacs-summary-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.access-roles-rbacs-summary", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/access-roles", @@ -6660,12 +6621,12 @@ ], "authed": false, "type": "markdown", - "hash": "#rbacs-summary-", + "hash": "#rbacs-summary", "content": "Here is the full breakdown of roles and access:\nAction Member Developer Admin \nCreate and manage Files ✔️ ✔️ ✔️ \nInspect logs and feedback ✔️ ✔️ ✔️ \nCreate and manage Evaluators ✔️ ✔️ ✔️ \nRun Evaluations ✔️ ✔️ ✔️ \nCreate and manage Datasets ✔️ ✔️ ✔️ \nCreate and manage API keys ✔️ ✔️ \nManage prompt deployments ✔️ ✔️ \nCreate and manage environments ✔️ ✔️ \nSend invites ✔️ \nSet user roles ✔️ \nManage billing ✔️ \nChange Organization settings ✔️", "hierarchy": { "h2": { - "id": "rbacs-summary-", - "title": "RBACs summary " + "id": "rbacs-summary", + "title": "RBACs summary" } }, "level": "h2", @@ -6696,11 +6657,10 @@ "authed": false, "type": "markdown", "description": "Learn about Single Sign-On (SSO) and authentication options for Humanloop\nSSO and Authentication for Humanloop", - "content": "Humanloop offers authentication options to ensure secure access to your organization's resources.
This guide covers our Single Sign-On (SSO) capabilities and other authentication methods.", - "code_snippets": [] + "content": "Humanloop offers authentication options to ensure secure access to your organization's resources. This guide covers our Single Sign-On (SSO) capabilities and other authentication methods." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-single-sign-on-sso-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-single-sign-on-sso", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -6723,19 +6683,19 @@ ], "authed": false, "type": "markdown", - "hash": "#single-sign-on-sso-", + "hash": "#single-sign-on-sso", "content": "Single Sign-On allows users to access multiple applications with a single set of credentials. Humanloop supports SSO integration with major identity providers, enhancing security and simplifying user management.", "hierarchy": { "h2": { - "id": "single-sign-on-sso-", - "title": "Single Sign-On (SSO) " + "id": "single-sign-on-sso", + "title": "Single Sign-On (SSO)" } }, "level": "h2", "level_title": "Single Sign-On (SSO)" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-supported-sso-providers-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-supported-sso-providers", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -6758,23 +6718,23 @@ ], "authed": false, "type": "markdown", - "hash": "#supported-sso-providers-", + "hash": "#supported-sso-providers", "content": "Google Workspace\n\nOkta\n\nAzure Active Directory\n\nOneLogin\n\nCustom SAML 2.0 providers", "hierarchy": { "h2": { - "id": "supported-sso-providers-", - "title": "Supported SSO Providers " + "id": "supported-sso-providers", + "title": "Supported SSO Providers" }, "h3": { - "id": "supported-sso-providers-", - "title": "Supported SSO Providers " + "id": "supported-sso-providers", + "title": "Supported SSO Providers" } }, "level": "h3", "level_title": "Supported SSO Providers" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-benefits-of-sso-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-benefits-of-sso", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -6797,23 +6757,23 @@ ], "authed": false, "type": "markdown", - "hash": "#benefits-of-sso-", + "hash": "#benefits-of-sso", "content": "Enhanced security with centralized authentication\n\nSimplified user management\n\nImproved user experience with reduced password fatigue\n\nStreamlined onboarding and offboarding processes", "hierarchy": { "h2": { - "id": "benefits-of-sso-", - "title": "Benefits of SSO " + "id": "benefits-of-sso", + "title": "Benefits of SSO" }, "h3": { - "id": "benefits-of-sso-", - "title": "Benefits of SSO " + "id": "benefits-of-sso", + "title": "Benefits of SSO" } }, "level": "h3", "level_title": "Benefits of SSO" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-setting-up-sso-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-setting-up-sso", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -6836,23 +6796,23 @@ ], "authed": false, "type": "markdown", - 
"hash": "#setting-up-sso-", + "hash": "#setting-up-sso", "content": "To set up SSO for your organization:\nContact our sales team to enable SSO for your account\n\nChoose your identity provider\n\nConfigure the connection between Humanloop and your identity provider\n\nTest the SSO integration\n\nRoll out to your users", "hierarchy": { "h2": { - "id": "setting-up-sso-", - "title": "Setting up SSO " + "id": "setting-up-sso", + "title": "Setting up SSO" }, "h3": { - "id": "setting-up-sso-", - "title": "Setting up SSO " + "id": "setting-up-sso", + "title": "Setting up SSO" } }, "level": "h3", "level_title": "Setting up SSO" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-multi-factor-authentication-mfa-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-multi-factor-authentication-mfa", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -6875,19 +6835,19 @@ ], "authed": false, "type": "markdown", - "hash": "#multi-factor-authentication-mfa-", + "hash": "#multi-factor-authentication-mfa", "content": "For accounts not using SSO, we strongly recommend enabling Multi-Factor Authentication for an additional layer of security.", "hierarchy": { "h2": { - "id": "multi-factor-authentication-mfa-", - "title": "Multi-Factor Authentication (MFA) " + "id": "multi-factor-authentication-mfa", + "title": "Multi-Factor Authentication (MFA)" } }, "level": "h2", "level_title": "Multi-Factor Authentication (MFA)" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-mfa-options-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-mfa-options", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -6910,23 +6870,23 @@ ], "authed": false, "type": "markdown", - "hash": "#mfa-options-", + "hash": "#mfa-options", "content": "Time-based One-Time Password (TOTP) apps\n\nSMS-based verification\n\nHardware security keys (e.g., YubiKey)", "hierarchy": { "h2": { - "id": "mfa-options-", - "title": "MFA Options " + "id": "mfa-options", + "title": "MFA Options" }, "h3": { - "id": "mfa-options-", - "title": "MFA Options " + "id": "mfa-options", + "title": "MFA Options" } }, "level": "h3", "level_title": "MFA Options" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-api-authentication-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-api-authentication", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -6949,19 +6909,19 @@ ], "authed": false, "type": "markdown", - "hash": "#api-authentication-", + "hash": "#api-authentication", "content": "For programmatic access to Humanloop, we use API keys. 
These should be kept secure and rotated regularly.", "hierarchy": { "h2": { - "id": "api-authentication-", - "title": "API Authentication " + "id": "api-authentication", + "title": "API Authentication" } }, "level": "h2", "level_title": "API Authentication" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-managing-api-keys-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-managing-api-keys", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -6984,23 +6944,23 @@ ], "authed": false, "type": "markdown", - "hash": "#managing-api-keys-", + "hash": "#managing-api-keys", "content": "Generate API keys in your account settings\n\nUse environment variables to store API keys in your applications\n\nImplement key rotation policies for enhanced security", "hierarchy": { "h2": { - "id": "managing-api-keys-", - "title": "Managing API Keys " + "id": "managing-api-keys", + "title": "Managing API Keys" }, "h3": { - "id": "managing-api-keys-", - "title": "Managing API Keys " + "id": "managing-api-keys", + "title": "Managing API Keys" } }, "level": "h3", "level_title": "Managing API Keys" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-user-provisioning-and-deprovisioning-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-user-provisioning-and-deprovisioning", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -7023,19 +6983,19 @@ ], "authed": false, "type": "markdown", - "hash": "#user-provisioning-and-deprovisioning-", + "hash": "#user-provisioning-and-deprovisioning", "content": "Humanloop supports automated user lifecycle management through our Directory Sync feature. 
This allows for:\nAutomatic user creation based on directory group membership\n\nReal-time updates to user attributes and permissions\n\nImmediate deprovisioning when users are removed from directory groups", "hierarchy": { "h2": { - "id": "user-provisioning-and-deprovisioning-", - "title": "User Provisioning and Deprovisioning " + "id": "user-provisioning-and-deprovisioning", + "title": "User Provisioning and Deprovisioning" } }, "level": "h2", "level_title": "User Provisioning and Deprovisioning" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-best-practices-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-best-practices", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -7058,19 +7018,19 @@ ], "authed": false, "type": "markdown", - "hash": "#best-practices-", + "hash": "#best-practices", "content": "Use SSO when possible for centralized access control\n\nEnable MFA for all user accounts\n\nRegularly audit user access and permissions\n\nImplement the principle of least privilege\n\nUse secure protocols (HTTPS) for all communications with Humanloop\n\n\nFor more information on setting up SSO or other authentication methods, please contact our support team or refer to our API documentation.", "hierarchy": { "h2": { - "id": "best-practices-", - "title": "Best Practices " + "id": "best-practices", + "title": "Best Practices" } }, "level": "h2", "level_title": "Best Practices" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-active-directory-sync-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.sso-and-authentication-active-directory-sync", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/sso-and-authentication", @@ -7093,12 +7053,12 @@ ], "authed": false, "type": "markdown", - "hash": "#active-directory-sync-", + "hash": "#active-directory-sync", "content": "Humanloop supports Active Directory Sync for automated user provisioning and deprovisioning. 
This feature allows you to:\nAutomatically create and update user accounts based on your Active Directory groups\n\nSync user attributes and roles in real-time\n\nInstantly deprovision access when users are removed from AD groups\n\nMaintain consistent access control across your organization\n\nReduce manual user management tasks and potential security risks\n\n\nTo set up Active Directory Sync:\nContact our sales team to enable this feature for your account\n\nConfigure the connection between Humanloop and your Active Directory\n\nMap your AD groups to Humanloop roles and permissions\n\nTest the sync process with a small group of users\n\nRoll out to your entire organization\n\n\nFor more information on implementing Active Directory Sync, please contact our support team.", "hierarchy": { "h2": { - "id": "active-directory-sync-", - "title": "Active Directory Sync " + "id": "active-directory-sync", + "title": "Active Directory Sync" } }, "level": "h2", @@ -7133,11 +7093,10 @@ "authed": false, "type": "markdown", "description": "Inviting people to your organization allows them to interact with your Humanloop projects.\nHow to invite collaborators to your Humanloop organization.", - "content": "Inviting people to your organization allows them to interact with your Humanloop projects:\nTeammates will be able to create new model configs and experiments\n\nDevelopers will be able to get an API key to interact with projects through the SDK\n\nAnnotators may provide feedback on logged datapoints using the Data tab (in addition to feedback captured from your end-users via the SDK feedback integration)", - "code_snippets": [] + "content": "Inviting people to your organization allows them to interact with your Humanloop projects:\nTeammates will be able to create new model configs and experiments\n\nDevelopers will be able to get an API key to interact with projects through the SDK\n\nAnnotators may provide feedback on logged datapoints using the Data tab (in addition to feedback captured from your end-users via the SDK feedback integration)" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.guides.invite-collaborators-invite-users-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.guides.invite-collaborators-invite-users", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/guides/invite-collaborators", @@ -7164,12 +7123,12 @@ ], "authed": false, "type": "markdown", - "hash": "#invite-users-", + "hash": "#invite-users", "content": "To invite users to your organization:\n\n\nGo to your organization's Members page\nEnter the email address\nEnter the email of the person you wish to invite into the Invite members box.\n\n\nClick Send invite.\nAn email will be sent to the entered email address, inviting them to the organization. If the entered email address is not already a Humanloop user, they will be prompted to create an account before being added to the organization.\n🎉 Once they create an account, they can view your projects at the same URL to begin collaborating.", "hierarchy": { "h2": { - "id": "invite-users-", - "title": "Invite Users " + "id": "invite-users", + "title": "Invite Users" } }, "level": "h2", @@ -7203,11 +7162,10 @@ ], "authed": false, "type": "markdown", - "description": "How to create, share and manage you Humanloop API keys. 
The API keys allow you to access the Humanloop API programmatically in your app.\nAPI keys allow you to access the Humanloop API programmatically in your app.", - "code_snippets": [] + "description": "How to create, share and manage you Humanloop API keys. The API keys allow you to access the Humanloop API programmatically in your app.\nAPI keys allow you to access the Humanloop API programmatically in your app." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.guides.manage-api-keys-create-a-new-api-key-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.guides.manage-api-keys-create-a-new-api-key", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/guides/manage-api-keys", @@ -7234,19 +7192,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-new-api-key-", + "hash": "#create-a-new-api-key", "content": "Go to your Organization's API Keys page.\nClick the Create new API key button.\nEnter a name for your API key.\nChoose a name that helps you identify the key's purpose. You can't change the name of an API key after it's created.\nClick Create.\n\n\nCopy the generated API key\nSave it in a secure location. You will not be shown the full API key again.", "hierarchy": { "h2": { - "id": "create-a-new-api-key-", - "title": "Create a new API key " + "id": "create-a-new-api-key", + "title": "Create a new API key" } }, "level": "h2", "level_title": "Create a new API key" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.guides.manage-api-keys-revoke-an-api-key-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.guides.manage-api-keys-revoke-an-api-key", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/guides/manage-api-keys", @@ -7273,12 +7231,12 @@ ], "authed": false, "type": "markdown", - "hash": "#revoke-an-api-key-", + "hash": "#revoke-an-api-key", "content": "You can revoke an existing API key if it is no longer needed.\n\n\nWhen an API key is revoked, future API requests that use this key will be\nrejected. Any systems that are dependent on this key will no longer work.\n\n\nGo to API keys page\nGo to your Organization's API Keys\npage.\nIdentify the API key\nFind the key you wish to revoke by its name or by the displayed trailing characters.\nClick 'Revoke'\nClick the three dots button on the right of its row to open its menu.\nClick Revoke.\nA confirmation dialog will be displayed. Click Remove.", "hierarchy": { "h2": { - "id": "revoke-an-api-key-", - "title": "Revoke an API key " + "id": "revoke-an-api-key", + "title": "Revoke an API key" } }, "level": "h2", @@ -7312,11 +7270,10 @@ ], "authed": false, "type": "markdown", - "description": "How to create and manage environments for your organization.\nEnvironments enable you to deploy different versions of your files, enabling multiple workflows.", - "code_snippets": [] + "description": "How to create and manage environments for your organization.\nEnvironments enable you to deploy different versions of your files, enabling multiple workflows." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.guides.manage-environments-create-a-new-environment-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.guides.manage-environments-create-a-new-environment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/guides/manage-environments", @@ -7343,19 +7300,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-new-environment-", + "hash": "#create-a-new-environment", "content": "Only Enterprise customers can create more than one environment.\n\n\nGo to your Organization's Environments page.\nClick the + Environment button.\nEnter a name for your environment.\nChoose a name that is relevant to the development workflow you intend to support, such as staging or development.\nClick Create.", "hierarchy": { "h2": { - "id": "create-a-new-environment-", - "title": "Create a new environment " + "id": "create-a-new-environment", + "title": "Create a new environment" } }, "level": "h2", "level_title": "Create a new environment" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.guides.manage-environments-rename-an-environment-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.admin.guides.manage-environments-rename-an-environment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/admin/guides/manage-environments", @@ -7382,12 +7339,12 @@ ], "authed": false, "type": "markdown", - "hash": "#rename-an-environment-", + "hash": "#rename-an-environment", "content": "You can rename an environment to re-arrange your development workflows. Since each new file is automatically deployed to the default environment, which is production unless altered, it may make more sense to create a separate production environment and rename your current environments.\n\n\nRenaming the environments will take immediate effect, so ensure that this\nchange is planned and does not disrupt your production workflows.\n\n\nGo to environments page\nGo to your Organization's environments\npage.\nIdentify the environments\nFind the environments you wish to rename.\nClick 'Rename'\nClick the three dots button on the right of its row to open its menu.\nClick Rename.\nA confirmation dialog will be displayed. Update the name and click Rename.", "hierarchy": { "h2": { - "id": "rename-an-environment-", - "title": "Rename an environment " + "id": "rename-an-environment", + "title": "Rename an environment" } }, "level": "h2", @@ -7418,8 +7375,7 @@ "authed": false, "type": "markdown", "description": "Humanloop is SOC-2 compliant, offers within your VPC and never trains on your data. Learn more about our hosting options.\nHumanloop provides a range of hosting options and guarantees to meet enterprise needs.", - "content": "Humanloop offers a broad range of hosting environments to meet the security and compliance needs of enterprise customers.\nOur menu of hosting options is as follows from basic to more advanced:\nDefault: Our multi-tenanted cloud offering is SOC2 compliant and hosted in AWS US-east region on AWS.\n\nRegion specific: Same as 1, but where additional region requirements for data storage are required - e.g. data can never leave the EU for GDPR reasons. We offer UK, EU and US guarantees for data storage regions.\n\nDedicated: We provision your own dedicated instance of Humanloop in your region of choice. 
With the added benefits:\nFull HIPAA compliant AWS setup.\n\nAbility to manage your own encryption keys in KMS.\n\nAbility to subscribe to application logging and cloudtrail infrastructure monitoring.\n\n\n\nSelf-hosted: You deploy an instance of Humanloop within your own VPC on AWS. We provide an infra as code setup with Pulumi to easily spin up a Humanloop instance in your VPC.", - "code_snippets": [] + "content": "Humanloop offers a broad range of hosting environments to meet the security and compliance needs of enterprise customers.\nOur menu of hosting options is as follows from basic to more advanced:\nDefault: Our multi-tenanted cloud offering is SOC2 compliant and hosted in AWS US-east region on AWS.\n\nRegion specific: Same as 1, but where additional region requirements for data storage are required - e.g. data can never leave the EU for GDPR reasons. We offer UK, EU and US guarantees for data storage regions.\n\nDedicated: We provision your own dedicated instance of Humanloop in your region of choice. With the added benefits:\nFull HIPAA compliant AWS setup.\n\nAbility to manage your own encryption keys in KMS.\n\nAbility to subscribe to application logging and cloudtrail infrastructure monitoring.\n\n\n\nSelf-hosted: You deploy an instance of Humanloop within your own VPC on AWS. We provide an infra as code setup with Pulumi to easily spin up a Humanloop instance in your VPC." }, { "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.supported-models", @@ -7446,11 +7402,10 @@ "authed": false, "type": "markdown", "description": "Humanloop supports all the major large language model providers, including OpenAI, Anthropic, Google, Azure, and more. Additionally, you can use your own custom models with the API and still benefit from the Humanloop platform.", - "content": "Humanloop supports all the major large language model providers, including OpenAI, Anthropic, Google, Azure, and more. Additionally, you can use your own custom models with the API and still benefit from the Humanloop platform.", - "code_snippets": [] + "content": "Humanloop supports all the major large language model providers, including OpenAI, Anthropic, Google, Azure, and more. Additionally, you can use your own custom models with the API and still benefit from the Humanloop platform." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.supported-models-providers-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.supported-models-providers", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/supported-models", @@ -7473,19 +7428,19 @@ ], "authed": false, "type": "markdown", - "hash": "#providers-", + "hash": "#providers", "content": "Here is a summary of which providers we support and whether\nProvider Models Cost information Token information \nOpenAI ✅ ✅ ✅ \nAnthropic ✅ ✅ ✅ \nGoogle ✅ ✅ ✅ \nAzure ✅ ✅ ✅ \nCohere ✅ ✅ ✅ \nLlama ✅ \nGroq ✅ \nAWS Bedrock Anthropic, Llama \nCustom ✅ User-defined User-defined \n\nAdding in more providers is driven by customer demand.
If you have a specific provider or model you would like to see supported, please reach out to us at support@humanloop.com.", "hierarchy": { "h2": { - "id": "providers-", - "title": "Providers " + "id": "providers", + "title": "Providers" } }, "level": "h2", "level_title": "Providers" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.supported-models-models-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.supported-models-models", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/supported-models", @@ -7508,12 +7463,12 @@ ], "authed": false, "type": "markdown", - "hash": "#models-", + "hash": "#models", "content": "Provider Key Max Prompt Tokens Max Output Tokens Cost per Prompt Token Cost per Output Token Tool Support Image Support \nOpenAI gpt-4 8192 4096 $0.00003 $0.00006 ✅ ❌ \nOpenAI gpt-4o 128000 4096 $0.000005 $0.000015 ✅ ✅ \nOpenAI gpt-4-turbo 128000 4096 $0.00001 $0.00003 ✅ ✅ \nOpenAI gpt-4-turbo-2024-04-09 128000 4096 $0.00001 $0.00003 ✅ ❌ \nOpenAI gpt-4-0 8192 4096 $0.00003 $0.00003 ✅ ❌ \nOpenAI gpt-4-32k 32768 4096 $0.00003 $0.00003 ✅ ❌ \nOpenAI gpt-4-1106-preview 128000 4096 $0.00001 $0.00003 ✅ ❌ \nOpenAI gpt-4-0125-preview 128000 4096 $0.00001 $0.00003 ✅ ❌ \nOpenAI gpt-4-vision 128000 4096 $0.00001 $0.00003 ✅ ✅ \nOpenAI gpt-4-1106-vision-preview 16385 4096 $0.0000015 $0.000002 ✅ ❌ \nOpenAI gpt-3.5-turbo 16385 4096 $0.0000015 $0.000002 ✅ ❌ \nOpenAI gpt-3.5-turbo-instruct 8192 4097 $0.0000015 $0.000002 ✅ ❌ \nOpenAI baggage-002 16384 16384 $0.0000004 $0.0000004 ✅ ❌ \nOpenAI davinci-002 16384 16384 $0.000002 $0.000002 ✅ ❌ \nOpenAI ft:gpt-3.5-turbo 4097 4096 $0.000003 $0.000006 ✅ ❌ \nOpenAI ft:davinci-002 16384 16384 $0.000002 $0.000002 ✅ ❌ \nOpenAI text-moderation 32768 32768 $0.000003 $0.000004 ✅ ❌ \nAnthropic claude-3-opus-20240229 200000 4096 $0.000015 $0.000075 ✅ ❌ \nAnthropic claude-3-sonnet-20240229 200000 4096 $0.000003 $0.000015 ✅ ❌ \nAnthropic claude-3-haiku-20240307 200000 4096 $0.00000025 $0.00000125 ✅ ❌ \nAnthropic claude-2.1 100000 4096 $0.00000025 $0.000024 ❌ ❌ \nAnthropic claude-2 100000 4096 $0.000008 $0.000024 ❌ ❌ \nAnthropic claude-instant-1.2 100000 4096 $0.000008 $0.000024 ❌ ❌ \nAnthropic claude-instant-1 100000 4096 $0.0000008 $0.0000024 ❌ ❌ \nGroq mixtral-8x7b-32768 32768 32768 $0.0 $0.0 ❌ ❌ \nGroq llama3-8b-8192 8192 8192 $0.0 $0.0 ❌ ❌ \nGroq llama3-70b-8192 8192 8192 $0.0 $0.0 ❌ ❌ \nGroq llama2-70b-4096 4096 4096 $0.0 $0.0 ❌ ❌ \nGroq gemma-7b-it 8192 8192 $0.0 $0.0 ❌ ❌ \nReplicate llama-3-70b-instruct 8192 8192 $0.00000065 $0.00000275 ❌ ❌ \nReplicate llama-3-70b 8192 8192 $0.00000065 $0.00000275 ❌ ❌ \nReplicate llama-3-8b-instruct 8192 8192 $0.00000005 $0.00000025 ❌ ❌ \nReplicate llama-3-8b 8192 8192 $0.00000005 $0.00000025 ❌ ❌ \nReplicate llama-2-70b 4096 4096 $0.00003 $0.00006 ❌ ❌ \nReplicate llama70b-v2 4096 4096 N/A N/A ❌ ❌ \nReplicate mixtral-8x7b 4096 4096 N/A N/A ❌ ❌ \nOpenAI_Azure gpt-4o 128000 4096 $0.000005 $0.000015 ✅ ✅ \nOpenAI_Azure gpt-4o-2024-05-13 128000 4096 $0.000005 $0.000015 ✅ ✅ \nOpenAI_Azure gpt-4-turbo-2024-04-09 128000 4096 $0.00003 $0.00006 ✅ ✅ \nOpenAI_Azure gpt-4 8192 4096 $0.00003 $0.00006 ✅ ❌ \nOpenAI_Azure gpt-4-0314 8192 4096 $0.00003 $0.00006 ✅ ❌ \nOpenAI_Azure gpt-4-32k 32768 4096 $0.00006 $0.00012 ✅ ❌ \nOpenAI_Azure gpt-4-0125 128000 4096 $0.00001 $0.00003 ✅ ❌ \nOpenAI_Azure gpt-4-1106 128000 4096 $0.00001 $0.00003 ✅ ❌ \nOpenAI_Azure gpt-4-0613 8192 4096 $0.00003 $0.00006 ✅ ❌ \nOpenAI_Azure gpt-4-turbo 128000 4096 $0.00001 $0.00003 ✅ ❌ 
\nOpenAI_Azure gpt-4-turbo-vision 128000 4096 $0.000003 $0.000004 ✅ ✅ \nOpenAI_Azure gpt-4-vision 128000 4096 $0.000003 $0.000004 ✅ ✅ \nOpenAI_Azure gpt-35-turbo-1106 16384 4096 $0.0000015 $0.000002 ✅ ❌ \nOpenAI_Azure gpt-35-turbo-0125 16384 4096 $0.0000005 $0.0000015 ✅ ❌ \nOpenAI_Azure gpt-35-turbo-16k 16384 4096 $0.000003 $0.000004 ✅ ❌ \nOpenAI_Azure gpt-35-turbo 4097 4096 $0.0000015 $0.000002 ✅ ❌ \nOpenAI_Azure gpt-3.5-turbo-instruct 4097 4096 $0.0000015 $0.000002 ✅ ❌ \nOpenAI_Azure gpt-35-turbo-instruct 4097 4097 $0.0000015 $0.000002 ✅ ❌ \nCohere command-r 128000 4000 $0.0000005 $0.0000015 ❌ ❌ \nCohere command-light 4096 4096 $0.000015 $0.000015 ❌ ❌ \nCohere command-r-plus 128000 4000 $0.000003 $0.000015 ❌ ❌ \nCohere command-nightly 4096 4096 $0.000015 $0.000015 ❌ ❌ \nCohere command 4096 4096 $0.000015 $0.000015 ❌ ❌ \nCohere command-medium-beta 4096 4096 $0.000015 $0.000015 ❌ ❌ \nCohere command-xlarge-beta 4096 4096 $0.000015 $0.000015 ❌ ❌ \nGoogle gemini-pro-vision 16384 2048 $0.00000025 $0.0000005 ❌ ✅ \nGoogle gemini-1.0-pro-vision 16384 2048 $0.00000025 $0.0000005 ❌ ✅ \nGoogle gemini-pro 32760 8192 $0.00000025 $0.0000005 ❌ ❌ \nGoogle gemini-1.0-pro 32760 8192 $0.00000025 $0.0000005 ❌ ❌ \nGoogle gemini-1.5-pro-latest 1000000 8192 $0.00000025 $0.0000005 ❌ ❌ \nGoogle gemini-1.5-pro 1000000 8192 $0.00000025 $0.0000005 ❌ ❌ \nGoogle gemini-experimental 1000000 8192 $0.00000025 $0.0000005 ❌ ❌", "hierarchy": { "h2": { - "id": "models-", - "title": "Models " + "id": "models", + "title": "Models" } }, "level": "h2", @@ -7544,11 +7499,10 @@ "authed": false, "type": "markdown", "description": "The .prompt file format is a human-readable and version-control-friendly format for storing model configurations.\nOur file format for serialising prompts to store alongside your source code.", - "content": "Our .prompt file format is a serialized version of a model config that is designed to be human-readable and suitable for checking into your version control systems alongside your code.", - "code_snippets": [] + "content": "Our .prompt file format is a serialized version of a model config that is designed to be human-readable and suitable for checking into your version control systems alongside your code." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.prompt-file-format-format-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.prompt-file-format-format", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/prompt-file-format", @@ -7571,19 +7525,19 @@ ], "authed": false, "type": "markdown", - "hash": "#format-", + "hash": "#format", "content": "The .prompt file is heavily inspired by MDX, with model and hyperparameters specified in a YAML header alongside a JSX-inspired format for your Chat Template.", "hierarchy": { "h2": { - "id": "format-", - "title": "Format " + "id": "format", + "title": "Format" } }, "level": "h2", "level_title": "Format" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.prompt-file-format-basic-examples-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.prompt-file-format-basic-examples", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/prompt-file-format", @@ -7606,8 +7560,7 @@ ], "authed": false, "type": "markdown", - "hash": "#basic-examples-", - "content": "", + "hash": "#basic-examples", "code_snippets": [ { "lang": "jsx", @@ -7632,19 +7585,19 @@ ], "hierarchy": { "h2": { - "id": "basic-examples-", - "title": "Basic examples " + "id": "basic-examples", + "title": "Basic examples" }, "h3": { - "id": "basic-examples-", - "title": "Basic examples " + "id": "basic-examples", + "title": "Basic examples" } }, "level": "h3", "level_title": "Basic examples" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.prompt-file-format-multi-modality-and-images-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.prompt-file-format-multi-modality-and-images", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/prompt-file-format", @@ -7667,7 +7620,7 @@ ], "authed": false, "type": "markdown", - "hash": "#multi-modality-and-images-", + "hash": "#multi-modality-and-images", "content": "Images can be specified using nested tags within a message. To specify text alongside the image, use a tag.", "code_snippets": [ { @@ -7678,19 +7631,19 @@ ], "hierarchy": { "h2": { - "id": "multi-modality-and-images-", - "title": "Multi-modality and Images " + "id": "multi-modality-and-images", + "title": "Multi-modality and Images" }, "h3": { - "id": "multi-modality-and-images-", - "title": "Multi-modality and Images " + "id": "multi-modality-and-images", + "title": "Multi-modality and Images" } }, "level": "h3", "level_title": "Multi-modality and Images" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.prompt-file-format-tools-tool-calls-and-tool-responses-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.prompt-file-format-tools-tool-calls-and-tool-responses", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/prompt-file-format", @@ -7713,7 +7666,7 @@ ], "authed": false, "type": "markdown", - "hash": "#tools-tool-calls-and-tool-responses-", + "hash": "#tools-tool-calls-and-tool-responses", "content": "Specify the tools available to the model as a JSON list in the YAML header.\nTool calls in assistant messages can be added with nested tags. A tag within an tag denotes a tool call of type: \"function\", and requires the attributes name and id. 
The text wrapped in a tag should be a JSON-formatted string containing the tool call's arguments.\nTool call responses can then be added with tags after the message.", "code_snippets": [ { @@ -7726,12 +7679,12 @@ ], "hierarchy": { "h2": { - "id": "tools-tool-calls-and-tool-responses-", - "title": "Tools, tool calls and tool responses " + "id": "tools-tool-calls-and-tool-responses", + "title": "Tools, tool calls and tool responses" }, "h3": { - "id": "tools-tool-calls-and-tool-responses-", - "title": "Tools, tool calls and tool responses " + "id": "tools-tool-calls-and-tool-responses", + "title": "Tools, tool calls and tool responses" } }, "level": "h3", @@ -7762,11 +7715,10 @@ "authed": false, "type": "markdown", "description": "Example projects demonstrating usage of Humanloop for prompt management, observability, and evaluation.\nA growing collection of example projects demonstrating usage of Humanloop.", - "content": "Visit our Github examples repo for a collection of usage examples of Humanloop.", - "code_snippets": [] + "content": "Visit our Github examples repo for a collection of usage examples of Humanloop." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.example-projects-contents-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.example-projects-contents", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/example-projects", @@ -7789,12 +7741,12 @@ ], "authed": false, "type": "markdown", - "hash": "#contents-", + "hash": "#contents", "content": "Github Description SDK Chat Logging Tool Calling Streaming \nchatbot-starter An open-source AI chatbot app template built with Next.js, the Vercel AI SDK, OpenAI, and Humanloop. TypeScript ✔️ ✔️ ✔️ \nasap CLI assistant for solving dev issues in your projects or the command line. TypeScript ✔️ ✔️ ✔️", "hierarchy": { "h2": { - "id": "contents-", - "title": "Contents " + "id": "contents", + "title": "Contents" } }, "level": "h2", @@ -7825,11 +7777,10 @@ "authed": false, "type": "markdown", "description": "This reference provides details about the Python environment and supported packages.\nHumanloop provides a secure Python runtime to support defining code based Evaluator and Tool implementations.", - "content": "Humanloop allows you to specify the runtime for your code Evaluators and Tool implementations in order\nto run them natively with your Prompts in our Editor and UI based Evaluation workflows.", - "code_snippets": [] + "content": "Humanloop allows you to specify the runtime for your code Evaluators and Tool implementations in order\nto run them natively with your Prompts in our Editor and UI based Evaluation workflows." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.python-environment-environment-details-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.python-environment-environment-details", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/python-environment", @@ -7852,7 +7803,7 @@ ], "authed": false, "type": "markdown", - "hash": "#environment-details-", + "hash": "#environment-details", "content": "Python version: 3.11.4\nIf you have any specific packages you would like to see here, please let us know at support@humanloop.com.", "code_snippets": [ { @@ -7861,8 +7812,8 @@ ], "hierarchy": { "h2": { - "id": "environment-details-", - "title": "Environment details " + "id": "environment-details", + "title": "Environment details" } }, "level": "h2", @@ -7893,11 +7844,10 @@ "authed": false, "type": "markdown", "description": "Explore Humanloop's native, API, and third-party integrations to seamlessly connect with other tools and services, improving efficiency and expanding functionality.\nHumanloop offers a variety of integrations to enhance your workflow and extend the platform's capabilities.", - "content": "Humanloop offers a variety of integrations to enhance your workflow and extend the platform's capabilities. These integrations allow you to seamlessly connect Humanloop with other tools and services, improving efficiency and expanding functionality.", - "code_snippets": [] + "content": "Humanloop offers a variety of integrations to enhance your workflow and extend the platform's capabilities. These integrations allow you to seamlessly connect Humanloop with other tools and services, improving efficiency and expanding functionality." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.integrations-native-integrations-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.integrations-native-integrations", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/integrations", @@ -7920,19 +7870,19 @@ ], "authed": false, "type": "markdown", - "hash": "#native-integrations-", + "hash": "#native-integrations", "content": "These integrations are built directly into Humanloop and offer seamless, out-of-the-box connectivity:\nGit: Integrate your Git repositories (GitHub, GitLab, Bitbucket) with Humanloop for syncronized version control and collaboration.\n\nPinecone Search: Perform vector similarity searches using Pinecone vector DB and OpenAI embeddings.\n\nPostman: Simplify API testing and development with Postman integration.\n\nZapier: Automate workflows by connecting Humanloop with thousands of apps.\n\nWorkOS: Streamline enterprise features like Single Sign-On (SSO) and directory sync.", "hierarchy": { "h2": { - "id": "native-integrations-", - "title": "Native Integrations: " + "id": "native-integrations", + "title": "Native Integrations:" } }, "level": "h2", "level_title": "Native Integrations:" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.integrations-api-integrations-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.integrations-api-integrations", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/integrations", @@ -7955,19 +7905,19 @@ ], "authed": false, "type": "markdown", - "hash": "#api-integrations-", + "hash": "#api-integrations", "content": "Expand Humanloop's capabilities with these API-based integrations:\nGoogle Search - Access Google search results via 
the SerpAPI.\n\nGET API - Send GET requests to external APIs directly from Humanloop.", "hierarchy": { "h2": { - "id": "api-integrations-", - "title": "API Integrations " + "id": "api-integrations", + "title": "API Integrations" } }, "level": "h2", "level_title": "API Integrations" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.integrations-third-party-integrations-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.integrations-third-party-integrations", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/integrations", @@ -7990,19 +7940,19 @@ ], "authed": false, "type": "markdown", - "hash": "#third-party-integrations-", + "hash": "#third-party-integrations", "content": "Leverage Humanloop's API to create custom integrations with other platforms and services. Explore the following resources to get started:\nAPI Reference Guide: Comprehensive documentation of Humanloop's API endpoints.\n\nSDK Overview: Information on available SDKs for easier integration.\n\nTool Usage: Learn how to extend Humanloop's functionality with custom tools.", "hierarchy": { "h2": { - "id": "third-party-integrations-", - "title": "Third-Party Integrations: " + "id": "third-party-integrations", + "title": "Third-Party Integrations:" } }, "level": "h2", "level_title": "Third-Party Integrations:" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.integrations-benefits-of-integrations-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.integrations-benefits-of-integrations", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/integrations", @@ -8025,12 +7975,12 @@ ], "authed": false, "type": "markdown", - "hash": "#benefits-of-integrations-", + "hash": "#benefits-of-integrations", "content": "Streamline workflows by connecting Humanloop with your existing tools\n\nExtend Humanloop's capabilities with additional data sources and services\n\nAutomate tasks and reduce manual work\n\nCustomize Humanloop to fit your specific use case and requirements\n\n\nFor assistance with integrations or to request a new integration, please contact our support team at support@humanloop.com", "hierarchy": { "h2": { - "id": "benefits-of-integrations-", - "title": "Benefits of Integrations " + "id": "benefits-of-integrations", + "title": "Benefits of Integrations" } }, "level": "h2", @@ -8061,11 +8011,10 @@ "authed": false, "type": "markdown", "description": "Learn about Humanloop's commitment to security, data protection, and compliance with industry standards.\nAn overview of Humanloop's security and compliance measures", - "content": "Humanloop is deeply committed to AI governance, security, and compliance. View our Trust Report and Policy Pages to see all of our certifications, request documentation, and view high-level details on the controls we adhere to.\nHumanloop never trains on user data.", - "code_snippets": [] + "content": "Humanloop is deeply committed to AI governance, security, and compliance. View our Trust Report and Policy Pages to see all of our certifications, request documentation, and view high-level details on the controls we adhere to.\nHumanloop never trains on user data." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-humanloop-security-offerings-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-humanloop-security-offerings", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8088,19 +8037,19 @@ ], "authed": false, "type": "markdown", - "hash": "#humanloop-security-offerings-", + "hash": "#humanloop-security-offerings", "content": "Data Privacy and Security\nActivate LLMs with your private data, safely and securely. You own your data and models.\n\n\n\nMonitoring & Support\nEnd-to-end monitoring of your AI applications, support guarantees from trusted AI experts.\n\n\n\nData Encryption\n\nData Management & AI Governance", "hierarchy": { "h2": { - "id": "humanloop-security-offerings-", - "title": "Humanloop Security Offerings: " + "id": "humanloop-security-offerings", + "title": "Humanloop Security Offerings:" } }, "level": "h2", "level_title": "Humanloop Security Offerings:" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-authentication--access-control---humanloop-web-app-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-authentication--access-control---humanloop-web-app", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8123,23 +8072,23 @@ ], "authed": false, "type": "markdown", - "hash": "#authentication--access-control---humanloop-web-app-", + "hash": "#authentication--access-control---humanloop-web-app", "content": "All users of the Humanloop web application require a valid email address and password to use the system:\nEmail addresses are verified on account creation.\n\nPasswords are verified as sufficiently complex.\n\nPasswords are stored using a one-way salted hash.\n\nUser access logs are maintained including date, time, user ID, relevant URL, operation performed, and source IP address for audit purposes.", "hierarchy": { "h2": { - "id": "authentication--access-control---humanloop-web-app-", - "title": "Authentication & Access Control - Humanloop Web App " + "id": "authentication--access-control---humanloop-web-app", + "title": "Authentication & Access Control - Humanloop Web App" }, "h3": { - "id": "authentication--access-control---humanloop-web-app-", - "title": "Authentication & Access Control - Humanloop Web App " + "id": "authentication--access-control---humanloop-web-app", + "title": "Authentication & Access Control - Humanloop Web App" } }, "level": "h3", "level_title": "Authentication & Access Control - Humanloop Web App" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-authentication--access-control---humanloop-api-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-authentication--access-control---humanloop-api", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8162,23 +8111,23 @@ ], "authed": false, "type": "markdown", - "hash": "#authentication--access-control---humanloop-api-", + "hash": "#authentication--access-control---humanloop-api", "content": "All users of the API are required to authenticate with a unique API token header:\nFollows the OAuth 2.0 pattern.\n\nAPI tokens are only visible once on creation and then obfuscated.\n\nUsers can manage the expiry 
of API keys.\n\nAPI token access logs are maintained including date, time, user ID, relevant URL, operation performed, and source IP address for audit purposes.", "hierarchy": { "h2": { - "id": "authentication--access-control---humanloop-api-", - "title": "Authentication & Access Control - Humanloop API " + "id": "authentication--access-control---humanloop-api", + "title": "Authentication & Access Control - Humanloop API" }, "h3": { - "id": "authentication--access-control---humanloop-api-", - "title": "Authentication & Access Control - Humanloop API " + "id": "authentication--access-control---humanloop-api", + "title": "Authentication & Access Control - Humanloop API" } }, "level": "h3", "level_title": "Authentication & Access Control - Humanloop API" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-additional-resources-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-additional-resources", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8201,23 +8150,23 @@ ], "authed": false, "type": "markdown", - "hash": "#additional-resources-", + "hash": "#additional-resources", "content": "Role-based access control (RBAC) - We implement strict role-based access control (RBAC) for all our systems.\n\nMulti-factor authentication (MFA) - MFA is enforced for all employee accounts.", "hierarchy": { "h2": { - "id": "additional-resources-", - "title": "Additional Resources " + "id": "additional-resources", + "title": "Additional Resources" }, "h3": { - "id": "additional-resources-", - "title": "Additional Resources " + "id": "additional-resources", + "title": "Additional Resources" } }, "level": "h3", "level_title": "Additional Resources" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-encryption-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-encryption", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8240,23 +8189,23 @@ ], "authed": false, "type": "markdown", - "hash": "#encryption-", + "hash": "#encryption", "content": "Humanloop follows best practices for data management and encryption. All data in transit is secured with TLS/SSL, and all data at rest is encrypted using the AES-256 algorithm. All encryption keys are managed using AWS Key Management Service (KMS) as part of the VPC definition.\nAll data in transit is encrypted using TLS 1.2 or higher.\n\nData at rest is encrypted using AES-256 encryption.", "hierarchy": { "h2": { - "id": "encryption-", - "title": "Encryption " + "id": "encryption", + "title": "Encryption" }, "h3": { - "id": "encryption-", - "title": "Encryption " + "id": "encryption", + "title": "Encryption" } }, "level": "h3", "level_title": "Encryption" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-infrastructure-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-infrastructure", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8279,23 +8228,23 @@ ], "authed": false, "type": "markdown", - "hash": "#infrastructure-", + "hash": "#infrastructure", "content": "All sensitive data is encrypted in transit. 
For Self-Hosted Cloud (VPC) environments, network traffic is also encrypted in transit and at rest to meet HIPAA requirements. Sensitive application data is only ever processed within the ECS cluster and stored in Aurora. To request a network infrastructure diagram or more information, please contact privacy@humanloop.com.\nLearn More\nFor more information about how Humanloop processes user data, visit our Data Management & Hosting Options page.", "hierarchy": { "h2": { - "id": "infrastructure-", - "title": "Infrastructure " + "id": "infrastructure", + "title": "Infrastructure" }, "h3": { - "id": "infrastructure-", - "title": "Infrastructure " + "id": "infrastructure", + "title": "Infrastructure" } }, "level": "h3", "level_title": "Infrastructure" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-soc2-type-ii-compliance-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-soc2-type-ii-compliance", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8318,23 +8267,23 @@ ], "authed": false, "type": "markdown", - "hash": "#soc2-type-ii-compliance-", + "hash": "#soc2-type-ii-compliance", "content": "Humanloop is fully SOC2 Type II compliant. Learn more via our Trust Center and our Security Policy page.", "hierarchy": { "h2": { - "id": "soc2-type-ii-compliance-", - "title": "SOC2 Type II Compliance " + "id": "soc2-type-ii-compliance", + "title": "SOC2 Type II Compliance" }, "h3": { - "id": "soc2-type-ii-compliance-", - "title": "SOC2 Type II Compliance " + "id": "soc2-type-ii-compliance", + "title": "SOC2 Type II Compliance" } }, "level": "h3", "level_title": "SOC2 Type II Compliance" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-hipaa-compliance-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-hipaa-compliance", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8357,23 +8306,23 @@ ], "authed": false, "type": "markdown", - "hash": "#hipaa-compliance-", + "hash": "#hipaa-compliance", "content": "Humanloop actively works with paying customers to help them achieve HIPAA compliance. Official certification is pending.\nTo request references or more information, contact sales@humanloop.com.\nHIPAA Compliance via Hosting Environment:\nHumanloop offers dedicated platform instances on AWS with HIPAA provisions for enterprise customers that have particularly sensitive data. 
These provisions include:\nThe ability for enterprises to manage their own encryption keys.\n\nA specific AWS Fargate deployment that follows HIPAA practices.", "hierarchy": { "h2": { - "id": "hipaa-compliance-", - "title": "HIPAA Compliance " + "id": "hipaa-compliance", + "title": "HIPAA Compliance" }, "h3": { - "id": "hipaa-compliance-", - "title": "HIPAA Compliance " + "id": "hipaa-compliance", + "title": "HIPAA Compliance" } }, "level": "h3", "level_title": "HIPAA Compliance" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-gdpr-compliance-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-gdpr-compliance", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8396,23 +8345,23 @@ ], "authed": false, "type": "markdown", - "hash": "#gdpr-compliance-", + "hash": "#gdpr-compliance", "content": "We are fully compliant with the General Data Protection Regulation (GDPR). This includes:\nData minimization practices\n\nUser rights management\n\nData processing agreements", "hierarchy": { "h2": { - "id": "gdpr-compliance-", - "title": "GDPR Compliance " + "id": "gdpr-compliance", + "title": "GDPR Compliance" }, "h3": { - "id": "gdpr-compliance-", - "title": "GDPR Compliance " + "id": "gdpr-compliance", + "title": "GDPR Compliance" } }, "level": "h3", "level_title": "GDPR Compliance" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-how-humanloop-helps-customers-maintain-compliance-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-how-humanloop-helps-customers-maintain-compliance", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8435,19 +8384,19 @@ ], "authed": false, "type": "markdown", - "hash": "#how-humanloop-helps-customers-maintain-compliance-", + "hash": "#how-humanloop-helps-customers-maintain-compliance", "content": "Self-Hosted Cloud (VPC) environments\n\nData Processing Agreements (DPAs)\n\nData Minimization and Retention Policies\n\nRole-Based Access Controls\n\nData Encryption\n\nRobust Security Measures\n\nIncident Response Plan SLAs\n\nRegular Training & Audits", "hierarchy": { "h2": { - "id": "how-humanloop-helps-customers-maintain-compliance-", - "title": "How Humanloop helps customers maintain compliance: " + "id": "how-humanloop-helps-customers-maintain-compliance", + "title": "How Humanloop helps customers maintain compliance:" } }, "level": "h2", "level_title": "How Humanloop helps customers maintain compliance:" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-learn-more-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.security-and-compliance-learn-more", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/security-and-compliance", @@ -8470,16 +8419,16 @@ ], "authed": false, "type": "markdown", - "hash": "#learn-more-", + "hash": "#learn-more", "content": "Cloud Hosting Options\n\nData Management Protocols\n\nSecurity Policy\n\nPrivacy Policy\n\nTrust Center\n\n\nTo request references or more information, contact sales@humanloop.com", "hierarchy": { "h2": { - "id": "learn-more-", - "title": "Learn more: " + "id": "learn-more", + "title": "Learn more:" }, "h3": { - "id": "learn-more-", - "title": "Learn more: " + "id": "learn-more", + "title": "Learn more:" } 
}, "level": "h3", @@ -8509,11 +8458,10 @@ ], "authed": false, "type": "markdown", - "description": "Discover Humanloop's robust data management practices and state-of-the-art encryption methods ensuring maximum security and compliance for AI applications.\nAn overview of the data management practices and encryption methodologies used by Humanloop", - "code_snippets": [] + "description": "Discover Humanloop's robust data management practices and state-of-the-art encryption methods ensuring maximum security and compliance for AI applications.\nAn overview of the data management practices and encryption methodologies used by Humanloop" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-data-handling-and-segregation-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-data-handling-and-segregation", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/data-management", @@ -8536,19 +8484,19 @@ ], "authed": false, "type": "markdown", - "hash": "#data-handling-and-segregation-", + "hash": "#data-handling-and-segregation", "content": "Separate environments are provisioned and maintained for development, quality assurance/user acceptance testing, and production to ensure data segregation at the environment level.", "hierarchy": { "h3": { - "id": "data-handling-and-segregation-", - "title": "Data Handling and Segregation " + "id": "data-handling-and-segregation", + "title": "Data Handling and Segregation" } }, "level": "h3", "level_title": "Data Handling and Segregation" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-data-classification--access-control-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-data-classification--access-control", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/data-management", @@ -8571,19 +8519,19 @@ ], "authed": false, "type": "markdown", - "hash": "#data-classification--access-control-", + "hash": "#data-classification--access-control", "content": "All platform data received from the user and data derived from user data is classified as sensitive. All platform audit and telemetry data that does not contain PII and reference to specific user data is classified as not sensitive.\nBy default, only authenticated users can see their own sensitive data. Data classified as not sensitive can be accessed by dedicated Humanloop support staff using a secure VPN connection to the private network of the VPC for the target environment. This access is for debugging issues and improving system performance. 
The Terms of Service define further details around data ownership and access on a case-by-case basis.", "hierarchy": { "h3": { - "id": "data-classification--access-control-", - "title": "Data Classification & Access Control " + "id": "data-classification--access-control", + "title": "Data Classification & Access Control" } }, "level": "h3", "level_title": "Data Classification & Access Control" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-encryption-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-encryption", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/data-management", @@ -8606,23 +8554,23 @@ ], "authed": false, "type": "markdown", - "hash": "#encryption-", + "hash": "#encryption", "content": "Humanloop follows best practices for data management and encryption. All data in transit is secured with TLS/SSL, and all data at rest is encrypted using the AES-256 algorithm. All encryption keys are managed using AWS Key Management Service (KMS) as part of the VPC definition.", "hierarchy": { "h3": { - "id": "encryption-", - "title": "Encryption " + "id": "encryption", + "title": "Encryption" }, "h4": { - "id": "encryption-", - "title": "Encryption " + "id": "encryption", + "title": "Encryption" } }, "level": "h4", "level_title": "Encryption" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-infrastructure-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-infrastructure", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/data-management", @@ -8645,19 +8593,19 @@ ], "authed": false, "type": "markdown", - "hash": "#infrastructure-", + "hash": "#infrastructure", "content": "All sensitive data is encrypted in transit. For Self-Hosted Cloud (VPC) environments, network traffic is also encrypted in transit and at rest to meet HIPAA requirements. Sensitive application data is only processed within the ECS cluster and stored in Aurora. 
To request a network infrastructure diagram or more information, please contact privacy@humanloop.com.", "hierarchy": { "h3": { - "id": "infrastructure-", - "title": "Infrastructure " + "id": "infrastructure", + "title": "Infrastructure" } }, "level": "h3", "level_title": "Infrastructure" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-learn-more-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-learn-more", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/data-management", @@ -8680,19 +8628,19 @@ ], "authed": false, "type": "markdown", - "hash": "#learn-more-", + "hash": "#learn-more", "content": "For more information on how Humanloop processes user data, visit our Security & Compliance page.", "hierarchy": { "h3": { - "id": "learn-more-", - "title": "Learn More " + "id": "learn-more", + "title": "Learn More" } }, "level": "h3", "level_title": "Learn More" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-data-storage-retention-and-recovery-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-data-storage-retention-and-recovery", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/data-management", @@ -8715,19 +8663,19 @@ ], "authed": false, "type": "markdown", - "hash": "#data-storage-retention-and-recovery-", + "hash": "#data-storage-retention-and-recovery", "content": "All platform data is stored in a primary database server with multi-availability zone replication. Platform data is retained indefinitely and backed up daily in a secure and encrypted manner until a request is made by the contractual owners of that data to remove it, in accordance with GDPR guidelines.\nHumanloop's Terms of Service define the contractual owner of the user data and data derived from the user data. A semi-automated disaster recovery process is in place to restore the database to a specified point-in-time backup as required.", "hierarchy": { "h3": { - "id": "data-storage-retention-and-recovery-", - "title": "Data Storage, Retention, and Recovery " + "id": "data-storage-retention-and-recovery", + "title": "Data Storage, Retention, and Recovery" } }, "level": "h3", "level_title": "Data Storage, Retention, and Recovery" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-data-breach-response-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-data-breach-response", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/data-management", @@ -8750,19 +8698,19 @@ ], "authed": false, "type": "markdown", - "hash": "#data-breach-response-", + "hash": "#data-breach-response", "content": "Any data breaches will be communicated to all impacted Humanloop users and partners within 24 hours, along with consequences and mitigations. 
Breaches will be dealt with in accordance with the Humanloop data breach response policy, which is tested annually.", "hierarchy": { "h3": { - "id": "data-breach-response-", - "title": "Data Breach Response " + "id": "data-breach-response", + "title": "Data Breach Response" } }, "level": "h3", "level_title": "Data Breach Response" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-data-portability-and-return-", + "objectID": "humanloop:humanloop.com:root..v5.uv.docs.docs.reference.data-management-data-portability-and-return", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/reference/data-management", @@ -8785,12 +8733,12 @@ ], "authed": false, "type": "markdown", - "hash": "#data-portability-and-return-", + "hash": "#data-portability-and-return", "content": "Within 30 days post-contract termination, users can request the return of their data and derived data (as defined by the Terms of Service). Humanloop provides this data via downloadable files in comma-separated value (.csv) or .json formats.", "hierarchy": { "h3": { - "id": "data-portability-and-return-", - "title": "Data Portability and Return " + "id": "data-portability-and-return", + "title": "Data Portability and Return" } }, "level": "h3", @@ -8815,7 +8763,6 @@ ], "authed": false, "type": "markdown", - "description": "", "content": "The Humanloop API allows you to interact with Humanloop and model providers programmatically.\nYou can do this through HTTP requests from any language or via our official Python or TypeScript SDK.\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)\n\n\nGuides and further details about key concepts can be found in our docs.", "code_snippets": [ { @@ -8865,11 +8812,10 @@ "authed": false, "type": "markdown", "description": "Learn how to integrate Humanloop into your applications using our Python and TypeScript SDKs or REST API.", - "content": "The Humanloop platform can be accessed through the API or through our Python and TypeScript SDKs.", - "code_snippets": [] + "content": "The Humanloop platform can be accessed through the API or through our Python and TypeScript SDKs." 
}, { - "objectID": "humanloop:humanloop.com:root..v5.uv.api-reference.api-reference.introduction.sdks-usage-examples-", + "objectID": "humanloop:humanloop.com:root..v5.uv.api-reference.api-reference.introduction.sdks-usage-examples", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/api-reference/sdks", @@ -8892,8 +8838,7 @@ ], "authed": false, "type": "markdown", - "hash": "#usage-examples-", - "content": "", + "hash": "#usage-examples", "code_snippets": [ { "lang": "shell", @@ -8938,8 +8883,8 @@ ], "hierarchy": { "h3": { - "id": "usage-examples-", - "title": "Usage Examples " + "id": "usage-examples", + "title": "Usage Examples" } }, "level": "h3", @@ -8969,11 +8914,10 @@ ], "authed": false, "type": "markdown", - "description": "This page provides a list of the error codes and messages you may encounter when using the Humanloop API.\nIn the event an issue occurs with our system, or with one of the model providers we integrate with, our API will raise a predictable and interpretable error.", - "code_snippets": [] + "description": "This page provides a list of the error codes and messages you may encounter when using the Humanloop API.\nIn the event an issue occurs with our system, or with one of the model providers we integrate with, our API will raise a predictable and interpretable error." }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.api-reference.api-reference.introduction.errors-http-error-codes-", + "objectID": "humanloop:humanloop.com:root..v5.uv.api-reference.api-reference.introduction.errors-http-error-codes", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/api-reference/errors", @@ -8996,19 +8940,19 @@ ], "authed": false, "type": "markdown", - "hash": "#http-error-codes-", + "hash": "#http-error-codes", "content": "Our API will return one of the following HTTP error codes in the event of an issue:\n\n\n\n\nYour request was improperly formatted or presented.\n\n\nYour API key is incorrect or missing, or your user does not have the rights to access the relevant resource.\n\n\nThe requested resource could not be located.\n\n\nModifying the resource would leave it in an illegal state.\n\n\nYour request was properly formatted but contained invalid instructions or did not match the fields required by the endpoint.\n\n\nYou've exceeded the maximum allowed number of requests in a given time period.\n\n\nAn unexpected issue occurred on the server.\n\n\nThe service is temporarily overloaded and you should try again.", "hierarchy": { "h3": { - "id": "http-error-codes-", - "title": "HTTP error codes " + "id": "http-error-codes", + "title": "HTTP error codes" } }, "level": "h3", "level_title": "HTTP error codes" }, { - "objectID": "humanloop:humanloop.com:root..v5.uv.api-reference.api-reference.introduction.errors-error-details-", + "objectID": "humanloop:humanloop.com:root..v5.uv.api-reference.api-reference.introduction.errors-error-details", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v5/api-reference/errors", @@ -9031,7 +8975,7 @@ ], "authed": false, "type": "markdown", - "hash": "#error-details-", + "hash": "#error-details", "content": "Our prompt/call endpoint acts as a unified interface across all popular model providers. The error returned by this endpoint may be raised by the model provider's system. 
Details of the error are returned in the detail object of the response.", "code_snippets": [ { @@ -9041,8 +8985,8 @@ ], "hierarchy": { "h2": { - "id": "error-details-", - "title": "Error details " + "id": "error-details", + "title": "Error details" } }, "level": "h2", @@ -9066,14 +9010,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "September 1900", - "pathname": "/docs/v5/changelog/9" } ], "version": { @@ -9106,14 +9042,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "September 1900", - "pathname": "/docs/v5/changelog/9" } ], "version": { @@ -9146,14 +9074,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "September 1900", - "pathname": "/docs/v5/changelog/9" } ], "version": { @@ -9186,14 +9106,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "September 1900", - "pathname": "/docs/v5/changelog/9" } ], "version": { @@ -9226,14 +9138,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "September 1900", - "pathname": "/docs/v5/changelog/9" } ], "version": { @@ -9271,14 +9175,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9311,14 +9207,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9351,14 +9239,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9391,14 +9271,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9431,14 +9303,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9471,14 +9335,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9511,14 +9367,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9551,14 +9399,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9591,14 +9431,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 
1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9631,14 +9463,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9671,14 +9495,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9721,14 +9537,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -9761,14 +9569,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -9801,14 +9601,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -9841,14 +9633,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -9881,14 +9665,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -9921,14 +9697,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "June 1900", - "pathname": "/docs/v5/changelog/6" } ], "version": { @@ -9961,14 +9729,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "June 1900", - "pathname": "/docs/v5/changelog/6" } ], "version": { @@ -10001,14 +9761,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "June 1900", - "pathname": "/docs/v5/changelog/6" } ], "version": { @@ -10041,14 +9793,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "June 1900", - "pathname": "/docs/v5/changelog/6" } ], "version": { @@ -10081,14 +9825,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "June 1900", - "pathname": "/docs/v5/changelog/6" } ], "version": { @@ -10121,14 +9857,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "June 1900", - "pathname": "/docs/v5/changelog/6" } ], "version": { @@ -10161,14 +9889,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "June 1900", - "pathname": "/docs/v5/changelog/6" } ], "version": { @@ -10201,14 +9921,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "May 
1900", - "pathname": "/docs/v5/changelog/5" } ], "version": { @@ -10241,14 +9953,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "May 1900", - "pathname": "/docs/v5/changelog/5" } ], "version": { @@ -10281,14 +9985,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "May 1900", - "pathname": "/docs/v5/changelog/5" } ], "version": { @@ -10321,14 +10017,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "May 1900", - "pathname": "/docs/v5/changelog/5" } ], "version": { @@ -10361,14 +10049,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "May 1900", - "pathname": "/docs/v5/changelog/5" } ], "version": { @@ -10401,14 +10081,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "May 1900", - "pathname": "/docs/v5/changelog/5" } ], "version": { @@ -10441,14 +10113,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "April 1900", - "pathname": "/docs/v5/changelog/4" } ], "version": { @@ -10481,14 +10145,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "April 1900", - "pathname": "/docs/v5/changelog/4" } ], "version": { @@ -10521,14 +10177,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "April 1900", - "pathname": "/docs/v5/changelog/4" } ], "version": { @@ -10561,14 +10209,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "April 1900", - "pathname": "/docs/v5/changelog/4" } ], "version": { @@ -10601,14 +10241,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "April 1900", - "pathname": "/docs/v5/changelog/4" } ], "version": { @@ -10641,14 +10273,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "April 1900", - "pathname": "/docs/v5/changelog/4" } ], "version": { @@ -10681,14 +10305,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "April 1900", - "pathname": "/docs/v5/changelog/4" } ], "version": { @@ -10721,14 +10337,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "March 1900", - "pathname": "/docs/v5/changelog/3" } ], "version": { @@ -10761,14 +10369,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "March 1900", - "pathname": "/docs/v5/changelog/3" } ], "version": { @@ -10801,14 +10401,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { 
- "title": "March 1900", - "pathname": "/docs/v5/changelog/3" } ], "version": { @@ -10841,14 +10433,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "March 1900", - "pathname": "/docs/v5/changelog/3" } ], "version": { @@ -10881,14 +10465,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "February 1900", - "pathname": "/docs/v5/changelog/2" } ], "version": { @@ -10921,14 +10497,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "February 1900", - "pathname": "/docs/v5/changelog/2" } ], "version": { @@ -10961,14 +10529,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "February 1900", - "pathname": "/docs/v5/changelog/2" } ], "version": { @@ -11001,14 +10561,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "February 1900", - "pathname": "/docs/v5/changelog/2" } ], "version": { @@ -11041,14 +10593,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "February 1900", - "pathname": "/docs/v5/changelog/2" } ], "version": { @@ -11081,14 +10625,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "February 1900", - "pathname": "/docs/v5/changelog/2" } ], "version": { @@ -11127,14 +10663,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "January 1900", - "pathname": "/docs/v5/changelog/1" } ], "version": { @@ -11167,14 +10695,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "January 1900", - "pathname": "/docs/v5/changelog/1" } ], "version": { @@ -11207,14 +10727,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "January 1900", - "pathname": "/docs/v5/changelog/1" } ], "version": { @@ -11247,14 +10759,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2024", - "pathname": "/docs/v5/changelog/2024" - }, - { - "title": "January 1900", - "pathname": "/docs/v5/changelog/1" } ], "version": { @@ -11287,14 +10791,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "December 1900", - "pathname": "/docs/v5/changelog/12" } ], "version": { @@ -11327,14 +10823,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "December 1900", - "pathname": "/docs/v5/changelog/12" } ], "version": { @@ -11367,14 +10855,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "December 1900", - "pathname": "/docs/v5/changelog/12" } ], "version": { @@ -11407,14 +10887,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { 
- "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "December 1900", - "pathname": "/docs/v5/changelog/12" } ], "version": { @@ -11447,14 +10919,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "December 1900", - "pathname": "/docs/v5/changelog/12" } ], "version": { @@ -11487,14 +10951,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "December 1900", - "pathname": "/docs/v5/changelog/12" } ], "version": { @@ -11527,14 +10983,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "November 1900", - "pathname": "/docs/v5/changelog/11" } ], "version": { @@ -11581,14 +11029,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "November 1900", - "pathname": "/docs/v5/changelog/11" } ], "version": { @@ -11621,14 +11061,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "November 1900", - "pathname": "/docs/v5/changelog/11" } ], "version": { @@ -11661,14 +11093,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "November 1900", - "pathname": "/docs/v5/changelog/11" } ], "version": { @@ -11719,14 +11143,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "November 1900", - "pathname": "/docs/v5/changelog/11" } ], "version": { @@ -11759,14 +11175,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "November 1900", - "pathname": "/docs/v5/changelog/11" } ], "version": { @@ -11799,14 +11207,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "November 1900", - "pathname": "/docs/v5/changelog/11" } ], "version": { @@ -11839,14 +11239,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "November 1900", - "pathname": "/docs/v5/changelog/11" } ], "version": { @@ -11879,14 +11271,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "November 1900", - "pathname": "/docs/v5/changelog/11" } ], "version": { @@ -11919,14 +11303,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "October 1900", - "pathname": "/docs/v5/changelog/10" } ], "version": { @@ -11959,14 +11335,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "October 1900", - "pathname": "/docs/v5/changelog/10" } ], "version": { @@ -11999,14 +11367,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "October 1900", - "pathname": "/docs/v5/changelog/10" } ], "version": { @@ 
-12039,14 +11399,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "October 1900", - "pathname": "/docs/v5/changelog/10" } ], "version": { @@ -12093,14 +11445,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "September 1900", - "pathname": "/docs/v5/changelog/9" } ], "version": { @@ -12139,14 +11483,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "September 1900", - "pathname": "/docs/v5/changelog/9" } ], "version": { @@ -12179,14 +11515,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -12219,14 +11547,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -12259,14 +11579,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -12299,14 +11611,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "August 1900", - "pathname": "/docs/v5/changelog/8" } ], "version": { @@ -12339,14 +11643,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -12385,14 +11681,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -12425,14 +11713,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -12465,14 +11745,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -12505,14 +11777,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -12555,14 +11819,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -12601,14 +11857,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "July 1900", - "pathname": "/docs/v5/changelog/7" } ], "version": { @@ -12655,14 +11903,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "June 1900", - "pathname": 
"/docs/v5/changelog/6" } ], "version": { @@ -12695,14 +11935,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "June 1900", - "pathname": "/docs/v5/changelog/6" } ], "version": { @@ -12752,14 +11984,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "June 1900", - "pathname": "/docs/v5/changelog/6" } ], "version": { @@ -12792,14 +12016,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "May 1900", - "pathname": "/docs/v5/changelog/5" } ], "version": { @@ -12838,14 +12054,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "May 1900", - "pathname": "/docs/v5/changelog/5" } ], "version": { @@ -12888,14 +12096,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "April 1900", - "pathname": "/docs/v5/changelog/4" } ], "version": { @@ -12938,14 +12138,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "March 1900", - "pathname": "/docs/v5/changelog/3" } ], "version": { @@ -12978,14 +12170,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "March 1900", - "pathname": "/docs/v5/changelog/3" } ], "version": { @@ -13018,14 +12202,6 @@ { "title": "Changelog", "pathname": "/docs/v5/changelog" - }, - { - "title": "2023", - "pathname": "/docs/v5/changelog/2023" - }, - { - "title": "February 1900", - "pathname": "/docs/v5/changelog/2" } ], "version": { @@ -13065,11 +12241,10 @@ "authed": false, "type": "markdown", "description": "In this tutorial, you’ll use Humanloop to quickly create a GPT-4 chat app. You’ll learn how to create a Prompt, call GPT-4, and log your results. You’ll also learn how to capture feedback from your end users to evaluate and improve your model.\nIn this tutorial, you’ll use GPT-4 and Humanloop to quickly create a GPT-4 chat app that explains topics in the style of different experts.", - "content": "At the end of this tutorial, you’ll have created your first GPT-4 app. You’ll also have learned how to:\nCreate a Prompt\n\nUse the Humanloop SDK to call Open AI GPT-4 and log your results\n\nCapture feedback from your end users to evaluate and improve your model\n\n\n\n\nThis tutorial picks up where the Quick Start left off. If you’ve already followed the quick start you can skip to step 4 below.", - "code_snippets": [] + "content": "At the end of this tutorial, you’ll have created your first GPT-4 app. You’ll also have learned how to:\nCreate a Prompt\n\nUse the Humanloop SDK to call Open AI GPT-4 and log your results\n\nCapture feedback from your end users to evaluate and improve your model\n\n\n\n\nThis tutorial picks up where the Quick Start left off. If you’ve already followed the quick start you can skip to step 4 below." 
}, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-create-the-prompt-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-create-the-prompt", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/create-your-first-gpt-4-app", @@ -13092,19 +12267,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-the-prompt-", + "hash": "#create-the-prompt", "content": "Create a Humanloop Account\nIf you haven’t already, create an account or log in to Humanloop\nAdd an OpenAI API Key\nIf you’re the first person in your organization, you’ll need to add an API key to a model provider.\nGo to OpenAI and grab an API key\n\nIn Humanloop Organization Settings set up OpenAI as a model provider.\n\n\n\n\nUsing the Prompt Editor will use your OpenAI credits in the same way that the OpenAI playground does. Keep your API keys for Humanloop and the model providers private.", "hierarchy": { "h2": { - "id": "create-the-prompt-", - "title": "Create the Prompt " + "id": "create-the-prompt", + "title": "Create the Prompt" } }, "level": "h2", "level_title": "Create the Prompt" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-get-started-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-get-started", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/create-your-first-gpt-4-app", @@ -13127,7 +12302,7 @@ ], "authed": false, "type": "markdown", - "hash": "#get-started-", + "hash": "#get-started", "content": "Create a Prompt File\nWhen you first open Humanloop you’ll see your File navigation on the left. Click ‘+ New’ and create a Prompt.\n\n\nIn the sidebar, rename this file to \"Comedian Bot\" now or later.\nCreate the Prompt template in the Editor\nThe left hand side of the screen defines your Prompt – the parameters such as model, temperature and template. The right hand side is a single chat session with this Prompt.\n\n\nClick the “+ Message” button within the chat template to add a system message to the chat template.\n\n\nAdd the following templated message to the chat template.\nThis message forms the chat template. It has an input slot called topic (surrounded by two curly brackets) for an input value that is provided each time you call this Prompt.\nOn the right hand side of the page, you’ll now see a box in the Inputs section for topic.\nAdd a value for topic e.g. music, jogging, whatever\n\nClick Run in the bottom right of the page\n\n\nThis will call OpenAI’s model and return the assistant response. Feel free to try other values, the model is very funny.\nYou now have a first version of your prompt that you can use.\nCommit your first version of this Prompt\nClick the Commit button\n\nPut “initial version” in the commit message field\n\nClick Commit\n\n\n\n\nView the logs\nUnder the Prompt File, click ‘Logs’ to view all the generations from this Prompt\nClick on a row to see the details of what version of the prompt generated it. 
From here you can give feedback to that generation, see performance metrics, open up this example in the Editor, or add this log to a dataset.", "code_snippets": [ { @@ -13139,15 +12314,15 @@ ], "hierarchy": { "h2": { - "id": "get-started-", - "title": "Get Started " + "id": "get-started", + "title": "Get Started" } }, "level": "h2", "level_title": "Get Started" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-call-the-prompt-in-an-app-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-call-the-prompt-in-an-app", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/create-your-first-gpt-4-app", @@ -13170,19 +12345,19 @@ ], "authed": false, "type": "markdown", - "hash": "#call-the-prompt-in-an-app-", + "hash": "#call-the-prompt-in-an-app", "content": "Now that you’ve found a good prompt and settings, you’re ready to build the \"Learn anything from anyone\" app! We’ve written some code to get you started — follow the instructions below to download the code and run the app.", "hierarchy": { "h2": { - "id": "call-the-prompt-in-an-app-", - "title": "Call the Prompt in an app " + "id": "call-the-prompt-in-an-app", + "title": "Call the Prompt in an app" } }, "level": "h2", "level_title": "Call the Prompt in an app" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-setup-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-setup", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/create-your-first-gpt-4-app", @@ -13205,7 +12380,7 @@ ], "authed": false, "type": "markdown", - "hash": "#setup-", + "hash": "#setup", "content": "If you don’t have Python 3 installed, install it from here. Then download the code by cloning this repository in your terminal:\nIf you prefer not to use git, you can alternatively download the code using this zip file.\nIn your terminal, navigate into the project directory and make a copy of the example environment variables file.\nCopy your Humanloop API key and set it as HUMANLOOP_API_KEY in your newly created .env file. Copy your OpenAI API key and set it as the OPENAI_API_KEY.", "code_snippets": [ { @@ -13221,19 +12396,19 @@ ], "hierarchy": { "h2": { - "id": "setup-", - "title": "Setup " + "id": "setup", + "title": "Setup" }, "h3": { - "id": "setup-", - "title": "Setup " + "id": "setup", + "title": "Setup" } }, "level": "h3", "level_title": "Setup" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-run-the-app-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-run-the-app", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/create-your-first-gpt-4-app", @@ -13256,7 +12431,7 @@ ], "authed": false, "type": "markdown", - "hash": "#run-the-app-", + "hash": "#run-the-app", "content": "Run the following commands in your terminal in the project directory to install the dependencies and run the app.\nOpen http://localhost:5000 in your browser and you should see the app. If you type in the name of an expert, e.g \"Aristotle\", and a topic that they're famous for, e.g \"ethics\", the app will try to generate an explanation in their style.\nPress the thumbs-up or thumbs-down buttons to register your feedback on whether the generation is any good.\nTry a few more questions. 
Perhaps change the name of the expert and keep the topic fixed.", "code_snippets": [ { @@ -13265,19 +12440,19 @@ ], "hierarchy": { "h2": { - "id": "run-the-app-", - "title": "Run the app " + "id": "run-the-app", + "title": "Run the app" }, "h3": { - "id": "run-the-app-", - "title": "Run the app " + "id": "run-the-app", + "title": "Run the app" } }, "level": "h3", "level_title": "Run the app" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-view-the-data-on-humanloop-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-view-the-data-on-humanloop", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/create-your-first-gpt-4-app", @@ -13300,19 +12475,19 @@ ], "authed": false, "type": "markdown", - "hash": "#view-the-data-on-humanloop-", + "hash": "#view-the-data-on-humanloop", "content": "Now that you have a working app you can use Humanloop to measure and improve performance. Go back to the Humanloop app and go to your project named \"learn-anything\".\nOn the Models dashboard you'll be able to see how many data points have flowed through the app as well as how much feedback you've received. Click on your model in the table at the bottom of the page.\n\n\nClick View data in the top right. Here you should be able to see each of your generations as well as the feedback that's been logged against them. You can also add your own internal feedback by clicking on a datapoint in the table and using the feedback buttons.", "hierarchy": { "h2": { - "id": "view-the-data-on-humanloop-", - "title": "View the data on Humanloop " + "id": "view-the-data-on-humanloop", + "title": "View the data on Humanloop" } }, "level": "h2", "level_title": "View the data on Humanloop" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-understand-the-code-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-understand-the-code", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/create-your-first-gpt-4-app", @@ -13335,7 +12510,7 @@ ], "authed": false, "type": "markdown", - "hash": "#understand-the-code-", + "hash": "#understand-the-code", "content": "Open up the file app.py in the \"openai-quickstart-python\" folder. There are a few key code snippets that will let you understand how the app works.\nBetween lines 30 and 41 you'll see the following code.\nOn line 34 you can see the call to humanloop.complete_deployed which takes the project name and project inputs as variables. 
humanloop.complete_deployed calls GPT-4 and also automatically logs your data to the Humanloop app.\nIn addition to returning the result of your model on line 39, you also get back a data_id which can be used for recording feedback about your generations.\nOn line 51 of app.py, you can see an example of logging feedback to Humanloop.\nThe call to humanloop.feedback uses the data_id returned above to associate a piece of positive feedback with that generation.\nIn this app there are two feedback groups rating (which can be good or bad) and actions, which here is the copy button and also indicates positive feedback from the user.", "code_snippets": [ { @@ -13349,15 +12524,15 @@ ], "hierarchy": { "h2": { - "id": "understand-the-code-", - "title": "Understand the code " + "id": "understand-the-code", + "title": "Understand the code" } }, "level": "h2", "level_title": "Understand the code" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-add-a-new-model-config-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-add-a-new-model-config", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/create-your-first-gpt-4-app", @@ -13380,7 +12555,7 @@ ], "authed": false, "type": "markdown", - "hash": "#add-a-new-model-config-", + "hash": "#add-a-new-model-config", "content": "If you experiment a bit, you might find that the model isn't initially that good. The answers are often too short or not in the style of the expert being asked. We can try to improve this by experimenting with other prompts.\nClick on your model on the model dashboard and then in the top right, click Editor\n\n\n\nEdit the prompt template to try and improve the prompt. Try changing the maximum number of tokens using the Max tokens slider, or the wording of the prompt.\n\n\n\n\nHere are some prompt ideas to try out. Which ones work better?\n\n\nClick Save to add the new model to your project. Add it to the \"learn-anything\" project.\n\n\n\nGo to your project dashboard. At the top left of the page, click menu of \"production\" environment card. Within that click the button Change deployment and set a new model config as active; calls to humanloop.complete_deployed will now use this new model. Now go back to the app and see the effect!", "code_snippets": [ { @@ -13396,15 +12571,15 @@ ], "hierarchy": { "h2": { - "id": "add-a-new-model-config-", - "title": "Add a new model config " + "id": "add-a-new-model-config", + "title": "Add a new model config" } }, "level": "h2", "level_title": "Add a new model config" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-congratulations-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.create-your-first-gpt-4-app-congratulations", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/create-your-first-gpt-4-app", @@ -13427,12 +12602,12 @@ ], "authed": false, "type": "markdown", - "hash": "#congratulations-", + "hash": "#congratulations", "content": "And that’s it! You should now have a full understanding of how to go from creating a Prompt in Humanloop to a deployed and functioning app. 
You've learned how to create prompt templates, capture user feedback and deploy a new models.\nIf you want to learn how to improve your model by running experiments or finetuning check out our guides below.", "hierarchy": { "h2": { - "id": "congratulations-", - "title": "Congratulations! " + "id": "congratulations", + "title": "Congratulations!" } }, "level": "h2", @@ -13463,11 +12638,10 @@ "authed": false, "type": "markdown", "description": "In this tutorial, you'll build a custom ChatGPT using Next.js and streaming using Humanloop TypeScript SDK.\nIn this tutorial, you'll build a custom ChatGPT using Next.js and streaming using Humanloop TypeScript SDK.", - "content": "At the end of this tutorial, you'll have built a simple ChatGPT-style interface using Humanloop as the backend to manage interactions with your model provider, track user engagement and experiment with model configuration.\nIf you just want to leap in, the complete repo for this project is available on GitHub here.", - "code_snippets": [] + "content": "At the end of this tutorial, you'll have built a simple ChatGPT-style interface using Humanloop as the backend to manage interactions with your model provider, track user engagement and experiment with model configuration.\nIf you just want to leap in, the complete repo for this project is available on GitHub here." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.chatgpt-clone-in-nextjs-step-1-create-a-new-prompt-in-humanloop-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.chatgpt-clone-in-nextjs-step-1-create-a-new-prompt-in-humanloop", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/chatgpt-clone-in-nextjs", @@ -13490,7 +12664,7 @@ ], "authed": false, "type": "markdown", - "hash": "#step-1-create-a-new-prompt-in-humanloop-", + "hash": "#step-1-create-a-new-prompt-in-humanloop", "content": "First, create a Prompt with the name chat-tutorial-ts. Go to the Editor tab on the left. Here, we can play with parameters and prompt templates to create a model which will be accessible via the Humanloop SDK.\n\n\nIf this is your first time using the Prompt Editor, you'll be prompted to\nenter an OpenAI API key. You can create one by going\nhere.\nThe Prompt Editor is an interactive environment where you can experiment with prompt templates to create a model which will be accessible via the Humanloop SDK.\n\n\nLet's try to create a chess tutor. Paste the following system message into the Chat template box on the left-hand side.\nIn the Parameters section above, select gpt-4 as the model. Click Commit and enter a commit message such as \"GPT-4 Grandmaster\".\nNavigate back to the Dashboard tab in the sidebar. 
Your new Prompt Version is visible in the table at the bottom of the Prompt dashboard.", "code_snippets": [ { @@ -13499,15 +12673,15 @@ ], "hierarchy": { "h1": { - "id": "step-1-create-a-new-prompt-in-humanloop-", - "title": "Step 1: Create a new Prompt in Humanloop " + "id": "step-1-create-a-new-prompt-in-humanloop", + "title": "Step 1: Create a new Prompt in Humanloop" } }, "level": "h1", "level_title": "Step 1: Create a new Prompt in Humanloop" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.chatgpt-clone-in-nextjs-step-2-set-up-a-nextjs-application-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.chatgpt-clone-in-nextjs-step-2-set-up-a-nextjs-application", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/chatgpt-clone-in-nextjs", @@ -13530,7 +12704,7 @@ ], "authed": false, "type": "markdown", - "hash": "#step-2-set-up-a-nextjs-application-", + "hash": "#step-2-set-up-a-nextjs-application", "content": "Now, let's turn to building out a simple Next.js application. We'll use the Humanloop TypeScript SDK to provide programmatic access to the model we just created.\nRun npx create-next-app@latest to create a fresh Next.js project. Accept all the default config options in the setup wizard (which includes using TypeScript, Tailwind, and the Next.js app router). Now npm run dev to fire up the development server.\nNext npm i humanloop to install the Humanloop SDK in your project.\nEdit app/page.tsx to the following. This code stubs out the basic React components and state management we need for a chat interface.\n\n\nWe shouldn't call the Humanloop SDK from the client's browser as this would\nrequire giving out the Humanloop API key, which you should not do! Instead,\nwe'll create a simple backend API route in Next.js which can perform the\nHumanloop requests on the Node server and proxy these back to the client.\nCreate a file containing the code below at app/api/chat/route.ts. This will automatically create an API route at /api/chat. In the call to the Humanloop SDK, you'll need to pass the project name you created in step 1.\nIn this code, we're calling humanloop.chatDeployed. This function is used to target the model which is actively deployed on your project - in this case it should be the model we set up in step 1. Other related functions in the SDK reference (such as humanloop.chat) allow you to target a specific model config (rather than the actively deployed one) or even specify model config directly in the function call.\nWhen we receive a response from Humanloop, we strip out just the text of the chat response and send this back to the client via a Response object (see Next.js - Route Handler docs). The Humanloop SDK response contains much more data besides the raw text, which you can inspect by logging to the console.\nFor the above to work, you'll need to ensure that you have a .env.local file at the root of your project directory with your Humanloop API key. You can generate a Humanloop API key by clicking your name in the bottom left and selecting API keys. This environment variable will only be available on the Next.js server, not on the client (see Next.js - Environment Variables).\nNow, modify page.tsx to use a fetch request against the new API route.\nYou should now find that your application works as expected. 
When we send messages from the client, a GPT response appears beneath (after a delay).\n\n\nBack in your Humanloop Prompt dashboard you should see Logs being recorded as clients interact with your model.", "code_snippets": [ { @@ -13556,15 +12730,15 @@ ], "hierarchy": { "h1": { - "id": "step-2-set-up-a-nextjs-application-", - "title": "Step 2: Set up a Next.js application " + "id": "step-2-set-up-a-nextjs-application", + "title": "Step 2: Set up a Next.js application" } }, "level": "h1", "level_title": "Step 2: Set up a Next.js application" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.chatgpt-clone-in-nextjs-step-3-streaming-tokens-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.chatgpt-clone-in-nextjs-step-3-streaming-tokens", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/chatgpt-clone-in-nextjs", @@ -13587,7 +12761,7 @@ ], "authed": false, "type": "markdown", - "hash": "#step-3-streaming-tokens-", + "hash": "#step-3-streaming-tokens", "content": "(Note: requires Node version 18+).\nYou may notice that model responses can take a while to appear on screen. Currently, our Next.js API route blocks while the entire response is generated, before finally sending the whole thing back to the client browser in one go. For longer generations, this can take some time, particularly with larger models like GPT-4. Other model config settings can impact this too.\nTo provide a better user experience, we can deal with this latency by streaming tokens back to the client as they are generated and have them display eagerly on the page. The Humanloop SDK wraps the model providers' streaming functionality so that we can achieve this. Let's incorporate streaming tokens into our app next.\nEdit the API route at to look like the following. Notice that we have switched to using the humanloop.chatDeployedStream function, which offers Server Sent Event streaming as new tokens arrive from the model provider.\nNow, modify the onSend function in page.tsx to the following. This streams the response body in chunks, updating the UI each time a new chunk arrives.\nYou should now find that tokens stream onto the screen as soon as they are available.", "code_snippets": [ { @@ -13603,15 +12777,15 @@ ], "hierarchy": { "h1": { - "id": "step-3-streaming-tokens-", - "title": "Step 3: Streaming tokens " + "id": "step-3-streaming-tokens", + "title": "Step 3: Streaming tokens" } }, "level": "h1", "level_title": "Step 3: Streaming tokens" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.chatgpt-clone-in-nextjs-step-4-add-feedback-buttons-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.chatgpt-clone-in-nextjs-step-4-add-feedback-buttons", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/chatgpt-clone-in-nextjs", @@ -13634,7 +12808,7 @@ ], "authed": false, "type": "markdown", - "hash": "#step-4-add-feedback-buttons-", + "hash": "#step-4-add-feedback-buttons", "content": "We'll now add feedback buttons to the Assistant chat messages, and submit feedback on those Logs via the Humanloop API whenever the user clicks the buttons.\nModify page.tsx to include an id for each message in React state. 
Note that we'll only have ids for assistant messages, and null for user messages.\nModify the onSend function to look like this:\nNow, modify the MessageRow component to become a ChatItemRow component which knows about the id.\nAnd finally for page.tsx, modify the rendering of the message history to use the new component:\nNext, we need to create a Next.js API route for submitting feedback, similar to the one we had for making a /chat request. Create a new file at the path app/api/feedback/route.ts with the following code:\nThis code simply proxies the feedback request through the Next.js server. You should now see feedback buttons on the relevant rows in chat.\n\n\nWhen you click one of these feedback buttons and visit the Prompt in Humanloop, you should see the feedback logged against the log.", "code_snippets": [ { @@ -13665,15 +12839,15 @@ ], "hierarchy": { "h1": { - "id": "step-4-add-feedback-buttons-", - "title": "Step 4: Add Feedback buttons " + "id": "step-4-add-feedback-buttons", + "title": "Step 4: Add Feedback buttons" } }, "level": "h1", "level_title": "Step 4: Add Feedback buttons" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.chatgpt-clone-in-nextjs-conclusion-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.tutorials.chatgpt-clone-in-nextjs-conclusion", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tutorials/chatgpt-clone-in-nextjs", @@ -13696,12 +12870,12 @@ ], "authed": false, "type": "markdown", - "hash": "#conclusion-", + "hash": "#conclusion", "content": "Congratulations! You've now built a working chat interface and used Humanloop to handle interaction with the model provider and log chats. You used a system message (which is invisible to your end user) to make GPT-4 behave like a chess tutor. You also added a way for your app's users to provide feedback which you can track in Humanloop to help improve your models.\nNow that you've seen how to create a simple Humanloop project and build a chat interface on top of it, try visiting the Humanloop project dashboard to view the logs and iterate on your model configs. You can also create experiments to learn which model configs perform best with your users. To learn more about these topics, take a look at our guides below.\nAll the code for this project is available on Github.", "hierarchy": { "h1": { - "id": "conclusion-", - "title": "Conclusion " + "id": "conclusion", + "title": "Conclusion" } }, "level": "h1", @@ -13732,11 +12906,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create a Prompt in Humanloop using the UI or SDK, version it, and use it to generate responses from your AI models. Prompt management is a key part of the Humanloop platform.\nHow to create, version and use a Prompt in Humanloop", - "content": "Humanloop acts as a registry of your Prompts so you can centrally manage all their versions and Logs, and evaluate and improve your AI systems.\nThis guide will show you how to create a Prompt in the UI or via the SDK/API.\n\n\nPrerequisite: A Humanloop account.\nYou can create an account now by going to the Sign up page.", - "code_snippets": [] + "content": "Humanloop acts as a registry of your Prompts so you can centrally manage all their versions and Logs, and evaluate and improve your AI systems.\nThis guide will show you how to create a Prompt in the UI or via the SDK/API.\n\n\nPrerequisite: A Humanloop account.\nYou can create an account now by going to the Sign up page." 
}, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.create-prompt-create-a-prompt-in-the-ui-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.create-prompt-create-a-prompt-in-the-ui", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/create-prompt", @@ -13759,7 +12932,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-prompt-in-the-ui-", + "hash": "#create-a-prompt-in-the-ui", "content": "Create a Prompt File\nWhen you first open Humanloop you’ll see your File navigation on the left. Click ‘+ New’ and create a Prompt.\n\n\nIn the sidebar, rename this file to \"Comedian Bot\" now or later.\nCreate the Prompt template in the Editor\nThe left hand side of the screen defines your Prompt – the parameters such as model, temperature and template. The right hand side is a single chat session with this Prompt.\n\n\nClick the \"+ Message\" button within the chat template to add a system message to the chat template.\n\n\nAdd the following templated message to the chat template.\nThis message forms the chat template. It has an input slot called topic (surrounded by two curly brackets) for an input value that is provided each time you call this Prompt.\nOn the right hand side of the page, you’ll now see a box in the Inputs section for topic.\nAdd a value fortopic e.g. music, jogging, whatever.\n\nClick Run in the bottom right of the page.\n\n\nThis will call OpenAI’s model and return the assistant response. Feel free to try other values, the model is very funny.\nYou now have a first version of your prompt that you can use.\nCommit your first version of this Prompt\nClick the Commit button\n\nPut “initial version” in the commit message field\n\nClick Commit\n\n\n\n\nView the logs\nUnder the Prompt File click ‘Logs’ to view all the generations from this Prompt\nClick on a row to see the details of what version of the prompt generated it. From here you can give feedback to that generation, see performance metrics, open up this example in the Editor, or add this log to a dataset.", "code_snippets": [ { @@ -13771,15 +12944,15 @@ ], "hierarchy": { "h2": { - "id": "create-a-prompt-in-the-ui-", - "title": "Create a Prompt in the UI " + "id": "create-a-prompt-in-the-ui", + "title": "Create a Prompt in the UI" } }, "level": "h2", "level_title": "Create a Prompt in the UI" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.create-prompt-create-a-prompt-using-the-sdk-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.create-prompt-create-a-prompt-using-the-sdk", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/create-prompt", @@ -13802,7 +12975,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-prompt-using-the-sdk-", + "hash": "#create-a-prompt-using-the-sdk", "content": "The Humanloop Python SDK allows you to programmatically set up and version your Prompts in Humanloop, and log generations from your models. This guide will show you how to create a Prompt using the SDK.\n\n\nPrerequisite: A Humanloop SDK Key.\nYou can get this from your Organisation Settings page if you have the right permissions.\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. 
If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)\n\n\nContinue in the same Python interpreter (where you have run humanloop = Humanloop(...)).\n\n\nNote: Prompts are still called 'projects' in the SDK and versions of Prompts are called 'model configs'\n\nCreate the Prompt \"project\"\nRegister your version (\"model config\")\nGo to the App\nGo to the Humanloop app and you will see your new project as a Prompt with the model config you just created.\nYou now have a project in Humanloop that contains your model config. You can view your project and invite team members by going to the Project page.", "code_snippets": [ { @@ -13844,15 +13017,15 @@ ], "hierarchy": { "h2": { - "id": "create-a-prompt-using-the-sdk-", - "title": "Create a Prompt using the SDK " + "id": "create-a-prompt-using-the-sdk", + "title": "Create a Prompt using the SDK" } }, "level": "h2", "level_title": "Create a Prompt using the SDK" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.create-prompt-next-steps-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.create-prompt-next-steps", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/create-prompt", @@ -13875,12 +13048,12 @@ ], "authed": false, "type": "markdown", - "hash": "#next-steps-", + "hash": "#next-steps", "content": "With the Prompt set up, you can now integrate it into your app by following the SDK/API integration guide.", "hierarchy": { "h2": { - "id": "next-steps-", - "title": "Next Steps " + "id": "next-steps", + "title": "Next Steps" } }, "level": "h2", @@ -13915,11 +13088,10 @@ "authed": false, "type": "markdown", "description": "Learn how to generate from large language models and log the results in Humanloop, with managed and versioned prompts.\nUse Humanloop to generate from large language models", - "content": "A Log is created every time a Prompt is called. The Log contain contains the inputs and the output (the generation) as well as metadata such as which version of the Prompt was used and any associated feedback.\nThere are two ways to get your Logs into Humanloop, referred to as 'proxy' and 'async'.", - "code_snippets": [] + "content": "A Log is created every time a Prompt is called. The Log contain contains the inputs and the output (the generation) as well as metadata such as which version of the Prompt was used and any associated feedback.\nThere are two ways to get your Logs into Humanloop, referred to as 'proxy' and 'async'." 
}, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.generate-and-log-with-the-sdk-proxied-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.generate-and-log-with-the-sdk-proxied", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/generate-and-log-with-the-sdk", @@ -13946,19 +13118,19 @@ ], "authed": false, "type": "markdown", - "hash": "#proxied-", + "hash": "#proxied", "content": "In one call you can fetch the latest version of a Prompt, generate from the provider, stream the result back and log the result.\nUsing Humanloop as a proxy is by far the most convenient and way of calling your LLM-based applications.", "hierarchy": { "h3": { - "id": "proxied-", - "title": "Proxied " + "id": "proxied", + "title": "Proxied" } }, "level": "h3", "level_title": "Proxied" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.generate-and-log-with-the-sdk-async-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.generate-and-log-with-the-sdk-async", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/generate-and-log-with-the-sdk", @@ -13985,12 +13157,12 @@ ], "authed": false, "type": "markdown", - "hash": "#async-", + "hash": "#async", "content": "With the async method, you can fetch the latest version of a Prompt, generate from the provider, and log the result in separate calls. This is useful if you want to decouple the generation and logging steps, or if you want to log results from your own infrastructure. It also allows you to have no additional latency or servers on the critical path to your AI features.\n\n\nThe guides in this section instruct you on how to create Logs on Humanloop. Once\nthis is setup, you can begin to use Humanloop to evaluate and improve your LLM apps.", "hierarchy": { "h3": { - "id": "async-", - "title": "Async " + "id": "async", + "title": "Async" } }, "level": "h3", @@ -14025,11 +13197,10 @@ "authed": false, "type": "markdown", "description": "Learn how to generate completions from a large language model and log the results in Humanloop, with managed and versioned prompts.\nA walkthrough of how to generate completions from a large language model with the prompt managed in Humanloop.", - "content": "The Humanloop Python SDK allows you to easily replace your openai.Completions.create() calls with a humanloop.complete() call that, in addition to calling OpenAI to get a generation, automatically logs the data to your Humanloop project.", - "code_snippets": [] + "content": "The Humanloop Python SDK allows you to easily replace your openai.Completions.create() calls with a humanloop.complete() call that, in addition to calling OpenAI to get a generation, automatically logs the data to your Humanloop project." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.completion-using-the-sdk-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.completion-using-the-sdk-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/completion-using-the-sdk", @@ -14056,7 +13227,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\n\n\nThis guide assumes you're using an OpenAI model. 
If you want to use other providers or your own model please also look at our guide to using your own model.\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)", "code_snippets": [ { @@ -14082,15 +13253,15 @@ ], "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.completion-using-the-sdk-activate-a-model-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.completion-using-the-sdk-activate-a-model", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/completion-using-the-sdk", @@ -14117,19 +13288,19 @@ ], "authed": false, "type": "markdown", - "hash": "#activate-a-model-", + "hash": "#activate-a-model", "content": "Log in to Humanloop and navigate to the Dashboard tab of your project.\n\nEnsure that the default environment is in green at the top of the dashboard, the default environment is mapped to your active deployment. If there is no active deployment set, then use the dropdown button for the default environment and select the Change deployment option to select one of your existing model configs to use to generate. You also need to confirm the model you config you have deployed is a Completion model. 
This can be confirmed by clicking on the config in the table and viewing the Endpoint, making sure it says Complete.", "hierarchy": { "h2": { - "id": "activate-a-model-", - "title": "Activate a model " + "id": "activate-a-model", + "title": "Activate a model" } }, "level": "h2", "level_title": "Activate a model" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.completion-using-the-sdk-use-the-sdk-to-call-your-model-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.completion-using-the-sdk-use-the-sdk-to-call-your-model", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/completion-using-the-sdk", @@ -14156,7 +13327,7 @@ ], "authed": false, "type": "markdown", - "hash": "#use-the-sdk-to-call-your-model-", + "hash": "#use-the-sdk-to-call-your-model", "content": "Now you can use the SDK to generate completions and log the results to your project.\nNavigate to your project's Logs tab in the browser to see the recorded inputs and outputs of your generation.\n🎉 Now that you have generations flowing through your project you can start to log your end user feedback to evaluate and improve your models.", "code_snippets": [ { @@ -14166,8 +13337,8 @@ ], "hierarchy": { "h2": { - "id": "use-the-sdk-to-call-your-model-", - "title": "Use the SDK to call your model " + "id": "use-the-sdk-to-call-your-model", + "title": "Use the SDK to call your model" } }, "level": "h2", @@ -14202,11 +13373,10 @@ "authed": false, "type": "markdown", "description": "Learn how to generate chat completions from a large language model and log the results in Humanloop, with managed and versioned prompts.\nA walkthrough of how to generate chat completions from a large language model with the prompt managed in Humanloop.", - "content": "The Humanloop Python SDK allows you to easily replace your openai.ChatCompletions.create() calls with a humanloop.chat() call that, in addition to calling OpenAI to get a response, automatically logs the data to your Humanloop project.", - "code_snippets": [] + "content": "The Humanloop Python SDK allows you to easily replace your openai.ChatCompletions.create() calls with a humanloop.chat() call that, in addition to calling OpenAI to get a response, automatically logs the data to your Humanloop project." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.chat-using-the-sdk-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.chat-using-the-sdk-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/chat-using-the-sdk", @@ -14233,7 +13403,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\n\n\nThis guide assumes you're using an OpenAI model. If you want to use other providers or your own model please also look at our guide to using your own model.\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. 
Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)", "code_snippets": [ { @@ -14259,15 +13429,15 @@ ], "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.chat-using-the-sdk-activate-a-model-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.chat-using-the-sdk-activate-a-model", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/chat-using-the-sdk", @@ -14294,19 +13464,19 @@ ], "authed": false, "type": "markdown", - "hash": "#activate-a-model-", + "hash": "#activate-a-model", "content": "Log in to Humanloop and navigate to the Models tab of your project.\n\nEnsure that the default environment is in green at the top of the dashboard.\nThe default environment is mapped to your active deployment.\nIf there is no active deployment set, then use the dropdown button for the default environment and select the Change deployment option to select one of your existing model configs to use to generate. You also need to confirm the model you config you have deployed is a Chat model. This can be confirmed by clicking on the config in the table and viewing the Endpoint, making sure it says Chat.", "hierarchy": { "h2": { - "id": "activate-a-model-", - "title": "Activate a model " + "id": "activate-a-model", + "title": "Activate a model" } }, "level": "h2", "level_title": "Activate a model" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.chat-using-the-sdk-use-the-sdk-to-call-your-model-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.chat-using-the-sdk-use-the-sdk-to-call-your-model", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/chat-using-the-sdk", @@ -14333,7 +13503,7 @@ ], "authed": false, "type": "markdown", - "hash": "#use-the-sdk-to-call-your-model-", + "hash": "#use-the-sdk-to-call-your-model", "content": "Now you can use the SDK to generate completions and log the results to your project:\nNavigate to your project's Logs tab in the browser to see the recorded inputs, messages and responses of your chat.\n🎉 Now that you have chat messages flowing through your project you can start to log your end user feedback to evaluate and improve your models.", "code_snippets": [ { @@ -14343,8 +13513,8 @@ ], "hierarchy": { "h2": { - "id": "use-the-sdk-to-call-your-model-", - "title": "Use the SDK to call your model " + "id": "use-the-sdk-to-call-your-model", + "title": "Use the SDK to call your model" } }, "level": "h2", @@ -14379,11 +13549,10 @@ "authed": false, "type": "markdown", "description": "Learn how to record user feedback on datapoints generated by your large language model using the Humanloop SDK.\nYou can record feedback on generations from your users using the Humanloop Python SDK. This allows you to monitor how your generations perform with your users.", - "content": "This guide shows how to use the Humanloop SDK to record user feedback on datapoints. 
This works equivalently for both the completion and chat APIs.", - "code_snippets": [] + "content": "This guide shows how to use the Humanloop SDK to record user feedback on datapoints. This works equivalently for both the completion and chat APIs." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.capture-user-feedback-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.capture-user-feedback-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/capture-user-feedback", @@ -14410,19 +13579,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\nAlready have integrated humanloop.chat() or humanloop.complete() to log generations with the Python or TypeScript SDKs. If not, follow our guide to integrating the SDK.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.capture-user-feedback-record-feedback-with-the-datapoint-id-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.capture-user-feedback-record-feedback-with-the-datapoint-id", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/capture-user-feedback", @@ -14449,7 +13618,7 @@ ], "authed": false, "type": "markdown", - "hash": "#record-feedback-with-the-datapoint-id-", + "hash": "#record-feedback-with-the-datapoint-id", "content": "Extract the data ID from the humanloop.complete_deployed() response.\n\nCall humanloop.feedback() referencing the saved datapoint ID to record user feedback.\n\nYou can also include the source of the feedback when recording it.\n\n\nThe feedback recorded for each datapoint can be viewed in the Logs tab of your project.\n\n\nDifferent use cases and user interfaces may require different kinds of feedback that need to be mapped to the appropriate end user interaction. There are broadly 3 important kinds of feedback:\nExplicit feedback: these are purposeful actions to review the generations. 
For example, ‘thumbs up/down’ button presses.\n\nImplicit feedback: indirect actions taken by your users may signal whether the generation was good or bad, for example, whether the user ‘copied’ the generation, ‘saved it’ or ‘dismissed it’ (which is negative feedback).\n\nFree-form feedback: Corrections and explanations provided by the end-user on the generation.", "code_snippets": [ { @@ -14462,15 +13631,15 @@ ], "hierarchy": { "h2": { - "id": "record-feedback-with-the-datapoint-id-", - "title": "Record feedback with the datapoint ID " + "id": "record-feedback-with-the-datapoint-id", + "title": "Record feedback with the datapoint ID" } }, "level": "h2", "level_title": "Record feedback with the datapoint ID" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.capture-user-feedback-recording-corrections-as-feedback-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.capture-user-feedback-recording-corrections-as-feedback", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/capture-user-feedback", @@ -14497,7 +13666,7 @@ ], "authed": false, "type": "markdown", - "hash": "#recording-corrections-as-feedback-", + "hash": "#recording-corrections-as-feedback", "content": "It can also be useful to allow your users to correct the outputs of your model. This is strong feedback signal and can also be considered as ground truth data for finetuning later.\n\n\nThis feedback will also show up within Humanloop, where your internal users can also provide feedback and corrections on logged data to help with evaluation.", "code_snippets": [ { @@ -14507,8 +13676,8 @@ ], "hierarchy": { "h2": { - "id": "recording-corrections-as-feedback-", - "title": "Recording corrections as feedback " + "id": "recording-corrections-as-feedback", + "title": "Recording corrections as feedback" } }, "level": "h2", @@ -14543,11 +13712,10 @@ "authed": false, "type": "markdown", "description": "Learn how to upload your historic model data to an existing Humanloop project to warm-start your project.\nUploading historic model inputs and generations to an existing Humanloop project.", - "content": "The Humanloop Python SDK allows you to upload your historic model data to an existing Humanloop project. This can be used to warm-start your project. The data can be considered for feedback and review alongside your new user generated data.", - "code_snippets": [] + "content": "The Humanloop Python SDK allows you to upload your historic model data to an existing Humanloop project. This can be used to warm-start your project. The data can be considered for feedback and review alongside your new user generated data." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.upload-historic-data-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.upload-historic-data-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/upload-historic-data", @@ -14574,7 +13742,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. 
Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)", "code_snippets": [ { @@ -14600,15 +13768,15 @@ ], "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.upload-historic-data-log-historic-data-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.upload-historic-data-log-historic-data", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/upload-historic-data", @@ -14635,7 +13803,7 @@ ], "authed": false, "type": "markdown", - "hash": "#log-historic-data-", + "hash": "#log-historic-data", "content": "Grab your API key from your Settings page.\nSet up your code to first load up your historic data and then log this to Humanloop, explicitly passing details of the model config (if available) alongside the inputs and output:\n\nThe process of capturing feedback then uses the returned log_id as before.\nSee our guide on capturing user feedback.\n\nYou can also log immediate feedback alongside the input and outputs:", "code_snippets": [ { @@ -14649,8 +13817,8 @@ ], "hierarchy": { "h2": { - "id": "log-historic-data-", - "title": "Log historic data " + "id": "log-historic-data", + "title": "Log historic data" } }, "level": "h2", @@ -14685,11 +13853,10 @@ "authed": false, "type": "markdown", "description": "Integrating Humanloop and running an experiment when using your own models.", - "content": "The humanloop.complete()and humanloop.chat() call encapsulates the LLM provider calls (for example openai.Completions.create()), the model-config selection and logging steps in a single unified interface. There may be scenarios that you wish to manage the LLM provider calls directly in your own code instead of relying on Humanloop.\nFor example, you may be using an LLM provider that currently is not directly supported by Humanloop such as Hugging Face.\nTo support using your own model provider, we provide additional humanloop.log() and humanloop.projects.get_active_config() methods in the SDK.\nIn this guide, we walk through how to use these SDK methods to log data to Humanloop and run experiments.", - "code_snippets": [] + "content": "The humanloop.complete()and humanloop.chat() call encapsulates the LLM provider calls (for example openai.Completions.create()), the model-config selection and logging steps in a single unified interface. There may be scenarios that you wish to manage the LLM provider calls directly in your own code instead of relying on Humanloop.\nFor example, you may be using an LLM provider that currently is not directly supported by Humanloop such as Hugging Face.\nTo support using your own model provider, we provide additional humanloop.log() and humanloop.projects.get_active_config() methods in the SDK.\nIn this guide, we walk through how to use these SDK methods to log data to Humanloop and run experiments." 
}, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.use-your-own-model-provider-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.use-your-own-model-provider-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/use-your-own-model-provider", @@ -14716,7 +13883,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)", "code_snippets": [ { @@ -14742,15 +13909,15 @@ ], "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.use-your-own-model-provider-log-data-to-your-project-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.use-your-own-model-provider-log-data-to-your-project", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/use-your-own-model-provider", @@ -14777,7 +13944,7 @@ ], "authed": false, "type": "markdown", - "hash": "#log-data-to-your-project-", + "hash": "#log-data-to-your-project", "content": "Set up your code to first get your model config from Humanloop, then call your LLM provider to get a completion (or chat response) and then log this, alongside the inputs, config and output:\nThe process of capturing feedback then uses the returned data_id as before.\nSee our guide on capturing user feedback.\nYou can also log immediate feedback alongside the input and outputs:\n\n\nNote that you can also use a similar pattern for non-OpenAI LLM providers. For example, logging results from Hugging Face’s Inference API:", "code_snippets": [ { @@ -14808,8 +13975,8 @@ ], "hierarchy": { "h2": { - "id": "log-data-to-your-project-", - "title": "Log data to your project " + "id": "log-data-to-your-project", + "title": "Log data to your project" } }, "level": "h2", @@ -14844,11 +14011,10 @@ "authed": false, "type": "markdown", "description": "Learn how to log sequences of LLM calls to Humanloop, enabling you to trace through \"sessions\" and troubleshoot where your LLM chain went wrong or track sequences of actions taken by your LLM agent.\nThis guide explains how to use sequences of LLM calls to achieve a task in Humanloop. Humanloop allows you to trace through \"sessions\", enabling you to track sequences of actions taken by your LLM agent and troubleshoot where your LLM chain went wrong.", - "content": "This guide contains 3 sections. We'll start with an example Python script that makes a series of calls to an LLM upon receiving a user request. In the first section, we'll log these calls to Humanloop. 
In the second section, we'll link up these calls to a single session so they can be easily inspected on Humanloop. Finally, we'll explore how to deal with nested logs within a session.\nBy following this guide, you will:\nHave hooked up your backend system to use Humanloop.\n\nBe able to view session traces displaying sequences of LLM calls on Humanloop.\n\nLearn how to log complex session traces containing nested logs.", - "code_snippets": [] + "content": "This guide contains 3 sections. We'll start with an example Python script that makes a series of calls to an LLM upon receiving a user request. In the first section, we'll log these calls to Humanloop. In the second section, we'll link up these calls to a single session so they can be easily inspected on Humanloop. Finally, we'll explore how to deal with nested logs within a session.\nBy following this guide, you will:\nHave hooked up your backend system to use Humanloop.\n\nBe able to view session traces displaying sequences of LLM calls on Humanloop.\n\nLearn how to log complex session traces containing nested logs." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/logging-session-traces", @@ -14875,19 +14041,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "A Humanloop account. If you don't have one, you can create an account now by going to the Sign up page.\n\nYou have a system making a series of LLM calls when a user makes a request. If you do not have one, you can use the following example Python script. 
In this guide, we'll be illustrating the steps to be taken with specific modifications to this script.\n\n\n\n\nIf you don't use Python, you can checkout our TypeScript SDK\n or the underlying API in our Postman\ncollection\nfor the corresponding endpoints.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-example-script-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-example-script", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/logging-session-traces", @@ -14914,7 +14080,7 @@ ], "authed": false, "type": "markdown", - "hash": "#example-script-", + "hash": "#example-script", "content": "To set up your local environment to run this script, you will need to have installed Python 3 and the following libraries:\npip install openai google-search-results.", "code_snippets": [ { @@ -14924,19 +14090,19 @@ ], "hierarchy": { "h2": { - "id": "example-script-", - "title": "Example script " + "id": "example-script", + "title": "Example script" }, "h3": { - "id": "example-script-", - "title": "Example script " + "id": "example-script", + "title": "Example script" } }, "level": "h3", "level_title": "Example script" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-send-logs-to-humanloop-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-send-logs-to-humanloop", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/logging-session-traces", @@ -14963,7 +14129,7 @@ ], "authed": false, "type": "markdown", - "hash": "#send-logs-to-humanloop-", + "hash": "#send-logs-to-humanloop", "content": "To send logs to Humanloop, we'll install and use the Humanloop Python SDK.\n\n\nInstall the Humanloop Python SDK with pip install --upgrade humanloop.\nInitialize the Humanloop client:\nAdd the following lines to the top of the example file. (Get your API key from your Organisation Settings page)\nUse Humanloop to fetch the moderator response. This automatically sends the logs to Humanloop:\nReplace your openai.ChatCompletion.create() call under # Check for abuse with a humanloop.chat() call.\n\n\nInstead of replacing your model call with humanloop.chat()you can\nalternatively add a humanloop.log()call after your model call. This is\nuseful for use cases that leverage custom models not yet supported natively by\nHumanloop. See our Using your own model guide\nfor more information.\nLog the Google search tool result.\nAt the top of the file add the inspect import.\nInsert the following log request after print(\"Google answer:\", google_answer).\nUse Humanloop to fetch the assistant response. This automatically sends the log to Humanloop.\nReplace your openai.Completion.create() call under # Respond to request with a humanloop.complete() call.\nYou have now connected your multiple calls to Humanloop, logging them to individual projects. 
While each one can be inspected individually, we can't yet view them together to evaluate and improve our pipeline.", "code_snippets": [ { @@ -15009,15 +14175,15 @@ ], "hierarchy": { "h2": { - "id": "send-logs-to-humanloop-", - "title": "Send logs to Humanloop " + "id": "send-logs-to-humanloop", + "title": "Send logs to Humanloop" } }, "level": "h2", "level_title": "Send logs to Humanloop" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-post-logs-to-a-session-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-post-logs-to-a-session", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/logging-session-traces", @@ -15044,7 +14210,7 @@ ], "authed": false, "type": "markdown", - "hash": "#post-logs-to-a-session-", + "hash": "#post-logs-to-a-session", "content": "To view the logs for a single user_request together, we can log them to a session. This requires a simple change of just passing in the same session id to the different calls.\n\n\nCreate an ID representing a session to connect the sequence of logs.\nAt the top of the file, instantiate a session_reference_id. A V4 UUID is suitable for this use-case.\nAdd session_reference_id to each humanloop.chat/complete/log(...) call.\nFor example, for the final humanloop.complete(...) call, this looks like", "code_snippets": [ { @@ -15066,15 +14232,15 @@ ], "hierarchy": { "h2": { - "id": "post-logs-to-a-session-", - "title": "Post logs to a session " + "id": "post-logs-to-a-session", + "title": "Post logs to a session" } }, "level": "h2", "level_title": "Post logs to a session" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-final-example-script-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-final-example-script", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/logging-session-traces", @@ -15101,7 +14267,7 @@ ], "authed": false, "type": "markdown", - "hash": "#final-example-script-", + "hash": "#final-example-script", "content": "This is the updated version of the example script above with Humanloop fully integrated. 
Running this script yields sessions that can be inspected on Humanloop.", "code_snippets": [ { @@ -15111,19 +14277,19 @@ ], "hierarchy": { "h2": { - "id": "final-example-script-", - "title": "Final example script " + "id": "final-example-script", + "title": "Final example script" }, "h3": { - "id": "final-example-script-", - "title": "Final example script " + "id": "final-example-script", + "title": "Final example script" } }, "level": "h3", "level_title": "Final example script" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-nesting-logs-within-a-session-extension-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.generate-and-log.logging-session-traces-nesting-logs-within-a-session-extension", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/logging-session-traces", @@ -15150,7 +14316,7 @@ ], "authed": false, "type": "markdown", - "hash": "#nesting-logs-within-a-session-extension-", + "hash": "#nesting-logs-within-a-session-extension", "content": "A more complicated trace involving nested logs, such as those recording an Agent's behaviour, can also be logged and viewed in Humanloop.\nFirst, post a log to a session, specifying both session_reference_id and reference_id. Then, pass in this reference_id as parent_reference_id in a subsequent log request. This indicates to Humanloop that this second log should be nested under the first.\n\n\nDeferred output population\nIn most cases, you don't know the output for a parent log until all of its children have completed. For instance, the root-level Agent will spin off multiple LLM requests before it can retrieve an output. To support this case, we allow logging without an output. The output can then be updated after the session is complete with a separate humanloop.logs_api.update_by_reference_id(reference_id, output) call.", "code_snippets": [ { @@ -15164,8 +14330,8 @@ ], "hierarchy": { "h2": { - "id": "nesting-logs-within-a-session-extension-", - "title": "Nesting logs within a session [Extension] " + "id": "nesting-logs-within-a-session-extension", + "title": "Nesting logs within a session [Extension]" } }, "level": "h2", @@ -15200,11 +14366,10 @@ "authed": false, "type": "markdown", "description": "Learn how to set up and use Humanloop's evaluation framework to test and track the performance of your prompts.\nHumanloop's evaluation framework allows you to test and track the performance of models in a rigorous way.", - "content": "A key part of successful prompt engineering and deployment for LLMs is a robust evaluation framework. In this section we provide guides for how to set up Humanloop's evaluation framework in your projects.\nThe core entity in the Humanloop evaluation framework is an evaluator - a function you define which takes an LLM-generated log as an argument and returns an evaluation. The evaluation is typically either a boolean or a number, indicating how well the model performed according to criteria you determine based on your use case.", - "code_snippets": [] + "content": "A key part of successful prompt engineering and deployment for LLMs is a robust evaluation framework. In this section we provide guides for how to set up Humanloop's evaluation framework in your projects.\nThe core entity in the Humanloop evaluation framework is an evaluator - a function you define which takes an LLM-generated log as an argument and returns an evaluation. 
The evaluation is typically either a boolean or a number, indicating how well the model performed according to criteria you determine based on your use case." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-types-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-types", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15231,19 +14396,19 @@ ], "authed": false, "type": "markdown", - "hash": "#types-", + "hash": "#types", "content": "Currently, you can define your evaluators in two different ways:\nPython - using our in-browser editor, define simple Python functions to act as evaluators\n\nLLM - use language models to evaluate themselves! Our evaluator editor allows you to define a special-purpose prompt which passes data from the underlying log to a language model. This type of evaluation is particularly useful for more subjective evaluation such as verifying appropriate tone-of-voice or factuality given an input set of facts.", "hierarchy": { "h2": { - "id": "types-", - "title": "Types " + "id": "types", + "title": "Types" } }, "level": "h2", "level_title": "Types" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-modes-monitoring-vs-testing-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-modes-monitoring-vs-testing", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15270,19 +14435,19 @@ ], "authed": false, "type": "markdown", - "hash": "#modes-monitoring-vs-testing-", + "hash": "#modes-monitoring-vs-testing", "content": "Evaluation is useful for both testing new model configs as you develop them and for monitoring live deployments that are already in production.\nTo handle these different use cases, there are two distinct modes of evaluator - online and offline.", "hierarchy": { "h2": { - "id": "modes-monitoring-vs-testing-", - "title": "Modes: Monitoring vs. testing " + "id": "modes-monitoring-vs-testing", + "title": "Modes: Monitoring vs. testing" } }, "level": "h2", "level_title": "Modes: Monitoring vs. testing" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-online-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-online", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15309,23 +14474,23 @@ ], "authed": false, "type": "markdown", - "hash": "#online-", + "hash": "#online", "content": "Online evaluators are for use on logs generated in your project, including live in production. Typically, they are used to monitor deployed model performance over time.\nOnline evaluators can be set to run automatically whenever logs are added to a project. 
The evaluator takes the log as an argument.", "hierarchy": { "h2": { - "id": "online-", - "title": "Online " + "id": "online", + "title": "Online" }, "h3": { - "id": "online-", - "title": "Online " + "id": "online", + "title": "Online" } }, "level": "h3", "level_title": "Online" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-offline-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-offline", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15352,23 +14517,23 @@ ], "authed": false, "type": "markdown", - "hash": "#offline-", + "hash": "#offline", "content": "Offline evaluators are for use with predefined test datasets in order to evaluate models as you iterate in your prompt engineering workflow, or to test for regressions in a CI environment.\nA test dataset is a collection of datapoints, which are roughly analogous to unit tests or test cases in traditional programming. Each datapoint specifies inputs to your model and (optionally) some target data.\nWhen you run an offline evaluation, Humanloop iterates through each datapoint in the dataset and triggers a fresh LLM generation using the inputs of the testcase and the model config being evaluated. For each test case, your evaluator function will be called, taking as arguments the freshly generated log and the testcase datapoint that gave rise to it. Typically, you would write your evaluator to perform some domain-specific logic to determine whether the model-generated log meets your desired criteria (as specified in the datapoint 'target').", "hierarchy": { "h2": { - "id": "offline-", - "title": "Offline " + "id": "offline", + "title": "Offline" }, "h3": { - "id": "offline-", - "title": "Offline " + "id": "offline", + "title": "Offline" } }, "level": "h3", "level_title": "Offline" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-humanloop-hosted-vs-self-hosted-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-humanloop-hosted-vs-self-hosted", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15395,19 +14560,19 @@ ], "authed": false, "type": "markdown", - "hash": "#humanloop-hosted-vs-self-hosted-", + "hash": "#humanloop-hosted-vs-self-hosted", "content": "Conceptually, evaluation runs have two components:\nGeneration of logs from the datapoints\n\nEvaluating those logs.\n\n\nUsing the Evaluations API, Humanloop offers the ability to generate logs either within the Humanloop runtime, or self-hosted. Similarly, evaluations of the logs can be performed in the Humanloop runtime (using evaluators that you can define in-app) or self-hosted (see our guide on self-hosted evaluations).\nIn fact, it's possible to mix-and-match self-hosted and Humanloop-runtime generations and evaluations in any combination you wish. When creating an evaluation via the API, set the hl_generated flag to False to indicate that you are posting the logs from your own infrastructure (see our guide on evaluating externally-generated logs). Include an evaluator of type External to indicate that you will post evaluation results from your own infrastructure. You can include multiple evaluators on any run, and these can include any combination of External (i.e. 
self-hosted) and Humanloop-runtime evaluators.\n\n\ntitle: Evaluating LLM Applications\nauthors: [\"Peter Hayes\"]\ntype: Blog\ndate: 2024-02-06\ndraft: false\npublished: true\ntags: [\"llm\", \"gpt-4\", \"evals\"]\nsummary:\nAn overview of evaluating LLM applications. The emerging evaluation framework,\nparallels to traditional software testing and some guidance on best practices.", "hierarchy": { "h2": { - "id": "humanloop-hosted-vs-self-hosted-", - "title": "Humanloop-hosted vs. self-hosted " + "id": "humanloop-hosted-vs-self-hosted", + "title": "Humanloop-hosted vs. self-hosted" } }, "level": "h2", "level_title": "Humanloop-hosted vs. self-hosted" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-thumbnail-blogevaluating-llm-appsevalllmappsthumbnail2png-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-thumbnail-blogevaluating-llm-appsevalllmappsthumbnail2png", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15434,19 +14599,19 @@ ], "authed": false, "type": "markdown", - "hash": "#thumbnail-blogevaluating-llm-appsevalllmappsthumbnail2png-", + "hash": "#thumbnail-blogevaluating-llm-appsevalllmappsthumbnail2png", "content": "An ever-increasing number of companies are using large language models (LLMs) to\ntransform both their product experiences and internal operations. These kinds of\nfoundation models represent a new computing platform. The process of\nprompt engineering is\nreplacing aspects of software development and the scope of what software can\nachieve is rapidly expanding.\nIn order to effectively leverage LLMs in production, having confidence in how\nthey perform is paramount. This represents a unique challenge for most companies\ngiven the inherent novelty and complexities surrounding LLMs. Unlike traditional\nsoftware and non-generative machine learning (ML) models, evaluation is\nsubjective, hard to automate and the risk of the system going embarrassingly\nwrong is higher.\nThis post provides some thoughts on evaluating LLMs and discusses some emerging\npatterns I've seen work well in practice from experience with thousands of teams\ndeploying LLM applications in production.", "hierarchy": { "h2": { - "id": "thumbnail-blogevaluating-llm-appsevalllmappsthumbnail2png-", - "title": "thumbnail: /blog/evaluating-llm-apps/EvalLLMAppsThumbnail2.png " + "id": "thumbnail-blogevaluating-llm-appsevalllmappsthumbnail2png", + "title": "thumbnail: /blog/evaluating-llm-apps/EvalLLMAppsThumbnail2.png" } }, "level": "h2", "level_title": "thumbnail: /blog/evaluating-llm-apps/EvalLLMAppsThumbnail2.png" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-llms-are-not-all-you-need-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-llms-are-not-all-you-need", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15473,19 +14638,19 @@ ], "authed": false, "type": "markdown", - "hash": "#llms-are-not-all-you-need-", + "hash": "#llms-are-not-all-you-need", "content": "It’s important to first understand the basic makeup of what we are evaluating\nwhen working with LLMs in production. 
As the models get increasingly more\npowerful, a significant amount of effort is spent trying to give the model the\nappropriate context and access required to solve a task.\n\n\nFor the current generation of models, at the core of any LLM app is usually some\ncombination of the following components:\nLLM model - the core reasoning engine; an API into OpenAI, Anthropic,\nGoogle, or open source alternatives like\nMistral.\n\nPrompt template - the boilerplate instructions to your model, which are\nshared between requests. This is generally versioned and managed like code\nusing formats like the\n.prompt file.\n\nData sources - to provide the relevant context to the model; often\nreferred to as retrieval augmented generation (RAG). Examples being\ntraditional relational databases, graph databases, and\nvector databases.\n\nMemory - like a data source, but that builds up a history of previous\ninteractions with the model for re-use.\n\nTools - provides access to actions like API calls and code execution\nempowering the model to interact with external systems where appropriate.\n\nAgent control flow - some form of looping logic that allows the model to\nmake multiple generations to solve a task before hitting some stopping\ncriteria.\n\nGuardrails - a check that is run on the output of the model before\nreturning the output to the user. This can be simple logic, for example\nlooking for certain keywords, or another model. Often triggering fallback to\nhuman-in-the-loop workflows", "hierarchy": { "h1": { - "id": "llms-are-not-all-you-need-", - "title": "LLMs are not all you need " + "id": "llms-are-not-all-you-need", + "title": "LLMs are not all you need" } }, "level": "h1", "level_title": "LLMs are not all you need" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-llm-apps-are-complex-systems-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-llm-apps-are-complex-systems", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15512,23 +14677,23 @@ ], "authed": false, "type": "markdown", - "hash": "#llm-apps-are-complex-systems-", + "hash": "#llm-apps-are-complex-systems", "content": "These individual components represent a large and unique design space to\nnavigate. The configuration of each one requires careful consideration; it's no\nlonger just strictly prompt engineering.\nFor example, take the vector database - now a mainstay for the problem of\nproviding the relevant chunks of context to the model, for a particular query,\nfrom a larger corpus of documents. There is a near infinite number of open or\nclosed source vector stores to choose from. Then there is the embedding model\n(that also has its own design choices), retrieval technique, similarity metric,\nhow to chunk your documents, how to sync your vector store... and the list goes\non.\nNot only that, but there are often complex interactions between these components\nthat are hard to predict. For example, maybe the performance of your prompt\ntemplate is weirdly sensitive to the format of the separator tokens you forgot\nto strip when chunking your documents in the vector database (a real personal\nanecdote).\nFurthermore, we're seeing applications that have multiple specialist blocks of\nthese components chained together to solve a task. This all adds to the\nchallenge of evaluating the resulting complex system. 
Specialist tooling is\nincreasingly a necessity to help teams build robust applications.\nLike for testing in traditional software development, the goal of a good LLM\nevaluation framework is to provide confidence that the system is working as\nexpected and also transparency into what might be causing issues when things go\nwrong. Unlike traditional software development, a significant amount of\nexperimentation and collaboration is required when building with LLMs. From\nprompt engineering with domain experts, to tool integrations with engineers. A\nsystematic way to track progress is required.", "hierarchy": { "h1": { - "id": "llm-apps-are-complex-systems-", - "title": "LLM apps are complex systems " + "id": "llm-apps-are-complex-systems", + "title": "LLM apps are complex systems" }, "h2": { - "id": "llm-apps-are-complex-systems-", - "title": "LLM apps are complex systems " + "id": "llm-apps-are-complex-systems", + "title": "LLM apps are complex systems" } }, "level": "h2", "level_title": "LLM apps are complex systems" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-take-lessons-from-traditional-software-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-take-lessons-from-traditional-software", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15555,19 +14720,19 @@ ], "authed": false, "type": "markdown", - "hash": "#take-lessons-from-traditional-software-", + "hash": "#take-lessons-from-traditional-software", "content": "A large proportion of teams now building great products with LLMs aren't\nexperienced ML practitioners. Conveniently many of the goals and best practices\nfrom software development are broadly still relevant when thinking about LLM\nevals.", "hierarchy": { "h1": { - "id": "take-lessons-from-traditional-software-", - "title": "Take lessons from traditional software " + "id": "take-lessons-from-traditional-software", + "title": "Take lessons from traditional software" } }, "level": "h1", "level_title": "Take lessons from traditional software" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-automation-and-continuous-integration-is-still-the-goal-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-automation-and-continuous-integration-is-still-the-goal", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15594,23 +14759,23 @@ ], "authed": false, "type": "markdown", - "hash": "#automation-and-continuous-integration-is-still-the-goal-", + "hash": "#automation-and-continuous-integration-is-still-the-goal", "content": "Competent teams will traditionally set up robust test suites that are run\nautomatically against every system change before deploying to production. This\nis a key aspect of continuous integration (CI) and is done to protect against\nregressions and ensure the system is working as the engineers expect. 
Test\nsuites are generally made up of 3 canonical types of tests: unit, integration\nand end-to-end.\n\n\nUnit - very numerous, target a specific atom of code and are fast to run.\n\nIntegration - less numerous, cover multiple chunks of code, are slower to\nrun than unit tests and may require mocking external services.\n\nEnd-to-end - emulate the experience of an end UI user or API caller; they\nare slow to run and oftentimes need to interact with a live version of the\nsystem.\n\n\nThe most effective mix of test types for a given system often sparks debate.\nYet, the role of automated testing as part of the deployment lifecycle,\nalongside the various trade-offs between complexity and speed, remain valuable\nconsiderations when working with LLMs.", "hierarchy": { "h1": { - "id": "automation-and-continuous-integration-is-still-the-goal-", - "title": "Automation and continuous integration is still the goal " + "id": "automation-and-continuous-integration-is-still-the-goal", + "title": "Automation and continuous integration is still the goal" }, "h2": { - "id": "automation-and-continuous-integration-is-still-the-goal-", - "title": "Automation and continuous integration is still the goal " + "id": "automation-and-continuous-integration-is-still-the-goal", + "title": "Automation and continuous integration is still the goal" } }, "level": "h2", "level_title": "Automation and continuous integration is still the goal" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-unit-tests-are-tricky-for-llms-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-unit-tests-are-tricky-for-llms", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15637,23 +14802,23 @@ ], "authed": false, "type": "markdown", - "hash": "#unit-tests-are-tricky-for-llms-", + "hash": "#unit-tests-are-tricky-for-llms", "content": "There are however a number of fundamental differences with LLM native products\nwhen it comes to this type of testing. Of the test types, the most difficult to\ntransfer over to LLMs is the unit test because of:\nRandomness - LLMs produce probabilities over words which can result in\nrandom variation between generations for the same prompt. Certain\napplications, like task automation, require deterministic predictions. Others,\nlike creative writing, demand diversity.\n\nSubjectivity - we oftentimes want LLMs to produce natural human-like\ninteractions. 
This requires more nuanced approaches to evaluation because of\nthe inherent subjectivity of the correctness of outputs, which may depend on\ncontext or user preferences.\n\nCost and latency - given the computation involved, running SOTA LLMs can\ncome with a significant cost and tend to have relatively high latency;\nespecially if configured as an agent that can take multiple steps.\n\nScope - LLMs are increasingly capable of solving broader less well-defined\ntasks, resulting in the scope of what we are evaluating often being a lot more\nopen-ended than in traditional software applications.\n\n\nAs a result, the majority of automation efforts in evaluating LLM apps take the\nform of integration and end-to-end style tests and should be managed as such\nwithin CI pipelines.", "hierarchy": { "h1": { - "id": "unit-tests-are-tricky-for-llms-", - "title": "Unit tests are tricky for LLMs " + "id": "unit-tests-are-tricky-for-llms", + "title": "Unit tests are tricky for LLMs" }, "h2": { - "id": "unit-tests-are-tricky-for-llms-", - "title": "Unit tests are tricky for LLMs " + "id": "unit-tests-are-tricky-for-llms", + "title": "Unit tests are tricky for LLMs" } }, "level": "h2", "level_title": "Unit tests are tricky for LLMs" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-observability-needs-to-evolve-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-observability-needs-to-evolve", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15680,23 +14845,23 @@ ], "authed": false, "type": "markdown", - "hash": "#observability-needs-to-evolve-", + "hash": "#observability-needs-to-evolve", "content": "There is also the important practice of monitoring the system in production.\nLoad and usage patterns in the wild can be unexpected and lead to bugs.\nTraditional observability solutions like Datadog\nand New Relic monitor the health of the system and\nprovide alerts when things go wrong; usually based on simple heuristics and\nerror codes. This tends to fall short when it comes to LLMs. The more capable\nand complex the system, the harder it can be to determine something actually\nwent wrong and the more important observability and traceability is.\nFurthermore, one of the promises of building with LLMs is the potential to more\nrapidly intervene and experiment. By tweaking instructions you can fix issues\nand improve performance. Another advantage is that less technical teams can be\nmore involved in building; the\nmakeup of the teams\nis evolving. This impacts what's needed from an observability solution in this\nsetting. A tighter integration between observability data and the development\nenvironment to make changes is more beneficial, as well as usability for\ncollaborating with product teams and domain experts outside of engineering. 
This\npromise of more rapid and sometimes non-technical iteration cycles also\nincreases the importance of robust regression testing.\nBefore delving more into the stages of evaluation and how they relate to\nexisting CI and observability concepts, it's important to understand more about\nthe different types of evaluations in this space.", "hierarchy": { "h1": { - "id": "observability-needs-to-evolve-", - "title": "Observability needs to evolve " + "id": "observability-needs-to-evolve", + "title": "Observability needs to evolve" }, "h2": { - "id": "observability-needs-to-evolve-", - "title": "Observability needs to evolve " + "id": "observability-needs-to-evolve", + "title": "Observability needs to evolve" } }, "level": "h2", "level_title": "Observability needs to evolve" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-types-of-evaluation-can-vary-significantly-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-types-of-evaluation-can-vary-significantly", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15723,19 +14888,19 @@ ], "authed": false, "type": "markdown", - "hash": "#types-of-evaluation-can-vary-significantly-", + "hash": "#types-of-evaluation-can-vary-significantly", "content": "When evaluating one or more components of an LLM block, different types of\nevaluations are appropriate depending on your goals, the complexity of the task\nand available resources. Having good coverage over the components that are\nlikely to have an impact over the overall quality of the system is important.\nThese different types can be roughly characterized by the return type and the\nsource of, as well as the criteria for, the judgment required.", "hierarchy": { "h1": { - "id": "types-of-evaluation-can-vary-significantly-", - "title": "Types of evaluation can vary significantly " + "id": "types-of-evaluation-can-vary-significantly", + "title": "Types of evaluation can vary significantly" } }, "level": "h1", "level_title": "Types of evaluation can vary significantly" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-judgment-return-types-are-best-kept-simple-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-judgment-return-types-are-best-kept-simple", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15762,23 +14927,23 @@ ], "authed": false, "type": "markdown", - "hash": "#judgment-return-types-are-best-kept-simple-", + "hash": "#judgment-return-types-are-best-kept-simple", "content": "The most common judgment return types are familiar from traditional data science\nand machine learning frameworks. From simple to more complex:\nBinary - involves a yes/no, true/false, or pass/fail judgment based on\nsome criteria.\n\nCategorical - involves more than two categories; for exampling adding an\nabstain or maybe option to a binary judgment.\n\nRanking - the relative quality of output from different samples or\nvariations of the model are being ranked from best to worst based on some\ncriteria. Preference based judgments are often used in evaluating the quality\nof a ranking.\n\nNumerical - involves a score, a percentage, or any other kind of numeric\nrating.\n\nText - a simple comment or a more detailed critique. 
Often used when a\nmore nuanced or detailed evaluation of the model's output is required.\n\nMulti-task - combines multiple types of judgment simultaneously. For\nexample, a model's output could be evaluated using both a binary rating and a\nfree-form text explanation.\n\n\nSimple individual judgments can be easily aggregated across a dataset of\nmultiple examples using well known metrics. For example, for classification\nproblems, precision,\nrecall and\nF1 are typical choices. For rankings,\nthere are metrics like\nNDCG,\nElo ratings and\nKendall's Tau.\nFor numerical judgments there are variations of the\nBleu score.\nI find that in practice binary and categorical types generally cover the\nmajority of use cases. They have the added benefit of being the most straight\nforward to source reliably. The more complex the judgment type, the more\npotential for ambiguity there is and the harder it becomes to make inferences.", "hierarchy": { "h1": { - "id": "judgment-return-types-are-best-kept-simple-", - "title": "Judgment return types are best kept simple " + "id": "judgment-return-types-are-best-kept-simple", + "title": "Judgment return types are best kept simple" }, "h2": { - "id": "judgment-return-types-are-best-kept-simple-", - "title": "Judgment return types are best kept simple " + "id": "judgment-return-types-are-best-kept-simple", + "title": "Judgment return types are best kept simple" } }, "level": "h2", "level_title": "Judgment return types are best kept simple" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-model-sourced-judgments-are-increasingly-promising-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-model-sourced-judgments-are-increasingly-promising", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15805,23 +14970,23 @@ ], "authed": false, "type": "markdown", - "hash": "#model-sourced-judgments-are-increasingly-promising-", + "hash": "#model-sourced-judgments-are-increasingly-promising", "content": "Sourcing judgments is an area where there are new and evolving patterns around\nfoundation models like LLMs. At Humanloop, we've standardised around the\nfollowing canonical sources:\nHeuristic/Code - using simple deterministic rules based judgments against\nattributes like cost, token usage, latency, regex rules on the output, etc.\nThese are generally fast and cheap to run at scale.\n\nModel (or 'AI') - using other foundation models to provide judgments on\nthe output of the component. This allows for more qualitative and nuanced\njudgments for a fraction of the cost of human judgments.\n\nHuman - getting gold standard judgments from either end users of your\napplication, or internal domain experts. This can be the most expensive and\nslowest option, but also the most reliable.\n\n\n\n\n\nModel judgments in particular are increasingly promising and an active research\narea. The paper Judging LLM-as-a-Judge\ndemonstrates that an appropriately prompted GPT-4 model achieves over 80%\nagreement with human judgments when rating LLM model responses to questions on a\nscale of 1-10; that's equivalent to the levels of agreement between humans.\nSuch evaluators can be equally effective in evaluating the important non-LLM\ncomponents, such as the retrieval component in RAG. In\nAutomated Evaluation of Retrieval Augmented Generation\na GPT-3 model is tasked with extracting the most relevant sentences from the\nretrieved context. 
A numeric judgment for relevance is then computed using the\nratio of the number of relevant to irrelevant sentences, which was also found to\nbe highly correlated with expert human judgments.\nHowever, there are risks to consider. The same reasons that evaluating LLMs is\nhard apply to using them as evaluators. Recent research has also shown LLMs to\nhave biases that can contaminate the evaluation process. In\nBenchmarking Cognitive Biases in Large Language Models as Evaluators\nthey measure 6 cognitive biases across 15 different LLM variations. They find\nthat simple details such as the order of the results presented to the model can\nhave material impact on the evaluation.\n\n\n\nThe takeaway here is that it's important to still experiment with performance on\nyour target use cases before trusting LLM evaluators - evaluate the evaluator!\nAll the usual prompt engineering techniques such as including few-shot examples\nare just as applicable here. In addition, fine-tuning specialist, more\neconomical evaluator models using human judgements can be a real unlock.\nI believe teams should consider shifting more of their human judgment efforts up\na level to focus on helping improve model evaluators. This will ultimately lead\nto a more scalable, repeatable and cost-effective evaluation process. As well as\none where the human expertise can be more targeted on the most important high\nvalue scenarios.", "hierarchy": { "h1": { - "id": "model-sourced-judgments-are-increasingly-promising-", - "title": "Model sourced judgments are increasingly promising " + "id": "model-sourced-judgments-are-increasingly-promising", + "title": "Model sourced judgments are increasingly promising" }, "h2": { - "id": "model-sourced-judgments-are-increasingly-promising-", - "title": "Model sourced judgments are increasingly promising " + "id": "model-sourced-judgments-are-increasingly-promising", + "title": "Model sourced judgments are increasingly promising" } }, "level": "h2", "level_title": "Model sourced judgments are increasingly promising" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-judgment-criteria-is-where-most-of-the-customisation-happens-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-judgment-criteria-is-where-most-of-the-customisation-happens", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15848,23 +15013,23 @@ ], "authed": false, "type": "markdown", - "hash": "#judgment-criteria-is-where-most-of-the-customisation-happens-", + "hash": "#judgment-criteria-is-where-most-of-the-customisation-happens", "content": "The actual criteria for the judgment is what tends to be most specific to the\nneeds of a particular use case. This will either be defined in code, in a prompt\n(or in the parameters of a model), or just in guidelines depending on whether\nit's a code, model or human based evaluator.\nThere are lots of broad themes to crib from. Humanloop for example provides\ntemplates for popular use cases and best practises, with the ability to\nexperiment and customize. There are categories like general performance\n(latency, cost and error thresholds), behavioural (tone of voice, writing style,\ndiversity, factuality, relevance, etc.), ethical (bias, safety, privacy, etc.)\nand user experience (engagement, satisfaction, productivity, etc.).\nUnsurprisingly, starting with a small set of evaluators that cover the most\nimportant criteria is wise. 
These can then be adapted and added to over time as\nrequirements are clarified and new edge cases uncovered. Tradeoffs are often\nnecessary between these criteria. For example, a more diverse set of responses\nmight be more engaging, but also more likely to contain errors and higher\nquality can often come at a cost in terms of latency.\nThinking about these criteria upfront for your project can be a good hack to\nensure your team deeply understand the end goals of the application.", "hierarchy": { "h1": { - "id": "judgment-criteria-is-where-most-of-the-customisation-happens-", - "title": "Judgment criteria is where most of the customisation happens " + "id": "judgment-criteria-is-where-most-of-the-customisation-happens", + "title": "Judgment criteria is where most of the customisation happens" }, "h2": { - "id": "judgment-criteria-is-where-most-of-the-customisation-happens-", - "title": "Judgment criteria is where most of the customisation happens " + "id": "judgment-criteria-is-where-most-of-the-customisation-happens", + "title": "Judgment criteria is where most of the customisation happens" } }, "level": "h2", "level_title": "Judgment criteria is where most of the customisation happens" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-different-stages-of-evaluation-are-necessary-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-different-stages-of-evaluation-are-necessary", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15891,19 +15056,19 @@ ], "authed": false, "type": "markdown", - "hash": "#different-stages-of-evaluation-are-necessary-", + "hash": "#different-stages-of-evaluation-are-necessary", "content": "As discussed with the distinction between CI and observability; different stages\nof the app development lifecycle will have different evaluation needs. I've\nfound this lifecycle to naturally still consist of some sort of planning and\nscoping exercise, followed by cycles of development, deployment and monitoring.\nThese cycles are then repeated during the lifetime of the LLM app in order to\nintervene and improve performance. The stronger the teams, the more agile and\ncontinuous this process tends to be.\nDevelopment here will include both the typical app development; orchestrating\nyour LLM blocks in code, setting up your UIs, etc, as well more LLM specific\ninterventions and experimentation; including prompt engineering, context\ntweaking, tool integration updates and fine-tuning - to name a few. Both the\nchoices and quality of interventions to\noptimize your LLM performance are\nmuch improved if the right evaluation stages are in place. It facilitates a more\ndata-driven, systematic approach.\nFrom my experience there are 3 complementary stages of evaluation that are\nhighest ROI in supporting rapid iteration cycles of the LLM block related\ninterventions:\nInteractive - it's useful to have an interactive playground-like editor\nenvironment that allows rapid experimentation with components of the model\nand provides immediate evaluator feedback. This usually works best on a\nrelatively small number of scenarios. This allows teams (both technical and\nnon-technical) to quickly explore the design space of the LLM app and get an\ninformal sense of what works well.\n\nBatch offline - benchmarking or regression testing the most promising\nvariations over a larger curated set of scenarios to provide a more\nsystematic evaluation. 
Ideally a range of different evaluators for different\ncomponents of the app can contribute to this stage, some comparing against\ngold standard expected results for the task. This can fit naturally into\nexisting CI processes.\n\nMonitoring online - post deployment, real user interactions can be\nevaluated continuously to monitor the performance of the model. This process\ncan drive alerts, gather additional scenarios for offline evaluations and\ninform when to make further interventions. Staging deployments through\ninternal environments, or beta testing with selected cohorts of users first,\nare usually super valuable.\n\n\n\n\n\nIt's usually necessary to co-evolve to some degree the evaluation framework\nalongside the app development as more data becomes available and requirements\nare clarified. The ability to easily version control and share across stages and\nteams both the evaluators and the configuration of your app can significantly\nimprove the efficiency of this process.", "hierarchy": { "h1": { - "id": "different-stages-of-evaluation-are-necessary-", - "title": "Different stages of evaluation are necessary " + "id": "different-stages-of-evaluation-are-necessary", + "title": "Different stages of evaluation are necessary" } }, "level": "h1", "level_title": "Different stages of evaluation are necessary" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-high-quality-datasets-are-still-paramount-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-high-quality-datasets-are-still-paramount", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15930,19 +15095,19 @@ ], "authed": false, "type": "markdown", - "hash": "#high-quality-datasets-are-still-paramount-", + "hash": "#high-quality-datasets-are-still-paramount", "content": "Lack of access to high quality data will undermine any good evaluation\nframework. A good evaluation dataset should ideally be representative of the\nfull distribution of behaviours you expect to see and care about in production,\nconsidering both the inputs and the expected outputs. It's also important to\nkeep in mind that coverage of the expected behaviours for the individual\ncomponents of your app is important.\nHere are some strategies that I think are worth considering: leveraging\npublic/academic benchmarks, collecting data from your own systems and creating\nsynthetic data.", "hierarchy": { "h1": { - "id": "high-quality-datasets-are-still-paramount-", - "title": "High quality datasets are still paramount " + "id": "high-quality-datasets-are-still-paramount", + "title": "High quality datasets are still paramount" } }, "level": "h1", "level_title": "High quality datasets are still paramount" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-pay-attention-to-academic-and-public-benchmarks-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-pay-attention-to-academic-and-public-benchmarks", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -15969,23 +15134,23 @@ ], "authed": false, "type": "markdown", - "hash": "#pay-attention-to-academic-and-public-benchmarks-", + "hash": "#pay-attention-to-academic-and-public-benchmarks", "content": "There are well cited academic benchmarks that have been curated to evaluate the\ngeneral capabilities of LLMs. 
For AI leaders, these can be helpful to reference\nwhen choosing which base models to build with originally, or to graduate to when\nthings like scale and cost start to factor in. For example the\nLarge Model Systems Organization maintains\nChatbot Arena where they have crowd-sourced over 200k\nhuman preference votes to rank LLMs, both commercial and open source, as well\nas recording the performance on academic multi-task reasoning benchmarks like\nMMLU.\n\n\n\nAnother great resource in the same vein is\nHugging Face datasets, where they\nalso maintain a leaderboard of how all the latest OSS models perform across a\nrange of tasks using the\nEleuther LLM evaluation harness library.\n\n\n\nMore domain specific academic datasets may also be particularly relevant for\nyour target use case and can be used to warm start your evaluation efforts; for\nexample if you were working on\nmedical related tasks.", "hierarchy": { "h1": { - "id": "pay-attention-to-academic-and-public-benchmarks-", - "title": "Pay attention to academic and public benchmarks " + "id": "pay-attention-to-academic-and-public-benchmarks", + "title": "Pay attention to academic and public benchmarks" }, "h2": { - "id": "pay-attention-to-academic-and-public-benchmarks-", - "title": "Pay attention to academic and public benchmarks " + "id": "pay-attention-to-academic-and-public-benchmarks", + "title": "Pay attention to academic and public benchmarks" } }, "level": "h2", "level_title": "Pay attention to academic and public benchmarks" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-real-product-interactions-are-the-most-valuable-source-of-data-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-real-product-interactions-are-the-most-valuable-source-of-data", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -16012,23 +15177,23 @@ ], "authed": false, "type": "markdown", - "hash": "#real-product-interactions-are-the-most-valuable-source-of-data-", + "hash": "#real-product-interactions-are-the-most-valuable-source-of-data", "content": "Arguably the best form of dataset comes from real user interactions. Useful\nsources of this kind of data are actually the interactive and monitoring stages\ndiscussed above.\nWith access to an interactive environment for prompt engineering (or a test\nversion of your application), internal domain experts can synthesize examples of\nthe kinds of interactions they expect to see in production. These interactions\nshould be recorded throughout the course of initial experimentation to form a\nbenchmark dataset for subsequent offline evaluations.\nFor leveraging real end-user interactions, a tighter integration between\nobservability data and the development environment that manages evaluations\nmakes it easier to curate real scenarios into your benchmark datasets over time.\n\n\n\nSomething worth careful consideration to maximise the impact of end-user\ninteractions is to set up your application to\ncapture rich feedback\nfrom users from the start. This is an example of an online evaluator that relies\non human judgments, which can be used to filter for particularly interesting\nscenarios to add to benchmark datasets.\nFeedback doesn't need to be only explicit from the user; it can be provided\nimplicitly in the way they interact with the system. 
For example,\ngithub copilot reportedly\nmonitors whether the code suggestion was accepted at various time increments\nafter the suggestion was made, as well as whether the user made any edits to the\nsuggestion before accepting it.", "hierarchy": { "h1": { - "id": "real-product-interactions-are-the-most-valuable-source-of-data-", - "title": "Real product interactions are the most valuable source of data " + "id": "real-product-interactions-are-the-most-valuable-source-of-data", + "title": "Real product interactions are the most valuable source of data" }, "h2": { - "id": "real-product-interactions-are-the-most-valuable-source-of-data-", - "title": "Real product interactions are the most valuable source of data " + "id": "real-product-interactions-are-the-most-valuable-source-of-data", + "title": "Real product interactions are the most valuable source of data" } }, "level": "h2", "level_title": "Real product interactions are the most valuable source of data" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-synthetic-data-is-on-the-rise-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-synthetic-data-is-on-the-rise", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -16055,23 +15220,23 @@ ], "authed": false, "type": "markdown", - "hash": "#synthetic-data-is-on-the-rise-", + "hash": "#synthetic-data-is-on-the-rise", "content": "Once you have a small amount of high quality data leveraging LLMs to generate\nadditional input examples can help bootstrap to larger datasets. By utilizing\nfew-shot prompting and including a representative subset of your existing data\nwithin the prompt, you can guide the synthesizer model to generate a wide range\nof supplementary examples.\nA quick pointer here is to prompt the model to generate a batch of examples at a\ntime, rather than one at a time, such that you can encourage characteristics\nlike diversity between examples. Or, similarly, feed previously generated\nexamples back into your prompt. For instance, for a customer service system,\nprompts could be designed to elicit responses across a variety of emotional\nstates, from satisfaction to frustration.\nA specific example of this is model red-teaming, or synthesizing adversarial\nexamples. This is where you use the synthesizer model to generate examples that\nare designed to break the system. For example, in\nRed Teaming Language Models with Language Models,\nthey uncover offensive replies, data leakage and other vulnerabilities in an LLM\nchat-bot using variations of few-shot prompts to generate adversarial questions.\nThey also leverage a pre-trained offensive classifier to help automate their\nevaluation process. However, it is worth noting they too point out the\nlimitations caused by LLM biases that limits diversity. 
They ultimately need to\ngenerate and filter hundreds of thousands of synthetic examples.\n\n\n\nAs with LLM evaluators, all the same rigour and tools should be applied to\nevaluating the quality of the synthetic data generator model before trusting it.", "hierarchy": { "h1": { - "id": "synthetic-data-is-on-the-rise-", - "title": "Synthetic data is on the rise " + "id": "synthetic-data-is-on-the-rise", + "title": "Synthetic data is on the rise" }, "h2": { - "id": "synthetic-data-is-on-the-rise-", - "title": "Synthetic data is on the rise " + "id": "synthetic-data-is-on-the-rise", + "title": "Synthetic data is on the rise" } }, "level": "h2", "level_title": "Synthetic data is on the rise" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-looking-forward-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.overview-looking-forward", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/overview", @@ -16098,12 +15263,12 @@ ], "authed": false, "type": "markdown", - "hash": "#looking-forward-", + "hash": "#looking-forward", "content": "This is a rapidly evolving area of research and practice. Here's a few areas\nthat I'm particularly excited about working more on at Humanloop over the coming\nmonths that we'll touch on further in future posts:\nIncreasing adoption of AI based evaluators for all components of these\nsystems, with improved support for fine-tuning and specialisation happening at\nthis level. The existence of OpenAI's\nSuperalignment team\nshows there is focus here on the research front.\n\nSupporting more multi-modal applications deployed in production, with more\ntext, image, voice and even video based models coming online.\n\nMore complex agent-based workflows and experimenting with more multi-agent\nsetups and how evaluation needs to adapt to supervise these systems.\n\nMoving towards more end-to-end optimization for the components of these\ncomplex systems. A robust set of evaluators can provide an objective to\nmeasure performance, coupled with data synthesization to simulate the system.\n\n\nAt Humanloop, we've built an integrated solution for managing the development\nlifecycle of LLM apps from first principles, which includes some of the\nevaluation challenges discussed in this post. Please\nreach out if you'd like to learn more.", "hierarchy": { "h1": { - "id": "looking-forward-", - "title": "Looking forward... " + "id": "looking-forward", + "title": "Looking forward..." } }, "level": "h1", @@ -16138,11 +15303,10 @@ "authed": false, "type": "markdown", "description": "How do you evaluate your large language model use case using a dataset and an evaluator on Humanloop?\nIn this guide, we will walk through creating a dataset and using it to run an offline evaluation.", - "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan", - "code_snippets": [] + "content": "This feature is not available for the Free tier. 
Please contact us if you wish\nto learn more about our Enterprise plan" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluate-models-offline-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluate-models-offline-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluate-models-offline", @@ -16169,23 +15333,23 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You need to have access to Evaluations\n\nYou also need to have a Prompt – if not, please follow our Prompt creation guide.\n\nFinally, you need at least a few Logs in your prompt. Use the Editor to generate some logs if you have none.\n\n\n\n\nYou need logs for your project because we will use these as a source of test datapoints for the dataset we create. If you want to make arbitrary test datapoints from scratch, see our guide to doing this from the API. We will soon update the app to enable arbitrary test datapoint creation from your browser.\nFor this example, we will evaluate a model responsible for extracting critical information from a customer service request and returning this information in JSON. In the image below, you can see the model config we've drafted on the left and an example of it running against a customer query on the right.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluate-models-offline-set-up-a-dataset-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluate-models-offline-set-up-a-dataset", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluate-models-offline", @@ -16212,23 +15376,23 @@ ], "authed": false, "type": "markdown", - "hash": "#set-up-a-dataset-", + "hash": "#set-up-a-dataset", "content": "We will create a dataset based on existing logs in the project.\n\n\nNavigate to the Logs tab\nSelect the logs you would like to convert into test datapoints\nFrom the dropdown menu in the top right (see below), choose Add to Dataset\n\n\nIn the dialog box, give the new dataset a name and provide an optional description. Click Create dataset.\n\n\n\n\nYou can add more datapoints to the same dataset later by clicking the 'add to existing dataset' button at the top.\nGo to the Datasets tab.\nClick on the newly created dataset. One datapoint will be present for each log you selected in Step 3\n\n\nClick on a datapoint to inspect its parameters.\n\n\nA test datapoint contains inputs (the variables passed into your model config template), an optional sequence of messages (if used for a chat model) and a target representing the desired output.\nWhen existing logs are converted to datapoints, the datapoint target defaults to the output of the source Log.\nIn our example, we created datapoints from existing logs. 
The default behaviour is that the original log's output becomes an output field in the target JSON.\nTo access the feature field more efficiently in our evaluator, we'll modify the datapoint targets to be a raw JSON with a feature key.\n\n\nModify the datapoint if you need to make refinements\nYou can provide an arbitrary JSON object as the target.", "hierarchy": { "h2": { - "id": "set-up-a-dataset-", - "title": "Set up a dataset " + "id": "set-up-a-dataset", + "title": "Set up a dataset" }, "h3": { - "id": "set-up-a-dataset-", - "title": "Set up a dataset " + "id": "set-up-a-dataset", + "title": "Set up a dataset" } }, "level": "h3", "level_title": "Set up a dataset" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluate-models-offline-create-an-offline-evaluator--1", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluate-models-offline-create-an-offline-evaluator-1", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluate-models-offline", @@ -16255,7 +15419,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-an-offline-evaluator--1", + "hash": "#create-an-offline-evaluator-1", "content": "Having set up a dataset, we'll now create the evaluator. As with online evaluators, it's a Python function but for offline mode, it also takes a testcase parameter alongside the generated log.\n\n\nNavigate to the evaluations section, and then the Evaluators tab\nSelect + New Evaluator and choose Offline Evaluation\nChoose Start from scratch\nFor this example, we'll use the code below to compare the LLM generated output with what we expected for that testcase.\nUse the Debug Console\nIn the debug console at the bottom of the dialog, click Load data and then Datapoints from dataset. Select the dataset you created in the previous section. The console will be populated with its datapoints.\n\n\nChoose a model config from the dropdown menu.\nClick the run button at the far right of one of the test datapoints.\nA new debug run will be triggered, which causes an LLM generation using that datapoint's inputs and messages parameters. 
The generated log and the test datapoint will be passed to the evaluator, and the resulting evaluation will be displayed in the Result column.\nClick Create when you are happy with the evaluator.", "code_snippets": [ { @@ -16271,15 +15435,15 @@ ], "hierarchy": { "h2": { - "id": "create-an-offline-evaluator--1", - "title": "Create an offline evaluator " + "id": "create-an-offline-evaluator-1", + "title": "Create an offline evaluator" } }, "level": "h2", "level_title": "Create an offline evaluator" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluate-models-offline-trigger-an-offline-evaluation-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluate-models-offline-trigger-an-offline-evaluation", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluate-models-offline", @@ -16306,12 +15470,12 @@ ], "authed": false, "type": "markdown", - "hash": "#trigger-an-offline-evaluation-", + "hash": "#trigger-an-offline-evaluation", "content": "Now that you have an offline evaluator and a dataset, you can use them to evaluate the performance of any model config in your project.\n\n\nGo to the Evaluations section.\nIn the Runs tab, click Run Evaluation\nIn the dialog box, choose a model config to evaluate and select your newly created dataset and evaluator.\n\n\nClick Batch Generate\nA new evaluation is launched. Click on the card to inspect the results.\nA batch generation has now been triggered. This means that the model config you selected will be used to generate a log for each datapoint in the dataset. It may take some time for the evaluation to complete, depending on how many test datapoints are in your dataset and what model config you are using. Once all the logs have been generated, the evaluator will execute for each in turn.\nInspect the results of the evaluation.", "hierarchy": { "h2": { - "id": "trigger-an-offline-evaluation-", - "title": "Trigger an offline evaluation " + "id": "trigger-an-offline-evaluation", + "title": "Trigger an offline evaluation" } }, "level": "h2", @@ -16346,11 +15510,10 @@ "authed": false, "type": "markdown", "description": "How to use Humanloop to evaluate your large language model use-case, using a dataset and an evaluator.\nIn this guide, we'll walk through an example of using our API to create dataset and trigger an evaluation.", - "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan\n\n\nThis guide uses our Python SDK. All of the\nendpoints used are available in our TypeScript SDK\nand directly via the API.", - "code_snippets": [] + "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan\n\n\nThis guide uses our Python SDK. All of the\nendpoints used are available in our TypeScript SDK\nand directly via the API." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluations-using-api", @@ -16377,7 +15540,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "First you need to install and initialize the SDK. If you have already done this, skip to the next section. 
Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)", "code_snippets": [ { @@ -16403,15 +15566,15 @@ ], "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites: " + "id": "prerequisites", + "title": "Prerequisites:" } }, "level": "h2", "level_title": "Prerequisites:" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-create-evaluation-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-create-evaluation", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluations-using-api", @@ -16438,19 +15601,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-evaluation-", + "hash": "#create-evaluation", "content": "We'll go through how to use the SDK in a Python script to set up a project, create a dataset and then finally trigger an evaluation.", "hierarchy": { "h2": { - "id": "create-evaluation-", - "title": "Create evaluation " + "id": "create-evaluation", + "title": "Create evaluation" } }, "level": "h2", "level_title": "Create evaluation" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-set-up-a-project-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-set-up-a-project", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluations-using-api", @@ -16477,7 +15640,7 @@ ], "authed": false, "type": "markdown", - "hash": "#set-up-a-project-", + "hash": "#set-up-a-project", "content": "Import Humanloop and set your Humanloop and OpenAI API keys.\nCreate a project and register your first model config\nWe'll use OpenAI's GPT-4 for extracting product feature names from customer queries in this example. 
The first model config created against the project is automatically deployed:\nIf you log onto your Humanloop account you will now see your project with a single model config defined:", "code_snippets": [ { @@ -16499,19 +15662,19 @@ ], "hierarchy": { "h2": { - "id": "set-up-a-project-", - "title": "Set up a project " + "id": "set-up-a-project", + "title": "Set up a project" }, "h3": { - "id": "set-up-a-project-", - "title": "Set up a project " + "id": "set-up-a-project", + "title": "Set up a project" } }, "level": "h3", "level_title": "Set up a project" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-create-a-dataset-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-create-a-dataset", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluations-using-api", @@ -16538,7 +15701,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-dataset-", + "hash": "#create-a-dataset", "content": "Follow the steps in our guide to Upload a Dataset via API.\n\n\nNow test your model manually by generating a log for one of the datapoints' messages:\nYou can see from the output field in the response that the model has done a good job at extracting the mentioned features in the desired json format:", "code_snippets": [ { @@ -16560,19 +15723,19 @@ ], "hierarchy": { "h2": { - "id": "create-a-dataset-", - "title": "Create a dataset " + "id": "create-a-dataset", + "title": "Create a dataset" }, "h3": { - "id": "create-a-dataset-", - "title": "Create a dataset " + "id": "create-a-dataset", + "title": "Create a dataset" } }, "level": "h3", "level_title": "Create a dataset" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-create-an-evaluator-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-create-an-evaluator", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluations-using-api", @@ -16599,7 +15762,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-an-evaluator-", + "hash": "#create-an-evaluator", "content": "Now that you have a project with a model config and a dataset defined, you can create an evaluator that will determine the success criteria for a log generated from the model using the target defined in the test datapoint.\n\n\nCreate an evaluator to determine if the extracted JSON is correct and test it against the generated log and the corresponding test datapoint:\nSubmit this evaluator to Humanloop\nThis means it can be used for future evaluations triggered via the UI or the API:\nIn your Humanloop project you will now see an evaluator defined:", "code_snippets": [ { @@ -16629,15 +15792,15 @@ ], "hierarchy": { "h2": { - "id": "create-an-evaluator-", - "title": "Create an evaluator " + "id": "create-an-evaluator", + "title": "Create an evaluator" } }, "level": "h2", "level_title": "Create an evaluator" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-launch-an-evaluation-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-launch-an-evaluation", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluations-using-api", @@ -16664,7 +15827,7 @@ ], "authed": false, "type": "markdown", - "hash": "#launch-an-evaluation-", + "hash": 
"#launch-an-evaluation", "content": "Launch an evaluation\nYou can now low against the model config using the dataset and evaluator. In practice you can include more than one evaluator:\nNavigate to your Humanloop account to see the evaluation results. Initially it will be in a pending state, but will quickly move to completed given the small number of test cases. The datapoints generated by your model as part of the evaluation will also be recorded in your project's logs table.", "code_snippets": [ { @@ -16678,19 +15841,19 @@ ], "hierarchy": { "h2": { - "id": "launch-an-evaluation-", - "title": "Launch an evaluation " + "id": "launch-an-evaluation", + "title": "Launch an evaluation" }, "h3": { - "id": "launch-an-evaluation-", - "title": "Launch an evaluation " + "id": "launch-an-evaluation", + "title": "Launch an evaluation" } }, "level": "h3", "level_title": "Launch an evaluation" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-create-evaluation---full-script-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluations-using-api-create-evaluation---full-script", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluations-using-api", @@ -16717,7 +15880,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-evaluation---full-script-", + "hash": "#create-evaluation---full-script", "content": "Here is the full script you can copy and paste and run in your Python environment:", "code_snippets": [ { @@ -16727,8 +15890,8 @@ ], "hierarchy": { "h2": { - "id": "create-evaluation---full-script-", - "title": "Create evaluation - full script " + "id": "create-evaluation---full-script", + "title": "Create evaluation - full script" } }, "level": "h2", @@ -16763,11 +15926,10 @@ "authed": false, "type": "markdown", "description": "Learn how to use LLM as a judge to check for PII in Logs.\nIn this guide, we will set up an LLM evaluator to check for PII (Personally Identifiable Information) in Logs.", - "content": "As well as using Python code to evaluate Logs, you can also create special-purpose prompts for LLMs to evaluate Logs too.\nIn this guide, we'll show how to set up LLM evaluations.", - "code_snippets": [] + "content": "As well as using Python code to evaluate Logs, you can also create special-purpose prompts for LLMs to evaluate Logs too.\nIn this guide, we'll show how to set up LLM evaluations." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.use-llms-to-evaluate-logs-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.use-llms-to-evaluate-logs-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/use-llms-to-evaluate-logs", @@ -16794,19 +15956,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You need to have access to evaluations.\n\nYou also need to have a Prompt – if not, please follow our Prompt creation guide.\n\nFinally, you need at least a few logs in your project. 
Use the Editor to generate some logs if you don't have any yet.", "hierarchy": { "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.use-llms-to-evaluate-logs-set-up-an-llm-evaluator-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.use-llms-to-evaluate-logs-set-up-an-llm-evaluator", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/use-llms-to-evaluate-logs", @@ -16833,19 +15995,19 @@ ], "authed": false, "type": "markdown", - "hash": "#set-up-an-llm-evaluator-", + "hash": "#set-up-an-llm-evaluator", "content": "From the Evaluations page, click New Evaluator and select AI.\n\n\nFrom the presets menu on the left-hand side of the page, select PII.\n\n\nSet the evaluator to Online mode, and toggle Auto-run to on. This will make the PII checker run on all new logs in the project.\n\n\nClick Create in the bottom left of the page.\nGo to Editor and try generating a couple of logs, some containing PII and some without.\nGo to the Logs table to review these logs.\n\n\nClick one of the logs to see more details in the drawer.\nIn our example below, you can see that the log did contain PII, and the PII check evaluator has correctly identified this and flagged it with False.\n\n\nClick View session at the top of the log drawer to inspect in more detail the LLM evaluator's generation itself.\nSelect the PII check entry in the session trace\nIn the Completed Prompt tab of the log, you'll see the full input and output of the LLM evaluator generation.", "hierarchy": { "h3": { - "id": "set-up-an-llm-evaluator-", - "title": "Set up an LLM evaluator " + "id": "set-up-an-llm-evaluator", + "title": "Set up an LLM evaluator" } }, "level": "h3", "level_title": "Set up an LLM evaluator" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.use-llms-to-evaluate-logs-available-variables-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.use-llms-to-evaluate-logs-available-variables", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/use-llms-to-evaluate-logs", @@ -16872,7 +16034,7 @@ ], "authed": false, "type": "markdown", - "hash": "#available-variables-", + "hash": "#available-variables", "content": "In the prompt editor for an LLM evaluator, you have access to the underlying log you are evaluating as well as the testcase that gave rise to it in the case of offline evaluations. These are accessed with the standard {{ variable }} syntax, enhanced with a familiar dot notation to pick out specific values from inside the log and testcase objects. 
The log and testcase shown in the debug console correspond to the objects available in the context of the LLM evaluator prompt.\nFor example, suppose you are evaluating a log object like this.\nIn the LLM evaluator prompt, if you write {{ log.inputs.hello }} it will be replaced with world in the final prompt sent to the LLM evaluator model.\nNote that in order to get access to the fully populated prompt that was sent in the underlying log, you can use {{ log_prompt }}.", "code_snippets": [ { @@ -16883,8 +16045,8 @@ ], "hierarchy": { "h3": { - "id": "available-variables-", - "title": "Available variables " + "id": "available-variables", + "title": "Available variables" } }, "level": "h3", @@ -16919,11 +16081,10 @@ "authed": false, "type": "markdown", "description": "Learn how to run an evaluation in your own infrastructure and post the results to Humanloop.\nIn this guide, we'll show how to run an evaluation in your own infrastructure and post the results to Humanloop.", - "content": "For some use cases, you may wish to run your evaluation process outside of Humanloop, as opposed to running the evaluators we offer in our Humanloop runtime.\nFor example, you may have implemented an evaluator that uses your own custom model or which has to interact with multiple systems. In these cases, you can continue to leverage the datasets you have curated on Humanloop, as well as consolidate all of the results alongside the prompts you maintain in Humanloop.\nIn this guide, we'll show an example of setting up a simple script to run such a self-hosted evaluation using our Python SDK.", - "code_snippets": [] + "content": "For some use cases, you may wish to run your evaluation process outside of Humanloop, as opposed to running the evaluators we offer in our Humanloop runtime.\nFor example, you may have implemented an evaluator that uses your own custom model or which has to interact with multiple systems. In these cases, you can continue to leverage the datasets you have curated on Humanloop, as well as consolidate all of the results alongside the prompts you maintain in Humanloop.\nIn this guide, we'll show an example of setting up a simple script to run such a self-hosted evaluation using our Python SDK." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.self-hosted-evaluations-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.self-hosted-evaluations-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/self-hosted-evaluations", @@ -16950,19 +16111,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You need to have access to evaluations\n\nYou also need to have a Prompt – if not, please follow our Prompt creation guide.\n\nYou need to have a dataset in your project. 
See our dataset creation guide if you don't yet have one.\n\nYou need to have a model config that you're trying to evaluate - create one in the Editor.", "hierarchy": { "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.self-hosted-evaluations-setting-up-the-script-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.self-hosted-evaluations-setting-up-the-script", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/self-hosted-evaluations", @@ -16989,7 +16150,7 @@ ], "authed": false, "type": "markdown", - "hash": "#setting-up-the-script-", + "hash": "#setting-up-the-script", "content": "Install the latest version of the Humanloop Python SDK:\nIn a new Python script, import the Humanloop SDK and create an instance of the client:\nRetrieve the ID of the Humanloop project you are working in - you can find this in the Humanloop app\nRetrieve the dataset you're going to use for evaluation from the project\nCreate an external evaluator\nRetrieve the model config you're evaluating\nInitiate an evaluation run in Humanloop\nAfter this step, you'll see a new run in the Humanloop app, under the Evaluations tab of your project. It should have status running.\nIterate through the datapoints in your dataset and use the model config to generate logs from them\nEvaluate the logs using your own evaluation logic and post the results back to Humanloop\nIn this example, we use an extremely simple evaluation function for clarity.\nMark the evaluation run as completed", "code_snippets": [ { @@ -17075,15 +16236,15 @@ ], "hierarchy": { "h3": { - "id": "setting-up-the-script-", - "title": "Setting up the script " + "id": "setting-up-the-script", + "title": "Setting up the script" } }, "level": "h3", "level_title": "Setting up the script" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.self-hosted-evaluations-review-the-results-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.self-hosted-evaluations-review-the-results", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/self-hosted-evaluations", @@ -17110,12 +16271,12 @@ ], "authed": false, "type": "markdown", - "hash": "#review-the-results-", + "hash": "#review-the-results", "content": "After running this script with the appropriate resource IDs (project, dataset, model config), you should see the results in the Humanloop app, right alongside any other evaluations you have performed using the Humanloop runtime.", "hierarchy": { "h2": { - "id": "review-the-results-", - "title": "Review the results " + "id": "review-the-results", + "title": "Review the results" } }, "level": "h2", @@ -17150,11 +16311,10 @@ "authed": false, "type": "markdown", "description": "Learn how to use the Humanloop Python SDK to create an evaluation run and post-generated logs.\nIn this guide, we'll demonstrate an evaluation run workflow where logs are generated outside the Humanloop environment and posted via API.", - "content": "If running your infrastructure to generate logs, you can still leverage the Humanloop evaluations suite via our API. 
The workflow looks like this:\nTrigger the creation of an evaluation run\n\nLoop through the datapoints in your dataset and perform generations on your side\n\nPost the generated logs to the evaluation run\n\n\nThis works with any evaluator - if you have configured a Humanloop-runtime evaluator, these will be automatically run on each log you post to the evaluation run; or, you can use self-hosted evaluators and post the results to the evaluation run yourself (see Self-hosted evaluations).", - "code_snippets": [] + "content": "If running your infrastructure to generate logs, you can still leverage the Humanloop evaluations suite via our API. The workflow looks like this:\nTrigger the creation of an evaluation run\n\nLoop through the datapoints in your dataset and perform generations on your side\n\nPost the generated logs to the evaluation run\n\n\nThis works with any evaluator - if you have configured a Humanloop-runtime evaluator, these will be automatically run on each log you post to the evaluation run; or, you can use self-hosted evaluators and post the results to the evaluation run yourself (see Self-hosted evaluations)." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-externally-generated-logs-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-externally-generated-logs-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluating-externally-generated-logs", @@ -17181,19 +16341,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You need to have access to evaluations\n\nYou also need to have a project created - if not, please first follow our project creation guides.\n\nYou need to have a dataset in your project. 
See our dataset creation guide if you don't yet have one.\n\nYou need a model configuration to evaluate, so create one in the Editor.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-externally-generated-logs-setting-up-the-script-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-externally-generated-logs-setting-up-the-script", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluating-externally-generated-logs", @@ -17220,7 +16380,7 @@ ], "authed": false, "type": "markdown", - "hash": "#setting-up-the-script-", + "hash": "#setting-up-the-script", "content": "Install the latest version of the Humanloop Python SDK\nIn a new Python script, import the Humanloop SDK and create an instance of the client\nRetrieve the ID of the Humanloop project you are working in\nYou can find this in the Humanloop app.\nRetrieve the dataset you're going to use for evaluation from the project\nSet up the model config you are evaluating\nIf you constructed this in Humanloop, retrieve it by calling:\nAlternatively, if your model config lives outside the Humanloop system, post it to Humanloop with the register model config endpoint.\nEither way, you need the ID of the config.\nIn the Humanloop app, create an evaluator\nWe'll create a Valid JSON checker for this guide.\nVisit the Evaluations tab, and select Evaluators\n\nClick + New Evaluator and choose Code from the options.\n\nSelect the Valid JSON preset on the left.\n\nChoose the mode Offline in the settings panel on the left.\n\nClick Create.\n\nCopy your new evaluator's ID from the address bar. It starts with evfn_.\n\n\nCreate an evaluation run with hl_generated set to False\nThis tells the Humanloop runtime that it should not trigger evaluations but wait for them to be posted via the API.\nBy default, the evaluation status after creation is pending. Before sending the generation logs, set the status to running.\nIterate through the datapoints in the dataset, produce a generation and post the evaluation\nRun the full script above.\nIf everything goes well, you should now have posted a new evaluation run to Humanloop and logged all the generations derived from the underlying datapoints.\nThe Humanloop evaluation runtime will now iterate through those logs and run the Valid JSON evaluator on each. To check progress:\nVisit your project in the Humanloop app and go to the Evaluations tab.\nYou should see the run you recently created; click through to it, and you'll see rows in the table showing the generations.\n\n\n\n\nIn this case, all the evaluations returned False because the \"Hello World!\" string wasn't valid JSON. 
Try logging something valid JSON to check that everything works as expected.", "code_snippets": [ { @@ -17306,15 +16466,15 @@ ], "hierarchy": { "h2": { - "id": "setting-up-the-script-", - "title": "Setting up the script " + "id": "setting-up-the-script", + "title": "Setting up the script" } }, "level": "h2", "level_title": "Setting up the script" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-externally-generated-logs-full-script-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-externally-generated-logs-full-script", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluating-externally-generated-logs", @@ -17341,7 +16501,7 @@ ], "authed": false, "type": "markdown", - "hash": "#full-script-", + "hash": "#full-script", "content": "For reference, here's the full script to get started quickly.\n\n\nIt's also a good practice to wrap the above code in a try-except block and to mark the evaluation run as failed (using update_status) if an exception causes something to fail.", "code_snippets": [ { @@ -17351,8 +16511,8 @@ ], "hierarchy": { "h2": { - "id": "full-script-", - "title": "Full Script " + "id": "full-script", + "title": "Full Script" } }, "level": "h2", @@ -17386,11 +16546,10 @@ ], "authed": false, "type": "markdown", - "description": "Learn how to set up a human evaluator to collect feedback on the output of your model.\nThis guide demonstrates how to run a batch generation and collect manual human feedback.", - "code_snippets": [] + "description": "Learn how to set up a human evaluator to collect feedback on the output of your model.\nThis guide demonstrates how to run a batch generation and collect manual human feedback." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-with-human-feedback-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-with-human-feedback-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluating-with-human-feedback", @@ -17417,19 +16576,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You need to have access to evaluations.\n\nYou also need to have a Prompt – if not, please follow our Prompt creation guide.\n\nFinally, you need at least a few logs in your project. 
Use the Editor to generate some logs if you don't have any yet.", "hierarchy": { "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-with-human-feedback-set-up-an-evaluator-to-collect-human-feedback-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-with-human-feedback-set-up-an-evaluator-to-collect-human-feedback", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluating-with-human-feedback", @@ -17456,19 +16615,19 @@ ], "authed": false, "type": "markdown", - "hash": "#set-up-an-evaluator-to-collect-human-feedback-", + "hash": "#set-up-an-evaluator-to-collect-human-feedback", "content": "Create a 'Human' Evaluator\nFrom the Evaluations page, click New Evaluator and select Human.\n\n\nGive the evaluator a name and description and click Create in the top-right.\nReturn to the Evaluations page and select Run Evaluation.\nChoose the model config you are evaluating, a dataset you would like to evaluate against and then select the new Human evaluator.\n\n\nClick Batch generate and follow the link in the bottom-right corner to see the evaluation run.\n\n\nView the details\nAs the rows populate with the generated output from the model, you can review those outputs and apply feedback in the rating column. Click a row to see the full details of the Log in a drawer.\nApply your feedback either directly in the table, or from the drawer.\n\n\nOnce you've finished providing feedback for all the Logs in the run, click Mark as complete in the top right of the page.\nYou can review the aggregated feedback results in the Stats section on this page.", "hierarchy": { "h3": { - "id": "set-up-an-evaluator-to-collect-human-feedback-", - "title": "Set up an evaluator to collect human feedback " + "id": "set-up-an-evaluator-to-collect-human-feedback", + "title": "Set up an evaluator to collect human feedback" } }, "level": "h3", "level_title": "Set up an evaluator to collect human feedback" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-with-human-feedback-configuring-the-feedback-schema-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.evaluating-with-human-feedback-configuring-the-feedback-schema", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/evaluating-with-human-feedback", @@ -17495,12 +16654,12 @@ ], "authed": false, "type": "markdown", - "hash": "#configuring-the-feedback-schema-", + "hash": "#configuring-the-feedback-schema", "content": "If you need a more complex feedback schema, visit the Settings page in your project and follow the link to Feedbacks. Here, you can add more categories to the default feedback types. 
If you need more control over feedback types, you can create new ones via the API.", "hierarchy": { "h2": { - "id": "configuring-the-feedback-schema-", - "title": "Configuring the feedback schema " + "id": "configuring-the-feedback-schema", + "title": "Configuring the feedback schema" } }, "level": "h2", @@ -17535,11 +16694,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create and use online evaluators to observe the performance of your models.\nIn this guide, we will demonstrate how to create and use online evaluators to observe the performance of your models.", - "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan", - "code_snippets": [] + "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.monitoring-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.monitoring-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/monitoring", @@ -17566,23 +16724,23 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You need to have access to evaluations.\n\nYou also need to have a Prompt – if not, please follow our Prompt creation guide.\n\nFinally, you need at least a few logs in your project. Use the Editor to generate some logs if you don't have any yet.\n\n\nTo set up an online Python evaluator:\n\n\nGo to the Evaluations page in one of your projects and select the Evaluators tab\nSelect + New Evaluator and choose Code Evaluator in the dialog\n\n\nFrom the library of presets on the left-hand side, we'll choose Valid JSON for this guide. You'll see a pre-populated evaluator with Python code that checks the output of our model is valid JSON grammar.\n\n\nIn the debug console at the bottom of the dialog, click Random logs from project. The console will be populated with five datapoints from your project.\n\n\nClick the Run button at the far right of one of the log rows. 
After a moment, you'll see the Result column populated with a True or False.\n\n\nExplore the log dictionary in the table to help understand what is available on the Python object passed into the evaluator.\nClick Create on the left side of the page.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.monitoring-activate-an-evaluator-for-a-project-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.monitoring-activate-an-evaluator-for-a-project", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/monitoring", @@ -17609,19 +16767,19 @@ ], "authed": false, "type": "markdown", - "hash": "#activate-an-evaluator-for-a-project-", + "hash": "#activate-an-evaluator-for-a-project", "content": "On the new **Valid JSON ** evaluator in the Evaluations tab, toggle the switch to on - the evaluator is now activated for the current project.\n\n\nGo to the Editor, and generate some fresh logs with your model.\nOver in the Logs tab you'll see the new logs. The Valid JSON evaluator runs automatically on these new logs, and the results are displayed in the table.", "hierarchy": { "h2": { - "id": "activate-an-evaluator-for-a-project-", - "title": "Activate an evaluator for a project " + "id": "activate-an-evaluator-for-a-project", + "title": "Activate an evaluator for a project" } }, "level": "h2", "level_title": "Activate an evaluator for a project" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.monitoring-prerequisites--1", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.evaluation.monitoring-prerequisites-1", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/evaluation/monitoring", @@ -17648,16 +16806,16 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites--1", + "hash": "#prerequisites-1", "content": "A Humanloop project with a reasonable amount of data.\n\nAn Evaluator activated in that project.\n\n\nTo track the performance of different model configs in your project:\n\n\nGo to the Dashboard tab.\nIn the table of model configs at the\nbottom, choose a subset of the project's model configs.\nUse the graph controls\nAt the top of the page to select the date range and time granularity\nof interest.\nReview the relative performance\nFor each activated Evaluator shown in the graphs, you can see the relative performance of the model configs you selected.\n\n\n\n\nThe following Python modules are available to be imported in your code evaluators:\nre\n\nmath\n\nrandom\n\ndatetime\n\njson (useful for validating JSON grammar as per the example above)\n\njsonschema (useful for more fine-grained validation of JSON output - see the in-app example)\n\nsqlglot (useful for validating SQL query grammar)\n\nrequests (useful to make further LLM calls as part of your evaluation - see the in-app example for a suggestion of how to get started).", "hierarchy": { "h2": { - "id": "prerequisites--1", - "title": "Prerequisites " + "id": "prerequisites-1", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites--1", - "title": "Prerequisites " + "id": "prerequisites-1", + "title": "Prerequisites" } }, "level": "h3", @@ -17692,8 
+16850,7 @@ "authed": false, "type": "markdown", "description": "Datasets are pre-defined collections of input-output pairs that you can use within Humanloop to define fixed examples for your projects.\nDatasets are collections of datapoints which represent input-output pairs for an LLM call.", - "content": "Datasets are pre-defined collections of input-output pairs that you can use within Humanloop to define fixed examples for your projects.\nA datapoint consists of three things:\nInputs: a collection of prompt variable values which are interpolated into the prompt template of your model config at generation time (i.e. they replace the {{ variables }} you define in the prompt template.\n\nMessages: for chat models, as well as the prompt template, you may have a history of prior chat messages from the same conversation forming part of the input to the next generation. Datapoints can have these messages included as part of the input.\n\nTarget: data representing the expected or intended output of the model. In the simplest case, this can simply be a string representing the exact output you hope the model produces for the example represented by the datapoint. In more complex cases, you can define an arbitrary JSON object for target with whatever fields are necessary to help you specify the intended behaviour. You can then use our evaluations feature to run the necessary code to compare the actual generated output with your target data to determine whether the result was as expected.\n\n\n\n\nDatasets can be created via CSV upload, converting from existing Logs in your project, or by API requests.", - "code_snippets": [] + "content": "Datasets are pre-defined collections of input-output pairs that you can use within Humanloop to define fixed examples for your projects.\nA datapoint consists of three things:\nInputs: a collection of prompt variable values which are interpolated into the prompt template of your model config at generation time (i.e. they replace the {{ variables }} you define in the prompt template.\n\nMessages: for chat models, as well as the prompt template, you may have a history of prior chat messages from the same conversation forming part of the input to the next generation. Datapoints can have these messages included as part of the input.\n\nTarget: data representing the expected or intended output of the model. In the simplest case, this can simply be a string representing the exact output you hope the model produces for the example represented by the datapoint. In more complex cases, you can define an arbitrary JSON object for target with whatever fields are necessary to help you specify the intended behaviour. You can then use our evaluations feature to run the necessary code to compare the actual generated output with your target data to determine whether the result was as expected.\n\n\n\n\nDatasets can be created via CSV upload, converting from existing Logs in your project, or by API requests." 
}, { "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.datasets.create-dataset", @@ -17724,11 +16881,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create Datasets in Humanloop to define fixed examples for your projects, and build up a collection of input-output pairs for evaluation and fine-tuning.\nDatasets can be created from existing logs or uploaded from CSV and via the API.", - "content": "You can currently create Datasets in Humanloop in three ways: from existing logs, by uploading a CSV or via the API.", - "code_snippets": [] + "content": "You can currently create Datasets in Humanloop in three ways: from existing logs, by uploading a CSV or via the API." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.datasets.create-dataset-create-a-dataset-from-logs-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.datasets.create-dataset-create-a-dataset-from-logs", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/create-dataset", @@ -17755,19 +16911,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-dataset-from-logs-", + "hash": "#create-a-dataset-from-logs", "content": "Prerequisites:\nA Prompt in Humanloop\n\nSome Logs available in that Prompt\n\n\nTo create a Dataset from existing Logs:\n\n\nGo to the Logs tab\nSelect a subset of the Logs\nChoose Add to Dataset\nIn the menu in the top right of the page, select Add to dataset.\n\n\nAdd to a new or existing Dataset\nProvide a name of the new dataset and click Create, or you can click add to existing dataset to append the selected to a dataset you already have.", "hierarchy": { "h1": { - "id": "create-a-dataset-from-logs-", - "title": "Create a Dataset from Logs " + "id": "create-a-dataset-from-logs", + "title": "Create a Dataset from Logs" } }, "level": "h1", "level_title": "Create a Dataset from Logs" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.datasets.create-dataset-upload-data-from-csv-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.datasets.create-dataset-upload-data-from-csv", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/create-dataset", @@ -17794,19 +16950,19 @@ ], "authed": false, "type": "markdown", - "hash": "#upload-data-from-csv-", + "hash": "#upload-data-from-csv", "content": "Prerequisites:\nA Prompt in Humanloop\n\n\nTo create a dataset from a CSV file, we'll first create a CSV in Google Sheets and then upload it to a dataset in Humanloop.\n\n\nCreate a CSV file.\nIn our Google Sheets example below, we have a column called user_query which is an input to a prompt variable of that name. So in our model config, we'll need to include {{ user_query }} somewhere, and that placeholder will be populated with the value from the user_query input in the datapoint at generation-time.\n\nYou can include as many columns of prompt variables as you need for your model configs.\n\nThere is additionally a column called target which will populate the target of the datapoint. In this case, we use simple strings to define the target.\n\nNote: messages are harder to incorporate into a CSV file as they tend to be verbose and hard-to-read JSON. 
If you want a dataset with messages, consider using the API to upload, or convert from existing logs.\n\n\n\n\nExport the Google Sheet to CSV\nChoose File → Download → Comma-separated values (.csv)\nCreate a new Dataset File\nClick Upload CSV\nUupload the CSV file from step 2 by drag-and-drop or using the file explorer.\n\n\nClick Upload Dataset from CSV\nYou should see a new dataset appear in the datasets tab. You can explore it by clicking in.\nFollow the link in the pop-up to inspect the dataset that was created in the upload.\nYou'll see a column with the input key-value pairs for each datapoint, a messages column (in our case we didn't use messages, so they're all empty) and a target column with the expected model output.", "hierarchy": { "h1": { - "id": "upload-data-from-csv-", - "title": "Upload data from CSV " + "id": "upload-data-from-csv", + "title": "Upload data from CSV" } }, "level": "h1", "level_title": "Upload data from CSV" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.datasets.create-dataset-upload-via-api-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.datasets.create-dataset-upload-via-api", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/create-dataset", @@ -17833,7 +16989,7 @@ ], "authed": false, "type": "markdown", - "hash": "#upload-via-api-", + "hash": "#upload-via-api", "content": "First you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)\n\n\n\n\nFirst define some sample data\nThis should consist of user messages and target extraction pairs. 
This is where you could load up any existing data you wish to use for your evaluation:\nThen define a dataset and upload the datapoints\nOn the datasets tab in your Humanloop project you will now see the dataset you just uploaded via the API.", "code_snippets": [ { @@ -17879,8 +17035,8 @@ ], "hierarchy": { "h1": { - "id": "upload-via-api-", - "title": "Upload via API " + "id": "upload-via-api", + "title": "Upload via API" } }, "level": "h1", @@ -17915,11 +17071,10 @@ "authed": false, "type": "markdown", "description": "This guide demonstrates how to run a batch generation using a large language model across all the datapoints in a dataset.\nOnce you have created a dataset, you can trigger batch generations across it with any model config in your project.", - "content": "This guide demonstrates how to run a batch generation across all the datapoints in a dataset.\nPrerequistes\nA Prompt) in Humanloop\n\nA dataset in that project", - "code_snippets": [] + "content": "This guide demonstrates how to run a batch generation across all the datapoints in a dataset.\nPrerequistes\nA Prompt) in Humanloop\n\nA dataset in that project" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.datasets.batch-generate-create-a-model-config-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.datasets.batch-generate-create-a-model-config", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/batch-generate", @@ -17946,12 +17101,12 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-model-config-", + "hash": "#create-a-model-config", "content": "It's important that the model config we use to perform the batch generation is consistent with the dataset. We're going to use the simple customer support dataset that we uploaded in the previous Create a dataset guide. As a reminder, the dataset looks like this\n\n\nWe want to get the model to classify the customer support query into the appropriate category. For this dataset, we have specified the correct category for each datapoint, so we'll be able to know easily if the model produced the correct output.\n\n\nIn Editor, create a simple completion model config as below.\n\n\nWe've used the following prompt:\nYou are a customer support classifier for Humanloop, a platform for building applications with LLMs.\nPlease classify the following customer support query into one of these categories:\n[datasets, docs, evaluators, feedback, fine-tuning, model configs, model providers]\n{{user_query}}\nThe most important thing here is that we have included a prompt variable - {{ user_query }} which corresponds to the input key on all the datapoints in our dataset. This was the first column header in the CSV file we used to upload the dataset.\nSave the model config by clicking the Save button. Call the config support_classifier.\nGo to the Datasets tab\nClick the menu icon in the top-right corner of the dataset you want to perform a batch generation across.\nIn that menu, choose Batch Generate & Eval\n\n\nIn the dialog window, choose the support_classifier model config created in step 2.\nYou can also optionally select an evaluator to use to compare the model's generation output to the target output in each datapoint. 
We set up the Exact match offline evaluator in our project (it's one of the builtins and requires no further configuration).\nClick Batch generate\nFollow the link in the pop-up to the batch generation run which is under the Evaluations tab.\n\n\nThe output the model produced is shown in the output column, and the exact match column shows that the model produced the expected (target) output in most cases. From here, we could inspect the failing cases and iterate on our model config before testing again to see if the accuracy across the whole dataset has improved.", "hierarchy": { "h2": { - "id": "create-a-model-config-", - "title": "Create a model config " + "id": "create-a-model-config", + "title": "Create a model config" } }, "level": "h2", @@ -17986,8 +17141,7 @@ "authed": false, "type": "markdown", "description": "Experiments allow you to set up A/B test between multiple different Prompts.\nExperiments allow you to set up A/B test between multiple different Prompts.", - "content": "Experiments allow you to set up A/B test between multiple different Prompts.\nExperiments can be used to compare different prompt templates, different parameter combinations (such as temperature and presence penalties) and even different base models.\nThis enables you to try out alternative prompts or models and use the feedback from your users to determine which works better.", - "code_snippets": [] + "content": "Experiments allow you to set up A/B test between multiple different Prompts.\nExperiments can be used to compare different prompt templates, different parameter combinations (such as temperature and presence penalties) and even different base models.\nThis enables you to try out alternative prompts or models and use the feedback from your users to determine which works better." }, { "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.experiments-from-the-app", @@ -18018,11 +17172,10 @@ "authed": false, "type": "markdown", "description": "Experiments allow you to set up A/B tests between multiple model configs.\nThis guide shows you how to experiment with Humanloop to systematically find the best-performing model configuration for your project based on your end-user’s feedback.", - "content": "Experiments can be used to compare different prompt templates, parameter combinations (such as temperature and presence penalties), and even base models.", - "code_snippets": [] + "content": "Experiments can be used to compare different prompt templates, parameter combinations (such as temperature and presence penalties), and even base models." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.experiments-from-the-app-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.experiments-from-the-app-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/experiments-from-the-app", @@ -18049,19 +17202,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\nYou have integrated humanloop.complete_deployed() or the humanloop.chat_deployed() endpoints, along with the humanloop.feedback() with the API or Python SDK.\n\n\n\n\nThis guide assumes you're using an OpenAI model. 
If you want to use other providers or your model, refer to the guide for running an experiment with your model provider.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.experiments-from-the-app-create-an-experiment-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.experiments-from-the-app-create-an-experiment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/experiments-from-the-app", @@ -18088,19 +17241,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-an-experiment-", + "hash": "#create-an-experiment", "content": "Navigate to the Experiments tab of your Prompt\nClick the Create new experiment button\nGive your experiment a descriptive name.\n\nSelect a list of feedback labels to be considered as positive actions - this will be used to calculate the performance of each of your model configs during the experiment.\n\nSelect which of your project’s model configs to compare.\n\nThen click the Create button.", "hierarchy": { "h2": { - "id": "create-an-experiment-", - "title": "Create an experiment " + "id": "create-an-experiment", + "title": "Create an experiment" } }, "level": "h2", "level_title": "Create an experiment" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.experiments-from-the-app-set-the-experiment-live-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.experiments-from-the-app-set-the-experiment-live", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/experiments-from-the-app", @@ -18127,19 +17280,19 @@ ], "authed": false, "type": "markdown", - "hash": "#set-the-experiment-live-", + "hash": "#set-the-experiment-live", "content": "Now that you have an experiment, you need to set it as the project’s active experiment:\n\n\nNavigate to the Experiments tab.\nOf a Prompt go to the Experiments tab.\nChoose the Experiment card you want to deploy.\nClick the Deploy button\nNext to the Environments label, click the Deploy button.\nSelect the environment to deploy the experiment.\nWe only have one environment by default so select the 'production' environment.\n\n\n\n\nNow that your experiment is active, any SDK or API calls to generate will sample model configs from the list you provided when creating the experiment and any subsequent feedback captured using feedback will contribute to the experiment performance.", "hierarchy": { "h2": { - "id": "set-the-experiment-live-", - "title": "Set the experiment live " + "id": "set-the-experiment-live", + "title": "Set the experiment live" } }, "level": "h2", "level_title": "Set the experiment live" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.experiments-from-the-app-monitor-experiment-progress-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.experiments-from-the-app-monitor-experiment-progress", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/experiments-from-the-app", @@ -18166,12 +17319,12 @@ ], "authed": false, "type": "markdown", - "hash": "#monitor-experiment-progress-", + "hash": "#monitor-experiment-progress", "content": "Now that an experiment is live, the data flowing through your generate and feedback calls will update the experiment progress 
in real-time:\n\n\nNavigate back to the Experiments tab.\nSelect the Experiment card\nHere you will see the performance of each model config with a measure of confidence based on how much feedback data has been collected so far:\n\n\n\n\n🎉 Your experiment can now give you insight into which of the model configs your users prefer.\n\n\nHow quickly you can draw conclusions depends on how much traffic you have flowing through your project.\nGenerally, you should be able to draw some initial conclusions after on the order of hundreds of examples.", "hierarchy": { "h2": { - "id": "monitor-experiment-progress-", - "title": "Monitor experiment progress " + "id": "monitor-experiment-progress", + "title": "Monitor experiment progress" } }, "level": "h2", @@ -18206,11 +17359,10 @@ "authed": false, "type": "markdown", "description": "Experiments allow you to set up A/B test between multiple different model configs.\nHow to set up an experiment on Humanloop using your own model.", - "content": "Experiments can be used to compare different prompt templates, different parameter combinations (such as temperature and presence penalties) and even different base models.\nThis guide focuses on the case where you wish to manage your own model provider calls.", - "code_snippets": [] + "content": "Experiments can be used to compare different prompt templates, different parameter combinations (such as temperature and presence penalties) and even different base models.\nThis guide focuses on the case where you wish to manage your own model provider calls." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.run-an-experiment-with-your-own-model-provider-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.run-an-experiment-with-your-own-model-provider-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/run-an-experiment-with-your-own-model-provider", @@ -18237,19 +17389,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\nYou have integrated humanloop.complete_deployed() or the humanloop.chat_deployed() endpoints, along with the humanloop.feedback() with the API or Python SDK.\n\n\n\n\nThis guide assumes you're are using an OpenAI model. If you want to use other providers or your own model please also look at the guide for running an experiment with your own model provider.\nSupport for other model providers on Humanloop is coming soon.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.run-an-experiment-with-your-own-model-provider-create-an-experiment-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.run-an-experiment-with-your-own-model-provider-create-an-experiment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/run-an-experiment-with-your-own-model-provider", @@ -18276,19 +17428,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-an-experiment-", + "hash": "#create-an-experiment", "content": "Navigate to the Experiments tab of your project. ### Click the\nCreate new experiment button: 1. Give your experiment a descriptive name.\n2. 
Select a list of feedback labels to be considered as positive actions -\nthis will be used to calculate the performance of each of your model configs\nduring the experiment. 3. Select which of your project’s model configs you\nwish to compare. Then click the Create button.", "hierarchy": { "h2": { - "id": "create-an-experiment-", - "title": "Create an experiment " + "id": "create-an-experiment", + "title": "Create an experiment" } }, "level": "h2", "level_title": "Create an experiment" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.run-an-experiment-with-your-own-model-provider-log-to-your-experiment-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.experiments.run-an-experiment-with-your-own-model-provider-log-to-your-experiment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/run-an-experiment-with-your-own-model-provider", @@ -18315,7 +17467,7 @@ ], "authed": false, "type": "markdown", - "hash": "#log-to-your-experiment-", + "hash": "#log-to-your-experiment", "content": "In order to log data for your experiment without using humanloop.complete_deployed() or humanloop.chat_deployed(), you must first determine which model config to use for your LLM provider calls. This is where the humanloop.experiments.get_model_config() function comes in.\n\n\nGo to your Prompt dashboard\nSet the experiment as the active deployment.\nTo do so, find the default environment in the Deployments bar. Click the dropdown menu from the default environment and from those options select Change deployment. In the dialog that opens select the experiment you created.\n\n\nCopy your project_id\nFrom the URL, https://app.humanloop.com/projects//dashboard. The project ID starts with pr_.\nAlter your existing logging code\nTo now first sample a model_config from your experiment to use when making your call to OpenAI:\nYou can also run multiple experiments within a single project. In this case, first navigate to the Experiments tab of your project and select your Experiment card. Then, retrieve your experiment_id from the experiment summary:\n\n\nThen, retrieve your model config from your experiment by calling humanloop.experiments.sample(experiment_id=experiment_id).", "code_snippets": [ { @@ -18329,8 +17481,8 @@ ], "hierarchy": { "h2": { - "id": "log-to-your-experiment-", - "title": "Log to your experiment " + "id": "log-to-your-experiment", + "title": "Log to your experiment" } }, "level": "h2", @@ -18365,11 +17517,10 @@ "authed": false, "type": "markdown", "description": "Learn how to use tool calling in your large language models and intract with it in the Humanloop Playground.\nHow to use Tool Calling to have your Prompts interact with external functions.", - "content": "Humanloop's Editor supports the usage of OpenAI function calling, which we refer to as JSON Schema tools. JSON Schema tools follow the universal JSON Schema syntax definition, similar to OpenAI function calling. You can define inline JSON Schema tools as part of your model configuration in the editor. These tools allow you to define a structure for OpenAI to follow when responding. In this guide, we'll walk through the process of using tools in the editor to interact with gpt-4.", - "code_snippets": [] + "content": "Humanloop's Editor supports the usage of OpenAI function calling, which we refer to as JSON Schema tools. JSON Schema tools follow the universal JSON Schema syntax definition, similar to OpenAI function calling. 
You can define inline JSON Schema tools as part of your model configuration in the editor. These tools allow you to define a structure for OpenAI to follow when responding. In this guide, we'll walk through the process of using tools in the editor to interact with gpt-4." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.tool-calling-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.tool-calling-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/tool-calling", @@ -18396,7 +17547,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "A Humanloop account - you can create one by going to our sign up page.\n\nYou already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\n\n\nTo view the list of models that support Tool calling, see the Models page.\nTo create and use a tool follow the following steps:\n\n\nOpen the editor\nStart by opening the Humanloop Editor in your web browser. You can access this directly from your Humanloop account dashboard.\nSelect the model\nIn the editor, you'll see an option to select the model. Choose gpt-4 from the dropdown list.\nDefine the tool\nTo define a tool, you'll need to use the universal JSON Schema syntax syntax. For the purpose of this guide, let's select one of our preloaded example tools get_current_weather. In practice this would correspond to a function you have defined locally, in your own code, and you are defining the parameters and structure that you want OpenAI to respond with to integrate with that function.\n\n\nInput user text\nLet's input some user text relevant to our tool to trigger OpenAI to respond with the corresponding parameters. Since we're using a weather-related tool, type in: What's the weather in Boston?.\n\n\nIt should be noted that a user can ask a non-weather related question such as 'how are you today? ' and it likely wouldn't trigger the model to respond in a format relative to the tool.\nCheck assistant response\nIf correctly set up, the assistant should respond with a prompt to invoke the tool, including the name of the tool and the data it requires. For our get_current_weather tool, it might respond with the relevant tool name as well as the fields you requested, such as:\nInput tool parameters\nThe response can be used locally or for prototyping you can pass in any relevant values. In the case of our get_current_weather tool, we might respond with parameters such as temperature (e.g., 22) and weather condition (e.g., sunny). To do this, in the tool response add the parameters in the in the format { \"temperature\": 22, \"condition\": \"sunny\" }. To note, the response format is also flexible, inputting 22, sunny likely also works and might help you iterate more quickly in your experimentation.\nSubmit tool response\nAfter defining the parameters, click on the 'Run' button to send the Tool message to OpenAI.\nReview assistant response\nThe assistant should now respond using your parameters. For example, it might say: The current weather in Boston is sunny with a temperature of 22 degrees.\n\n\nSave the model config\nIf you are happy with your tool, you can save the model config. 
The tool will be saved on that model config and can be used again in the future by loading the model config again in the editor or by calling the model config via our SDK.", "code_snippets": [ { @@ -18408,12 +17559,12 @@ ], "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", @@ -18448,11 +17599,10 @@ "authed": false, "type": "markdown", "description": "Learn how to use OpenAI function calling in the Humanloop Python SDK.\nIn this guide we will demonstrate how to take advantage of OpenAI function calling in our Python SDK.", - "content": "The Humanloop SDK provides an easy way for you to integrate the functionality of OpenAI function calling, which we refer to as JSON Schema tools, into your existing projects. Tools follow the same universal JSON Schema syntax definition as OpenAI function calling. In this guide, we'll walk you through the process of using tools with the Humanloop SDK via the chat endpoint.", - "code_snippets": [] + "content": "The Humanloop SDK provides an easy way for you to integrate the functionality of OpenAI function calling, which we refer to as JSON Schema tools, into your existing projects. Tools follow the same universal JSON Schema syntax definition as OpenAI function calling. In this guide, we'll walk you through the process of using tools with the Humanloop SDK via the chat endpoint." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.create-a-tool-with-the-sdk-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.create-a-tool-with-the-sdk-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/create-a-tool-with-the-sdk", @@ -18479,7 +17629,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "A Humanloop account - you can create one by going to our sign up page.\n\nPython installed - you can download and install Python by following the steps on the Python download page.\n\n\n\n\nThis guide assumes you're using OpenAI with the gpt-4 model. Only specific\nmodels from OpenAI are supported for function calling.\n\n\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop TypeScript SDK:\n\nImport and initialize the SDK:\n\n\n\n\nFirst you need to install and initialize the SDK. If you have already done this, skip to the next section. 
Otherwise, open up your terminal and follow these steps:\nInstall the Humanloop Python SDK:\n\nStart a Python interpreter:\n\nInitialize the SDK with your Humanloop API key (get your API key from your Organisation Settings page)", "code_snippets": [ { @@ -18505,19 +17655,19 @@ ], "hierarchy": { "h1": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.create-a-tool-with-the-sdk-install-and-initialize-the-sdk-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.create-a-tool-with-the-sdk-install-and-initialize-the-sdk", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/create-a-tool-with-the-sdk", @@ -18544,7 +17694,7 @@ ], "authed": false, "type": "markdown", - "hash": "#install-and-initialize-the-sdk-", + "hash": "#install-and-initialize-the-sdk", "content": "The SDK requires Python 3.8 or greater.\n\nImport the Humanloop SDK: If you haven't done so already, you'll need to install and import the Humanloop SDK into your Python environment. You can do this using pip:\nNote, this guide was built with Humanloop==0.5.18.\nThen import the SDK in your script:\nInitialize the SDK: Initialize the Humanloop SDK with your API key:\nCreate a chat with the tool: We'll start with the general chat endpoint format.\nDefine the tool: Define a tool using the universal JSON Schema syntax syntax. Let's assume we've defined a get_current_weather tool, which returns the current weather for a specified location. We'll add it in via a \"tools\": tools, field. We've also defined a dummy get_current_weather method at the top. This can be replaced by your own function to fetch real values, for now we're hardcoding it to return a random temperature and cloudy for this example.\nCheck assistant response\nThe code above will make the call to OpenAI with the tool but it does nothing to handle the assistant response. When responding with a tool response the response should have a tool_calls field. Fetch that value and pass it to your own function. An example of this can be seen below. Replace the TODO - Add assistant handling logic in your code from above with the following. Multiple tool calls can be returned with the latest OpenAI models gpt-4-1106-preview and gpt-3.5-turbo-1106, so below we loop through the tool_calls and populate the response accordingly.\nReturn the tool response\nWe can then return the tool response to OpenAI. This can be done by formatting OpenAI tool message into the relative assistant message seen below along with a tool message with the function name and function response.\nReview assistant response\nThe assistant should respond with a message that incorporates the parameters you provided, for example: The current weather in Boston is 22 degrees and cloudy. 
The above can be run by adding the python handling logic at the both of your file:\nThe full code from this example can be seen below:", "code_snippets": [ { @@ -18622,12 +17772,12 @@ ], "hierarchy": { "h1": { - "id": "install-and-initialize-the-sdk-", - "title": "Install and initialize the SDK " + "id": "install-and-initialize-the-sdk", + "title": "Install and initialize the SDK" }, "h2": { - "id": "install-and-initialize-the-sdk-", - "title": "Install and initialize the SDK " + "id": "install-and-initialize-the-sdk", + "title": "Install and initialize the SDK" } }, "level": "h2", @@ -18662,11 +17812,10 @@ "authed": false, "type": "markdown", "description": "Learn how to create a JSON Schema tool that can be reused across multiple Prompts.\nManaging and versioning a Tool seperately from your Prompts", - "content": "It's possible to re-use tool definitions them across multiple Prompts. You achieve this by having a Prompt file which defines a JSON schema, and linking them to your Prompt.\nYou can achieve this by first defining an instance of a JSON Schema tool in your global Tools tab. Here you can define a tool once, such as get_current_weather(location: string, unit: 'celsius' | 'fahrenheit'), and then link that to as many model configs as you need within the Editor as shown below.\nImportantly, updates to the get_current_weather JSON Schema tool defined here will then propagate automatically to all the model configs you've linked it to, without having to publish new versions of the prompt.", - "code_snippets": [] + "content": "It's possible to re-use tool definitions them across multiple Prompts. You achieve this by having a Prompt file which defines a JSON schema, and linking them to your Prompt.\nYou can achieve this by first defining an instance of a JSON Schema tool in your global Tools tab. Here you can define a tool once, such as get_current_weather(location: string, unit: 'celsius' | 'fahrenheit'), and then link that to as many model configs as you need within the Editor as shown below.\nImportantly, updates to the get_current_weather JSON Schema tool defined here will then propagate automatically to all the model configs you've linked it to, without having to publish new versions of the prompt." 
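The linked-tool guide above refers to a reusable `get_current_weather(location: string, unit: 'celsius' | 'fahrenheit')` tool defined in the universal JSON Schema syntax. The snapshot does not reproduce the schema the guide asks you to paste in, so the sketch below is an assumption modeled on the standard function-calling shape, with field values chosen to match the names the guide uses.

```python
# Minimal sketch of a get_current_weather tool definition in JSON Schema form.
# The guide's exact schema is not shown in this snapshot; values here are illustrative.
get_current_weather_tool = {
    "name": "get_current_weather",
    "description": "Get the current weather for a given location.",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {"type": "string", "description": "City name, e.g. Boston"},
            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        },
        # The guide later updates the tool to also require "unit".
        "required": ["location"],
    },
}
```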
}, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.link-jsonschema-tool-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.link-jsonschema-tool-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/link-jsonschema-tool", @@ -18693,19 +17842,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "A Humanloop account - you can create one by going to our sign up page.\n\nBe on a paid plan - your organization has been upgraded from the Free tier.\n\nYou already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\nTo create a JSON Schema tool that can be reusable across your organization, follow the following steps:", "hierarchy": { "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.link-jsonschema-tool-creating-and-linking-a-json-schema-tool-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.link-jsonschema-tool-creating-and-linking-a-json-schema-tool", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/link-jsonschema-tool", @@ -18732,7 +17881,7 @@ ], "authed": false, "type": "markdown", - "hash": "#creating-and-linking-a-json-schema-tool-", + "hash": "#creating-and-linking-a-json-schema-tool", "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan\n\n\nCreate a Tool file\nClick the 'New File' button on the homepage or in the sidebar.\nSelect the Json Schema Tool type\nDefine your tool\nSet the name, description, and parameters values. Our guide for using Tool Calling in the Prompt Editor can be a useful reference in this case. We can use the get_current_weather schema in this case. Paste the following into the dialog:\nPress the Create button.\nNavigate to the Editor\nMake sure you are using a model that supports tool calling, such as gpt-4o.\n\n\nSee the Models page for a list of models that support tool calling.\nAdd Tool to the Prompt definition.\nSelect 'Link existing Tool'\nIn the dropdown, go to the Link existing tool option. You should see your get_current_weather tool, click on it to link it to your editor.\n\n\nTest that the Prompt is working with the tool\nNow that your tool is linked you can start using it as you would normally use an inline tool. In the Chat section, in the User input, enter \"What is the weather in london?\"\nPress the Run button.\nYou should see the Assistant respond with the tool response and a new Tool field inserted to allow you to insert an answer. In this case, put in 22 into the tool response and press Run.\n\n\nThe model will respond with The current weather in London is 22 degrees.\nSave the Prompt\nYou've linked a tool to your model config, now let's save it. Press the Save button and name your model config weather-model-config.\n(Optional) Update the Tool\nNow that's we've linked your get_current_weather tool to your model config, let's try updating the base tool and see how it propagates the changes down into your saved weather-model-config config. Navigate back to the Tools in the sidebar and go to the Editor.\nChange the tool.\nLet's update both the name, as well as the required fields. 
For the name, update it to get_current_weather_updated and for the required fields, add unit as a required field. The should look like this now:\nSave the Tool\nPress the Save button, then the following Continue button to confirm.\nYour tool is now updated.\nTry the Prompt again\nNavigate back to your previous project, and open the editor. You should see the weather-model-config loaded as the active config. You should also be able to see the name of your previously linked tool in the Tools section now says get_current_weather_updated.\nIn the Chat section enter in again, What is the weather in london?, and press Run again.\nCheck the response\nYou should see the updated tool response, and how it now contains the unit field. Congratulations, you've successfully linked a JSON Schema tool to your model config.\n\n\n\n\nWhen updating your organization-level JSON Schema tools, remember that the\nchange will affect all the places you've previously linked the tool. Be\ncareful when making updates to not inadvertently change something you didn't\nintend.", "code_snippets": [ { @@ -18754,8 +17903,8 @@ ], "hierarchy": { "h2": { - "id": "creating-and-linking-a-json-schema-tool-", - "title": "Creating and linking a JSON Schema Tool " + "id": "creating-and-linking-a-json-schema-tool", + "title": "Creating and linking a JSON Schema Tool" } }, "level": "h2", @@ -18790,11 +17939,10 @@ "authed": false, "type": "markdown", "description": "Learn how to use the Snippet tool to manage common text snippets that you want to reuse across your different prompts.\nManage common text snippets in your Prompts", - "content": "The Humanloop Snippet tool supports managing common text 'snippets' (or 'passages', or 'chunks') that you want to reuse across your different prompts. A Snippet tool acts as a simple key/value store, where the key is the name of the common re-usable text snippet and the value is the corresponding text.\nFor example, you may have some common persona descriptions that you found to be effective across a range of your LLM features. Or maybe you have some specific formatting instructions that you find yourself re-using again and again in your prompts.\nInstead of needing to copy and paste between your editor sessions and keep track of which projects you edited, you can instead inject the text into your prompt using the Snippet tool.", - "code_snippets": [] + "content": "The Humanloop Snippet tool supports managing common text 'snippets' (or 'passages', or 'chunks') that you want to reuse across your different prompts. A Snippet tool acts as a simple key/value store, where the key is the name of the common re-usable text snippet and the value is the corresponding text.\nFor example, you may have some common persona descriptions that you found to be effective across a range of your LLM features. Or maybe you have some specific formatting instructions that you find yourself re-using again and again in your prompts.\nInstead of needing to copy and paste between your editor sessions and keep track of which projects you edited, you can instead inject the text into your prompt using the Snippet tool." 
}, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.snippet-tool-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.snippet-tool-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/snippet-tool", @@ -18821,16 +17969,16 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "A Humanloop account - you can create one by going to our sign up page.\n\nBe on a paid plan - your organization has been upgraded from the Free tier.\n\nYou already have a Prompt — if not, please follow our Prompt creation guide first.\n\n\n\n\nThe Snippet tool is not available for the Free tier. Please contact us if you\nwish to learn more about our Enterprise plan\nTo create and use a snippet tool, follow the following steps:\n\n\nNavigate to the tools tab in your organisation and select the Snippet tool card.\n\n\nName the tool\nName itassistant-personalities and give it a description Useful assistant personalities.\nAdd a snippet called \"helpful-assistant\"\nIn the initial box add helpful-assistant and give it a value of You are a helpful assistant. You like to tell jokes and if anyone asks your name is Sam.\nAdd another snippet called \"grumpy-assistant\"\nLet's add another key-value pair, so press the Add a key/value pair button and add a new key of grumpy-assistant and give it a value of You are a grumpy assistant. You rarely try to help people and if anyone asks your name is Freddy..\n\n\nPress Create Tool.\nNow your Snippets are set up, you can use it to populate strings in your prompt templates across your projects.\nNavigate to the Editor\nGo to the Editor of your previously created project.\nAdd {{ assistant-personalities(key) }} to your prompt\nDelete the existing prompt template and add {{ assistant-personalities(key) }} to your prompt.\n\n\nDouble curly bracket syntax is used to call a tool in the editor. Inside the curly brackets you put the tool name, e.g. {{ (key) }}.\nEnter the key as an input\nIn the input area set the value to helpful-assistant. The tool requires an input value to be provided for the key. When adding the tool an inputs field will appear in the top right of the editor where you can specify your key.\nPress the Run button\nStart the chat with the LLM and you can see the response of the LLM, as well as, see the key you previously defined add in the Chat on the right.\n\n\nChange the key to grumpy-assistant.\n\n\nIf you want to see the corresponding snippet to the key you either need to\nfirst run the conversation to fetch the string and see it in the preview.\nPlay with the LLM\nAsk the LLM, I'm a customer and need help solving this issue. Can you help?'. You should see a grumpy response from \"Freddy\" now.\nIf you have a specific key you would like to hardcode in the prompt, you can define it using the literal key value: {{ (\"key\") }}, so in this case it would be {{ assistant-personalities(\"grumpy-assistant\") }}. Delete the grumpy-assistant field and add it into your chat template.\nSave your Prompt.\nIf you're happy with you're grumpy assistant, save this new version of your Prompt.\n\n\nThe Snippet tool is particularly useful because you can define passages of text once in a Snippet tool and reuse them across multiple prompts, without needing to copy/paste them and manually keep them all in sync. 
Editing the values in your tool allows the changes to automatically propagate to the model configs when you update them, as long as the key is the same.\n\n\nSince the values for a Snippet are saved on the Tool, not the Prompt, changing\nthe values (or keys) defined in your Snippet tools could affect the relative\npropmt's behaviour that won't be captured by the Prompt's version. This could\nbe exactly what you intend, however caution should still be used make sure the\nchanges are expected.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", @@ -18865,11 +18013,10 @@ "authed": false, "type": "markdown", "description": "Learn how to set up a RAG system using the Pinecone integration to enrich your prompts with relevant context from a data source of documents.\nSet up a RAG system using the Pinecone integration", - "content": "In this guide we will set up a Humanloop Pinecone tool and use it to enrich a prompt with the relevant context from a data source of documents. This tool combines Pinecone's semantic search with OpenAI's embedding models.", - "code_snippets": [] + "content": "In this guide we will set up a Humanloop Pinecone tool and use it to enrich a prompt with the relevant context from a data source of documents. This tool combines Pinecone's semantic search with OpenAI's embedding models." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/set-up-semantic-search", @@ -18896,19 +18043,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "A Humanloop account - you can create one by going to our sign up page.\n\nA Pinecone account - you can create one by going to their sign up page.\n\nPython installed - you can download and install Python by following the steps on the Python download page.\n\n\n\n\nIf you have an existing Pinecone index that was created using one of OpenAI's\nembedding models, you can\nskip to section: Setup Humanloop", "hierarchy": { "h1": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h1", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-install-the-pinecone-sdk-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-install-the-pinecone-sdk", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/set-up-semantic-search", @@ -18935,7 +18082,7 @@ ], "authed": false, "type": "markdown", - "hash": "#install-the-pinecone-sdk-", + "hash": "#install-the-pinecone-sdk", "content": "If you already have the Pinecone SDK installed, skip to the next section.\n\n\nInstall the Pinecone Python SDK in your terminal:\nStart a Python interpreter:\nGo to the Pinecone console API Keys tab and create an API key - copy the key value and the environment.\nTest your Pinecone API key and environment by initialising the SDK", "code_snippets": [ { @@ -18965,19 +18112,19 @@ ], "hierarchy": { "h1": { - "id": "install-the-pinecone-sdk-", - 
"title": "Install the Pinecone SDK " + "id": "install-the-pinecone-sdk", + "title": "Install the Pinecone SDK" }, "h2": { - "id": "install-the-pinecone-sdk-", - "title": "Install the Pinecone SDK " + "id": "install-the-pinecone-sdk", + "title": "Install the Pinecone SDK" } }, "level": "h2", "level_title": "Install the Pinecone SDK" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-create-a-pinecone-index-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-create-a-pinecone-index", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/set-up-semantic-search", @@ -19004,7 +18151,7 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-pinecone-index-", + "hash": "#create-a-pinecone-index", "content": "Now we'll initialise a Pinecone index, which is where we'll store our vector embeddings. We will be using OpenAI's ada model to create vectors to save to Pinecone, which has an output dimension of 1536 that we need to specify upfront when creating the index:", "code_snippets": [ { @@ -19014,19 +18161,19 @@ ], "hierarchy": { "h1": { - "id": "create-a-pinecone-index-", - "title": "Create a Pinecone index " + "id": "create-a-pinecone-index", + "title": "Create a Pinecone index" }, "h2": { - "id": "create-a-pinecone-index-", - "title": "Create a Pinecone index " + "id": "create-a-pinecone-index", + "title": "Create a Pinecone index" } }, "level": "h2", "level_title": "Create a Pinecone index" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-preprocess-the-data-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-preprocess-the-data", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/set-up-semantic-search", @@ -19053,7 +18200,7 @@ ], "authed": false, "type": "markdown", - "hash": "#preprocess-the-data-", + "hash": "#preprocess-the-data", "content": "Now that you have a Pinecone index, we need some data to put in it. In this section we'll pre-process some data ready for embedding and storing to the index in the next section.\nWe'll use the awesome Hugging Face datasets to source a demo dataset (following the Pinecone quick-start guide). 
In practice you will customise this step to your own use case.\n\n\nFirst install Hugging Face datasets using pip:\nNext download the Quora dataset:\nNow we can preview the dataset - it contains ~400K pairs of natural language questions from Quora:\nExtract the text from the questions into a single list ready for embedding:", "code_snippets": [ { @@ -19109,19 +18256,19 @@ ], "hierarchy": { "h1": { - "id": "preprocess-the-data-", - "title": "Preprocess the data " + "id": "preprocess-the-data", + "title": "Preprocess the data" }, "h2": { - "id": "preprocess-the-data-", - "title": "Preprocess the data " + "id": "preprocess-the-data", + "title": "Preprocess the data" } }, "level": "h2", "level_title": "Preprocess the data" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-populate-pinecone-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-populate-pinecone", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/set-up-semantic-search", @@ -19148,23 +18295,23 @@ ], "authed": false, "type": "markdown", - "hash": "#populate-pinecone-", + "hash": "#populate-pinecone", "content": "Now that you have a Pinecone index and a dataset of text chunks, we can populate the index with embeddings before moving on to Humanloop. We'll use one of OpenAI's embedding models to create the vectors for storage.", "hierarchy": { "h1": { - "id": "populate-pinecone-", - "title": "Populate Pinecone " + "id": "populate-pinecone", + "title": "Populate Pinecone" }, "h2": { - "id": "populate-pinecone-", - "title": "Populate Pinecone " + "id": "populate-pinecone", + "title": "Populate Pinecone" } }, "level": "h2", "level_title": "Populate Pinecone" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-install-and-initialise-open-ai-sdk-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-install-and-initialise-open-ai-sdk", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/set-up-semantic-search", @@ -19191,7 +18338,7 @@ ], "authed": false, "type": "markdown", - "hash": "#install-and-initialise-open-ai-sdk-", + "hash": "#install-and-initialise-open-ai-sdk", "content": "If you already have your OpenAI key and the SDK installed, skip to the next section.\n\n\nInstall the OpenAI SDK using pip:\nInitialise the SDK (you'll need an OpenAI key from your OpenAI account)", "code_snippets": [ { @@ -19215,19 +18362,19 @@ ], "hierarchy": { "h1": { - "id": "install-and-initialise-open-ai-sdk-", - "title": "Install and initialise Open AI SDK " + "id": "install-and-initialise-open-ai-sdk", + "title": "Install and initialise Open AI SDK" }, "h3": { - "id": "install-and-initialise-open-ai-sdk-", - "title": "Install and initialise Open AI SDK " + "id": "install-and-initialise-open-ai-sdk", + "title": "Install and initialise Open AI SDK" } }, "level": "h3", "level_title": "Install and initialise Open AI SDK" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-populate-the-index-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-populate-the-index", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/set-up-semantic-search", @@ -19254,7 +18401,7 @@ ], "authed": false, "type": "markdown", - "hash": "#populate-the-index-", + "hash": "#populate-the-index", "content": "If you 
already have a Pinecone index set up, skip to the next section.\n\n\nEmbed the questions and store them in Pinecone with the corresponding text as metadata:\nYou can now try out the semantic search with a test question:\nYou should see semantically similar questions retrieved with the corresponding similarity scores:", "code_snippets": [ { @@ -19284,19 +18431,19 @@ ], "hierarchy": { "h1": { - "id": "populate-the-index-", - "title": "Populate the index " + "id": "populate-the-index", + "title": "Populate the index" }, "h3": { - "id": "populate-the-index-", - "title": "Populate the index " + "id": "populate-the-index", + "title": "Populate the index" } }, "level": "h3", "level_title": "Populate the index" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-configure-pinecone-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-configure-pinecone", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/set-up-semantic-search", @@ -19323,23 +18470,23 @@ ], "authed": false, "type": "markdown", - "hash": "#configure-pinecone-", + "hash": "#configure-pinecone", "content": "You're now ready to configure a Pinecone tool in Humanloop:\n\n\nCreate a New Tools\nFrom the Humanloop dashboard or the sidebar, click 'New File' and select Tool.\nSelect Pinecone Search\nSelect the Pinecone Search option\nConfigure Pinecone and OpenAI\nThese should be the same values you used when setting\nup your Pinecone index in the previous sections. All these values are editable\nlater.\nFor Pinecone: populate values for Name (use quora_search),\npinecone_key, pinecone_environment, pinecone_index (note: we named our\nindex humanloop-demo). The name will be used to create the signature for the\ntool that you will use in your prompt templates in the next section.\n\nFor OpenAI: populate the openai_key and openai_model (note: we used the\ntext-embedding-ada-002 model above)\n\n\nSave the tool\nBy selecting Save.\nAn active tool for quora_search will now appear on the tools tab and you're ready to use it within a prompt template.", "hierarchy": { "h1": { - "id": "configure-pinecone-", - "title": "Configure Pinecone " + "id": "configure-pinecone", + "title": "Configure Pinecone" }, "h2": { - "id": "configure-pinecone-", - "title": "Configure Pinecone " + "id": "configure-pinecone", + "title": "Configure Pinecone" } }, "level": "h2", "level_title": "Configure Pinecone" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-enhance-your-prompt-template-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.tools.set-up-semantic-search-enhance-your-prompt-template", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/set-up-semantic-search", @@ -19366,7 +18513,7 @@ ], "authed": false, "type": "markdown", - "hash": "#enhance-your-prompt-template-", + "hash": "#enhance-your-prompt-template", "content": "Now that we have a Pinecone tool configured we can use this to pull relevant context into your prompts.\nThis is an effective way to enrich your LLM applications with knowledge from your own internal documents and also help fix hallucinations.\n\n\nNavigate to the Editor of your Prompt\nCopy and paste the following text into the Prompt template box:\nOn the right hand side under Completions, enter the following three examples of topics: Google, Physics and Exercise.\nPress the Run all button bottom right (or use the keyboard 
shortcut Command + Enter).\nOn the right hand side the results from calling the Pinecone tool for the specific topic will be shown highlighted in purple and the final summary provided by the LLM that uses these results will be highlighted in green.\n\n\n\n\nEach active tool in your organisation will have a unique signature that you can use to specify the tool within a prompt template.\nYou can find the signature in the pink box on each tool card on the Tools page.\nYou can also use double curly brackets - {{ - within the prompt template in the Prompt Editor to see a dropdown of available tools.\nIn the case of Pinecone tools, the signature takes two positional arguments: query(the query text passed to Pinecone) and top_k(the number of similar chunks to retrieve from Pinecone for the query).", "code_snippets": [ { @@ -19380,12 +18527,12 @@ ], "hierarchy": { "h1": { - "id": "enhance-your-prompt-template-", - "title": "Enhance your Prompt template " + "id": "enhance-your-prompt-template", + "title": "Enhance your Prompt template" }, "h2": { - "id": "enhance-your-prompt-template-", - "title": "Enhance your Prompt template " + "id": "enhance-your-prompt-template", + "title": "Enhance your Prompt template" } }, "level": "h2", @@ -19416,11 +18563,10 @@ "authed": false, "type": "markdown", "description": "In this guide we will demonstrate how to use Humanloop’s fine-tuning workflow to produce improved models leveraging your user feedback data.\nIn this guide we will demonstrate how to use Humanloop’s fine-tuning workflow to produce improved models leveraging your user feedback data.", - "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan", - "code_snippets": [] + "content": "This feature is not available for the Free tier. Please contact us if you wish\nto learn more about our Enterprise plan" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.finetune-a-model-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.finetune-a-model-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/finetune-a-model", @@ -19443,19 +18589,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\nYou have integrated humanloop.complete_deployed() or the humanloop.chat_deployed() endpoints, along with the humanloop.feedback() with the API or Python SDK.\n\n\n\n\nA common question is how much data do I need to fine-tune effectively? Here we\ncan reference the OpenAI\nguidelines:\nThe more training examples you have, the better. We recommend having at least a couple hundred examples. 
In general, we've found that each doubling of the dataset size leads to a linear increase in model quality.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.finetune-a-model-fine-tuning-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.finetune-a-model-fine-tuning", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/finetune-a-model", @@ -19478,12 +18624,12 @@ ], "authed": false, "type": "markdown", - "hash": "#fine-tuning-", + "hash": "#fine-tuning", "content": "The first part of fine-tuning is to select the data you wish to fine-tune on.\n\n\nGo to your Humanloop project and navigate to Logs tab.\nCreate a filter\nUsing the + Filter button above the table of the logs you would like to fine-tune on.\nFor example, all the logs that have received a positive upvote in the feedback captured from your end users.\n\n\nClick the Actions button, then click the New fine-tuned model button to set up the finetuning process.\nEnter the appropriate parameters for the fine-tuned model.\nEnter a Model name. This will be used as the suffix parameter in OpenAI’s fine-tune interface. For example, a suffix of \"custom-model-name\" would produce a model name like ada:ft-your-org:custom-model-name-2022-02-15-04-21-04.\n\nChoose the Base model to fine-tune. This can be ada, babbage, curie, or davinci.\n\nSelect a Validation split percentage. This is the proportion of data that will be used for validation. Metrics will be periodically calculated against the validation data during training.\n\nEnter a Data snapshot name. Humanloop associates a data snapshot to every fine-tuned model instance so it is easy to keep track of what data is used (you can see yourexisting data snapshots on the Settings/Data snapshots page)\n\n\n\n\nClick Create\nThe fine-tuning process runs asynchronously and may take up to a couple of hours to complete depending on your data snapshot size.\nSee the progress\nNavigate to the Fine-tuning tab to see the progress of the fine-tuning process.\nComing soon - notifications for when your fine-tuning jobs have completed.\n\n\nWhen the Status of the fine-tuned model is marked as Successful, the model is ready to use.\n🎉 You can now use this fine-tuned model in a Prompt and evaluate its performance.", "hierarchy": { "h2": { - "id": "fine-tuning-", - "title": "Fine-tuning " + "id": "fine-tuning", + "title": "Fine-tuning" } }, "level": "h2", @@ -19513,11 +18659,10 @@ ], "authed": false, "type": "markdown", - "description": "How to create, share and manage you Humanloop API keys. The API keys allow you to access the Humanloop API programmatically in your app.\nAPI keys allow you to access the Humanloop API programmatically in your app.", - "code_snippets": [] + "description": "How to create, share and manage you Humanloop API keys. The API keys allow you to access the Humanloop API programmatically in your app.\nAPI keys allow you to access the Humanloop API programmatically in your app." 
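Several of the guides in these records say to "install the Humanloop Python SDK" and "initialize the SDK with your Humanloop API key" from the Organization Settings page. A minimal sketch of that initialization is shown below, assuming the `humanloop` package's `Humanloop(api_key=...)` constructor; the key is read from an environment variable rather than hard-coded, since (as the API-keys guide notes) the full key is only shown once.

```python
import os

from humanloop import Humanloop  # assumes the Humanloop Python SDK referenced in the guides

# Keep the key out of source control; it is only displayed once at creation time.
humanloop = Humanloop(api_key=os.environ["HUMANLOOP_API_KEY"])
```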
}, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.create-and-revoke-api-keys-create-a-new-api-key-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.create-and-revoke-api-keys-create-a-new-api-key", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/create-and-revoke-api-keys", @@ -19540,19 +18685,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-a-new-api-key-", + "hash": "#create-a-new-api-key", "content": "Go to your Organization's API Keys page.\nClick the Create new API key button.\nEnter a name for your API key.\nChoose a name that helps you identify the key's purpose. You can't change the name of an API key after it's created.\nClick Create.\n\n\nCopy the generated API key\nSave it in a secure location. You will not be shown the full API key again.", "hierarchy": { "h2": { - "id": "create-a-new-api-key-", - "title": "Create a new API key " + "id": "create-a-new-api-key", + "title": "Create a new API key" } }, "level": "h2", "level_title": "Create a new API key" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.create-and-revoke-api-keys-revoke-an-api-key-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.create-and-revoke-api-keys-revoke-an-api-key", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/create-and-revoke-api-keys", @@ -19575,12 +18720,12 @@ ], "authed": false, "type": "markdown", - "hash": "#revoke-an-api-key-", + "hash": "#revoke-an-api-key", "content": "You can revoke an existing API key if it is no longer needed.\n\n\nWhen an API key is revoked, future API requests that use this key will be\nrejected. Any systems that are dependent on this key will no longer work.\n\n\nGo to API keys page\nGo to your Organization's API Keys\npage.\nIdentify the API key\nFind the key you wish to revoke by its name or by the displayed trailing characters.\nClick 'Revoke'\nClick the three dots button on the right of its row to open its menu.\nClick Revoke.\nA confirmation dialog will be displayed. 
Click Remove.", "hierarchy": { "h2": { - "id": "revoke-an-api-key-", - "title": "Revoke an API key " + "id": "revoke-an-api-key", + "title": "Revoke an API key" } }, "level": "h2", @@ -19611,11 +18756,10 @@ "authed": false, "type": "markdown", "description": "Inviting people to your organization allows them to interact with your Humanloop projects.\nHow to invite collaborators to your Humanloop organization.", - "content": "Inviting people to your organization allows them to interact with your Humanloop projects:\nTeammates will be able to create new model configs and experiments\n\nDevelopers will be able to get an API key to interact with projects through the SDK\n\nAnnotators may provide feedback on logged datapoints using the Data tab (in addition to feedback captured from your end-users via the SDK feedback integration)", - "code_snippets": [] + "content": "Inviting people to your organization allows them to interact with your Humanloop projects:\nTeammates will be able to create new model configs and experiments\n\nDevelopers will be able to get an API key to interact with projects through the SDK\n\nAnnotators may provide feedback on logged datapoints using the Data tab (in addition to feedback captured from your end-users via the SDK feedback integration)" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.invite-collaborators-invite-users-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.invite-collaborators-invite-users", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/invite-collaborators", @@ -19638,12 +18782,12 @@ ], "authed": false, "type": "markdown", - "hash": "#invite-users-", + "hash": "#invite-users", "content": "To invite users to your organization:\n\n\nGo to your organization's Members page\nEnter the email address\nEnter the email of the person you wish to invite into the Invite members box.\n\n\nClick Send invite.\nAn email will be sent to the entered email address, inviting them to the organization. If the entered email address is not already a Humanloop user, they will be prompted to create an account before being added to the organization.\n🎉 Once they create an account, they can view your projects at the same URL to begin collaborating.", "hierarchy": { "h2": { - "id": "invite-users-", - "title": "Invite Users " + "id": "invite-users", + "title": "Invite Users" } }, "level": "h2", @@ -19674,11 +18818,10 @@ "authed": false, "type": "markdown", "description": "Environments enable you to deploy model configurations and experiments, making them accessible via API, while also maintaining a streamlined production workflow.\nIn this guide we will demonstrate how to create and use environments.", - "content": "Environments enable you to deploy model configurations and experiments, making them accessible via API, while also maintaining a streamlined production workflow. These environments are created at the organizational level and can be utilized on a per-project basis.", - "code_snippets": [] + "content": "Environments enable you to deploy model configurations and experiments, making them accessible via API, while also maintaining a streamlined production workflow. These environments are created at the organizational level and can be utilized on a per-project basis." 
}, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.deploy-to-an-environment-create-an-environment-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.deploy-to-an-environment-create-an-environment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/deploy-to-an-environment", @@ -19701,19 +18844,19 @@ ], "authed": false, "type": "markdown", - "hash": "#create-an-environment-", + "hash": "#create-an-environment", "content": "Go to your Organization's Environments page.\nClick the + Environment button to open the new environment dialog.\nAssign a custom name to the environment.\nClick Create.", "hierarchy": { "h2": { - "id": "create-an-environment-", - "title": "Create an environment " + "id": "create-an-environment", + "title": "Create an environment" } }, "level": "h2", "level_title": "Create an environment" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.deploy-to-an-environment-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.deploy-to-an-environment-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/deploy-to-an-environment", @@ -19736,23 +18879,23 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "You already have a Prompt — if not, please follow our Prompt creation guide first.\n\nEnsure that your project has existing model configs that you wish to use.\n\n\nTo deploy a model config to an environment:\n\n\nNavigate to the Dashboard of your project.\nClick the dropdown menu of the environment.\n\n\nClick the Change deployment button\nSelect a version\nFrom the model configs or experiments within that project, click on the one that you wish to deploy to the target environment\n\n\nClick the Deploy button.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.deploy-to-an-environment-prerequisites--1", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.deploy-to-an-environment-prerequisites-1", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/deploy-to-an-environment", @@ -19775,7 +18918,7 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites--1", + "hash": "#prerequisites-1", "content": "You have already deployed either a chat or completion model config - if not, please follow the steps in either the Generate chat responses or Generate completions guides.\n\nYou have multiple environments, with a model config deployed in a non-default environment. See the Deploying to an environment section above.\n\n\n\n\nThe following steps assume you're using an OpenAI model and that you're calling a chat workflow. The steps needed to target a specific environment for a completion workflow are similar.\n\n\nNavigate to the Models tab of your Humanloop project.\nClick the dropdown menu of the environment you wish to use.\nClick the Use API menu option.\nA dialog will open with code snippets.\nSelect the language you wish to use (e.g. Python, TypeScript). 
The value of the environment parameter is the name of the environment you wish to target via the chat-deployed call.\nAn example of this can be seen in the code below.", "code_snippets": [ { @@ -19789,19 +18932,19 @@ ], "hierarchy": { "h2": { - "id": "prerequisites--1", - "title": "Prerequisites " + "id": "prerequisites-1", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites--1", - "title": "Prerequisites " + "id": "prerequisites-1", + "title": "Prerequisites" } }, "level": "h3", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.deploy-to-an-environment-updating-the-default-environment-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.deploy-to-an-environment-updating-the-default-environment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/deploy-to-an-environment", @@ -19824,19 +18967,19 @@ ], "authed": false, "type": "markdown", - "hash": "#updating-the-default-environment-", + "hash": "#updating-the-default-environment", "content": "Only Enterprise customers can update their default environment", "hierarchy": { "h2": { - "id": "updating-the-default-environment-", - "title": "Updating the default environment " + "id": "updating-the-default-environment", + "title": "Updating the default environment" } }, "level": "h2", "level_title": "Updating the default environment" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.deploy-to-an-environment-prerequisites--2", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.guides.deploy-to-an-environment-prerequisites-2", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/guides/deploy-to-an-environment", @@ -19859,16 +19002,16 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites--2", + "hash": "#prerequisites-2", "content": "You have multiple environments - if not first go through the Create an\nenvironment section.\n\n\nEvery organization will have a default environment. This can be updated by the following:\n\n\nGo to your Organization's Environments page.\nClick on the dropdown menu of an environment that is not already the default.\nClick the Make default option\nA dialog will open asking you if you are certain this is a change you want to make. If so, click the Make default button.\nVerify the default tag has moved to the environment you selected.", "hierarchy": { "h2": { - "id": "prerequisites--2", - "title": "Prerequisites " + "id": "prerequisites-2", + "title": "Prerequisites" }, "h3": { - "id": "prerequisites--2", - "title": "Prerequisites " + "id": "prerequisites-2", + "title": "Prerequisites" } }, "level": "h3", @@ -19912,7 +19055,7 @@ ] }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-versioning-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-versioning", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/prompts", @@ -19935,19 +19078,19 @@ ], "authed": false, "type": "markdown", - "hash": "#versioning-", + "hash": "#versioning", "content": "A Prompt file will have multiple versions as you try out different models, params or templates, but they should all be doing the same task, and in general should be swappable with one-another.\nBy versioning your Prompts, you can track how adjustments to the template or parameters influence the LLM's responses.
This is crucial for iterative development, as you can pinpoint which versions produce the most relevant or accurate outputs for your specific use case.", "hierarchy": { "h2": { - "id": "versioning-", - "title": "Versioning " + "id": "versioning", + "title": "Versioning" } }, "level": "h2", "level_title": "Versioning" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-when-to-create-a-new-prompt-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-when-to-create-a-new-prompt", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/prompts", @@ -19970,23 +19113,23 @@ ], "authed": false, "type": "markdown", - "hash": "#when-to-create-a-new-prompt-", + "hash": "#when-to-create-a-new-prompt", "content": "You should create a new Prompt for every different ‘task to be done’ with the LLM. For example each of these tasks are things that can be done by an LLM and should be a separate Prompt File: extractive summary, title creator, outline generator etc.\nWe've seen people find it useful to also create a Prompt called 'Playground' where they can free form experiment without concern of breaking anything or making a mess of their other Prompts.", "hierarchy": { "h2": { - "id": "when-to-create-a-new-prompt-", - "title": "When to create a new Prompt " + "id": "when-to-create-a-new-prompt", + "title": "When to create a new Prompt" }, "h3": { - "id": "when-to-create-a-new-prompt-", - "title": "When to create a new Prompt " + "id": "when-to-create-a-new-prompt", + "title": "When to create a new Prompt" } }, "level": "h3", "level_title": "When to create a new Prompt" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-using-prompts-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-using-prompts", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/prompts", @@ -20009,7 +19152,7 @@ ], "authed": false, "type": "markdown", - "hash": "#using-prompts-", + "hash": "#using-prompts", "content": "Prompts are callable as an API. You supply any query-time data such as input values or user messages, and the model will respond with its text output.\nYou can also use Prompts without proxying all requests through Humanloop.", "code_snippets": [ { @@ -20020,15 +19163,15 @@ ], "hierarchy": { "h2": { - "id": "using-prompts-", - "title": "Using Prompts " + "id": "using-prompts", + "title": "Using Prompts" } }, "level": "h2", "level_title": "Using Prompts" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-serialization-prompt-file-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-serialization-prompt-file", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/prompts", @@ -20051,19 +19194,19 @@ ], "authed": false, "type": "markdown", - "hash": "#serialization-prompt-file-", + "hash": "#serialization-prompt-file", "content": "Our .prompt file format is a serialized version of a model config that is designed to be human-readable and suitable for checking into your version control systems alongside your code.
See the .prompt files reference reference for more details.", "hierarchy": { "h2": { - "id": "serialization-prompt-file-", - "title": "Serialization (.prompt file) " + "id": "serialization-prompt-file", + "title": "Serialization (.prompt file)" } }, "level": "h2", "level_title": "Serialization (.prompt file)" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-format-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-format", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/prompts", @@ -20086,23 +19229,23 @@ ], "authed": false, "type": "markdown", - "hash": "#format-", + "hash": "#format", "content": "The .prompt file is heavily inspired by MDX, with model and hyperparameters specified in a YAML header alongside a JSX-inspired format for your Chat Template.", "hierarchy": { "h2": { - "id": "format-", - "title": "Format " + "id": "format", + "title": "Format" }, "h3": { - "id": "format-", - "title": "Format " + "id": "format", + "title": "Format" } }, "level": "h3", "level_title": "Format" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-basic-examples-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.prompts-basic-examples", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/prompts", @@ -20125,8 +19268,7 @@ ], "authed": false, "type": "markdown", - "hash": "#basic-examples-", - "content": "", + "hash": "#basic-examples", "code_snippets": [ { "lang": "jsx", @@ -20154,12 +19296,12 @@ ], "hierarchy": { "h2": { - "id": "basic-examples-", - "title": "Basic examples " + "id": "basic-examples", + "title": "Basic examples" }, "h3": { - "id": "basic-examples-", - "title": "Basic examples " + "id": "basic-examples", + "title": "Basic examples" } }, "level": "h3", @@ -20190,11 +19332,10 @@ "authed": false, "type": "markdown", "description": "Discover how Humanloop manages tools for use with large language models (LLMs) with version control and rigorous evaluation for better performance.\nTools are functions that can extend your LLMs with access to external data sources and enabling them to take actions.", - "content": "Tools are functions that can extend your LLMs with access to external data sources and enabling them to take actions.\nHumanloop Tools can be used in multiple ways:\nby the LLM by OpenAI function calling)\n\nwithin the Prompt template\n\nas part of a chain of events such as a Retrieval Tool in a RAG pipeline\n\n\nSome Tools are executable within Humanloop, and these offer the greatest utility and convenience. For example, Humanloop has pre-built integrations for Google search and Pinecone have and so these Tools can be executed and the results inserted into the API or Editor automatically.", - "code_snippets": [] + "content": "Tools are functions that can extend your LLMs with access to external data sources and enabling them to take actions.\nHumanloop Tools can be used in multiple ways:\nby the LLM by OpenAI function calling)\n\nwithin the Prompt template\n\nas part of a chain of events such as a Retrieval Tool in a RAG pipeline\n\n\nSome Tools are executable within Humanloop, and these offer the greatest utility and convenience. For example, Humanloop has pre-built integrations for Google search and Pinecone have and so these Tools can be executed and the results inserted into the API or Editor automatically." 
}, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.tools-tool-use-function-calling-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.tools-tool-use-function-calling", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tools", @@ -20217,19 +19358,19 @@ ], "authed": false, "type": "markdown", - "hash": "#tool-use-function-calling-", + "hash": "#tool-use-function-calling", "content": "Certain large language models support tool use or \"function calling\". For these models, you can supply the description of functions and the model can choose to call one or more of them by providing the values to call the functions with.\n\n\n\n\nTools all have a functional interface that can be supplied as the JSONSchema needed for function calling. Additionally, if the Tool is executable on Humanloop, the result of any tool will automatically be inserted into the response in the API and in the Editor.\nTools for function calling can be defined inline in our Editor or centrally managed for an organization.", "hierarchy": { "h3": { - "id": "tool-use-function-calling-", - "title": "Tool Use (Function Calling) " + "id": "tool-use-function-calling", + "title": "Tool Use (Function Calling)" } }, "level": "h3", "level_title": "Tool Use (Function Calling)" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.tools-tools-in-a-prompt-template-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.tools-tools-in-a-prompt-template", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tools", @@ -20252,19 +19393,19 @@ ], "authed": false, "type": "markdown", - "hash": "#tools-in-a-prompt-template-", + "hash": "#tools-in-a-prompt-template", "content": "You can add a tool call in a prompt template and the result will be inserted into the prompt sent to the model. This allows you to insert retrieved information into your LLM calls.\nFor example, if you have {{ google(\"population of india\") }} in your template, this Google tool will get executed and replaced with the resulting text “1.42 billion (2024)” before the prompt is sent to the model. Additionally, if your template contains a Tool call that uses an input variable e.g. {{ google(query) }} this will take the value of the input supplied in the request, compute the output of the Google tool, and insert that result into the resulting prompt that is sent to the model.\n\n\nExample of a Tool being used within a Prompt template. This example will mean that this Prompt needs two inputs to be supplied (query, and top_k)", "hierarchy": { "h3": { - "id": "tools-in-a-prompt-template-", - "title": "Tools in a Prompt template " + "id": "tools-in-a-prompt-template", + "title": "Tools in a Prompt template" } }, "level": "h3", "level_title": "Tools in a Prompt template" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.tools-tools-within-a-chain-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.tools-tools-within-a-chain", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tools", @@ -20287,19 +19428,19 @@ ], "authed": false, "type": "markdown", - "hash": "#tools-within-a-chain-", + "hash": "#tools-within-a-chain", "content": "You can call a Tool within a session of events and post the result to Humanloop. For example, in a RAG pipeline, instrumenting your retrieval function as a Tool enables you to trace through the full sequence of events.
The retrieval Tool will be versioned and the logs will be available in the Humanloop UI, enabling you to independently improve that step in the pipeline.", "hierarchy": { "h2": { - "id": "tools-within-a-chain-", - "title": "Tools within a chain " + "id": "tools-within-a-chain", + "title": "Tools within a chain" } }, "level": "h2", "level_title": "Tools within a chain" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.tools-third-party-integrations-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.tools-third-party-integrations", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tools", @@ -20322,23 +19463,23 @@ ], "authed": false, "type": "markdown", - "hash": "#third-party-integrations-", + "hash": "#third-party-integrations", "content": "Pinecone Search - Vector similarity search using Pinecone vector DB and OpenAI embeddings.\n\nGoogle Search - API for searching Google: https://serpapi.com/.\n\nGET API - Send a GET request to an external API.", "hierarchy": { "h2": { - "id": "third-party-integrations-", - "title": "Third-party integrations " + "id": "third-party-integrations", + "title": "Third-party integrations" }, "h3": { - "id": "third-party-integrations-", - "title": "Third-party integrations " + "id": "third-party-integrations", + "title": "Third-party integrations" } }, "level": "h3", "level_title": "Third-party integrations" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.tools-humanloop-tools-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.tools-humanloop-tools", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/tools", @@ -20361,16 +19502,16 @@ ], "authed": false, "type": "markdown", - "hash": "#humanloop-tools-", + "hash": "#humanloop-tools", "content": "Snippet Tool - Create reusable key/value pairs for use in prompts - see how to use the Snippet Tool.\n\nJSON Schema - JSON schema that can be used across multiple Prompts - see how to link a JSON Schema Tool.", "hierarchy": { "h2": { - "id": "humanloop-tools-", - "title": "Humanloop tools " + "id": "humanloop-tools", + "title": "Humanloop tools" }, "h3": { - "id": "humanloop-tools-", - "title": "Humanloop tools " + "id": "humanloop-tools", + "title": "Humanloop tools" } }, "level": "h3", @@ -20401,8 +19542,7 @@ "authed": false, "type": "markdown", "description": "Discover how Humanloop manages datasets, with version control and collaboration to enable you to evaluate and fine-tune your models.\nDatasets are collections of input-output pairs that you can use within Humanloop for evaluations and fine-tuning.", - "content": "A datapoint consists of three things:\nInputs: a collection of prompt variable values which are interpolated into the prompt template of your model config at generation time (i.e. they replace the {{ variables }} you define in the prompt template).\n\nMessages: for chat models, as well as the prompt template, you may have a history of prior chat messages from the same conversation forming part of the input to the next generation. Datapoints can have these messages included as part of the input.\n\nTarget: data representing the expected or intended output of the model. In the simplest case, this can simply be a string representing the exact output you hope the model produces for the example represented by the datapoint. 
In more complex cases, you can define an arbitrary JSON object for target with whatever fields are necessary to help you specify the intended behaviour. You can then use our evaluations feature to run the necessary code to compare the actual generated output with your target data to determine whether the result was as expected.\n\n\n\n\n\n\nDatasets can be created via CSV upload, converting from existing Logs in your project, or by API requests.", - "code_snippets": [] + "content": "A datapoint consists of three things:\nInputs: a collection of prompt variable values which are interpolated into the prompt template of your model config at generation time (i.e. they replace the {{ variables }} you define in the prompt template).\n\nMessages: for chat models, as well as the prompt template, you may have a history of prior chat messages from the same conversation forming part of the input to the next generation. Datapoints can have these messages included as part of the input.\n\nTarget: data representing the expected or intended output of the model. In the simplest case, this can simply be a string representing the exact output you hope the model produces for the example represented by the datapoint. In more complex cases, you can define an arbitrary JSON object for target with whatever fields are necessary to help you specify the intended behaviour. You can then use our evaluations feature to run the necessary code to compare the actual generated output with your target data to determine whether the result was as expected.\n\n\n\n\n\n\nDatasets can be created via CSV upload, converting from existing Logs in your project, or by API requests." }, { "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators", @@ -20429,11 +19569,10 @@ "authed": false, "type": "markdown", "description": "Learn about LLM Evaluation using Evaluators. Evaluators are functions that can be used to judge the output of Prompts, Tools or other Evaluators.\nEvaluators on Humanloop are functions that can be used to judge the output of Prompts, Tools or other Evaluators.", - "content": "Evaluators are functions which take an LLM-generated Log as an argument and return an evaluation. The evaluation is typically either a boolean or a number, indicating how well the model performed according to criteria you determine based on your use case.\nEvaluators can be used for monitoring live data as well as running evaluations.", - "code_snippets": [] + "content": "Evaluators are functions which take an LLM-generated Log as an argument and return an evaluation. The evaluation is typically either a boolean or a number, indicating how well the model performed according to criteria you determine based on your use case.\nEvaluators can be used for monitoring live data as well as running evaluations." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators-types-of-evaluators-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators-types-of-evaluators", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/evaluators", @@ -20456,19 +19595,19 @@ ], "authed": false, "type": "markdown", - "hash": "#types-of-evaluators-", + "hash": "#types-of-evaluators", "content": "There are three types of Evaluators: AI, code, and human.\nPython - using our in-browser editor, define simple Python functions to act as evaluators\n\nAI - use a large language model to evaluate another LLM! 
Our evaluator editor allows you to define a special-purpose prompt which passes data from the underlying log to a language model. This type of evaluation is particularly useful for more subjective evaluation such as verifying appropriate tone-of-voice or factuality given an input set of facts.\n\nHuman - collate human feedback against the logs", "hierarchy": { "h3": { - "id": "types-of-evaluators-", - "title": "Types of Evaluators " + "id": "types-of-evaluators", + "title": "Types of Evaluators" } }, "level": "h3", "level_title": "Types of Evaluators" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators-modes-monitoring-vs-testing-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators-modes-monitoring-vs-testing", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/evaluators", @@ -20491,19 +19630,19 @@ ], "authed": false, "type": "markdown", - "hash": "#modes-monitoring-vs-testing-", + "hash": "#modes-monitoring-vs-testing", "content": "Evaluation is useful for both testing new model configs as you develop them and for monitoring live deployments that are already in production.\nTo handle these different use cases, there are two distinct modes of evaluators - online and offline.", "hierarchy": { "h2": { - "id": "modes-monitoring-vs-testing-", - "title": "Modes: Monitoring vs. testing " + "id": "modes-monitoring-vs-testing", + "title": "Modes: Monitoring vs. testing" } }, "level": "h2", "level_title": "Modes: Monitoring vs. testing" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators-online-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators-online", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/evaluators", @@ -20526,23 +19665,23 @@ ], "authed": false, "type": "markdown", - "hash": "#online-", + "hash": "#online", "content": "Online evaluators are for use on logs generated in your project, including live in production. Typically, they are used to monitor deployed model performance over time.\nOnline evaluators can be set to run automatically whenever logs are added to a project. The evaluator takes the log as an argument.", "hierarchy": { "h2": { - "id": "online-", - "title": "Online " + "id": "online", + "title": "Online" }, "h3": { - "id": "online-", - "title": "Online " + "id": "online", + "title": "Online" } }, "level": "h3", "level_title": "Online" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators-offline-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators-offline", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/evaluators", @@ -20565,23 +19704,23 @@ ], "authed": false, "type": "markdown", - "hash": "#offline-", + "hash": "#offline", "content": "Offline evaluators are for use with predefined test datasets in order to evaluate models as you iterate in your prompt engineering workflow, or to test for regressions in a CI environment.\nA test dataset is a collection of datapoints, which are roughly analogous to unit tests or test cases in traditional programming. Each datapoint specifies inputs to your model and (optionally) some target data.\nWhen you run an offline evaluation, Humanloop iterates through each datapoint in the dataset and triggers a fresh LLM generation using the inputs of the testcase and the model config being evaluated. 
For each test case, your evaluator function will be called, taking as arguments the freshly generated log and the testcase datapoint that gave rise to it. Typically, you would write your evaluator to perform some domain-specific logic to determine whether the model-generated log meets your desired criteria (as specified in the datapoint 'target').", "hierarchy": { "h2": { - "id": "offline-", - "title": "Offline " + "id": "offline", + "title": "Offline" }, "h3": { - "id": "offline-", - "title": "Offline " + "id": "offline", + "title": "Offline" } }, "level": "h3", "level_title": "Offline" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators-humanloop-hosted-vs-self-hosted-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.evaluators-humanloop-hosted-vs-self-hosted", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/evaluators", @@ -20604,12 +19743,12 @@ ], "authed": false, "type": "markdown", - "hash": "#humanloop-hosted-vs-self-hosted-", + "hash": "#humanloop-hosted-vs-self-hosted", "content": "Conceptually, evaluation runs have two components:\nGeneration of logs from the datapoints\n\nEvaluating those logs.\n\n\nUsing the Evaluations API, Humanloop offers the ability to generate logs either within the Humanloop runtime, or self-hosted. Similarly, evaluations of the logs can be performed in the Humanloop runtime (using evaluators that you can define in-app) or self-hosted (see our guide on self-hosted evaluations).\nIn fact, it's possible to mix-and-match self-hosted and Humanloop-runtime generations and evaluations in any combination you wish. When creating an evaluation via the API, set the hl_generated flag to False to indicate that you are posting the logs from your own infrastructure (see our guide on evaluating externally-generated logs). Include an evaluator of type External to indicate that you will post evaluation results from your own infrastructure. You can include multiple evaluators on any run, and these can include any combination of External (i.e. self-hosted) and Humanloop-runtime evaluators.", "hierarchy": { "h2": { - "id": "humanloop-hosted-vs-self-hosted-", - "title": "Humanloop-hosted vs. self-hosted " + "id": "humanloop-hosted-vs-self-hosted", + "title": "Humanloop-hosted vs. self-hosted" } }, "level": "h2", @@ -20640,8 +19779,7 @@ "authed": false, "type": "markdown", "description": "Logs contain the inputs and outputs of each time a Prompt, Tool or Evaluator is called.\nLogs contain the inputs and outputs of each time a Prompt, Tool or Evaluator is called.", - "content": "All Prompts, Tools and Evaluators produce Logs. A Log contains the inputs and the outputs and tracks which version of Prompt/Tool/Evaluator was used.\nFor the example of a Prompt above, the Log would have one input called ‘topic’ and the output will be the completion.\n\n\nA Log which contains an input query", - "code_snippets": [] + "content": "All Prompts, Tools and Evaluators produce Logs. 
A Log contains the inputs and the outputs and tracks which version of Prompt/Tool/Evaluator was used.\nFor the example of a Prompt above, the Log would have one input called ‘topic’ and the output will be the completion.\n\n\nA Log which contains an input query" }, { "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.environments", @@ -20668,11 +19806,10 @@ "authed": false, "type": "markdown", "description": "Deployment environments enable you to control the deployment lifecycle of your Prompts and other files between development and production environments.\nDeployment environments enable you to control the deployment lifecycle of your Prompts and other files between development and production environments.", - "content": "Environments enable you to deploy your model configurations to specific environments, allowing you to separately manage the deployment workflow between testing and production. With environments, you have the control required to manage the full LLM deployment lifecycle.", - "code_snippets": [] + "content": "Environments enable you to deploy your model configurations to specific environments, allowing you to separately manage the deployment workflow between testing and production. With environments, you have the control required to manage the full LLM deployment lifecycle." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.environments-managing-your-environments-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.environments-managing-your-environments", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/environments", @@ -20695,19 +19832,19 @@ ], "authed": false, "type": "markdown", - "hash": "#managing-your-environments-", + "hash": "#managing-your-environments", "content": "Every organisation automatically receives a default production environment. You can create additional environments with custom names by visiting your organisation's environments page.\n\n\nOnly Enterprise customers can create more than one environment\nThe environments you define for your organisation will be available for each project and can be viewed in the project dashboard once created.", "hierarchy": { "h3": { - "id": "managing-your-environments-", - "title": "Managing your environments " + "id": "managing-your-environments", + "title": "Managing your environments" } }, "level": "h3", "level_title": "Managing your environments" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.environments-the-default-environment-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.environments-the-default-environment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/environments", @@ -20730,23 +19867,23 @@ ], "authed": false, "type": "markdown", - "hash": "#the-default-environment-", + "hash": "#the-default-environment", "content": "By default, the production environment is marked as the Default environment. This means that all API calls targeting the \"Active Deployment,\" such as Get Active Config or Chat Deployed will use this environment. 
You can rename the default environment on the organisation's environments page.\n\n\nRenaming the environments will take immediate effect, so ensure that this\nchange is planned and does not disrupt your production workflows.", "hierarchy": { "h3": { - "id": "the-default-environment-", - "title": "The default environment " + "id": "the-default-environment", + "title": "The default environment" }, "h4": { - "id": "the-default-environment-", - "title": "The default environment " + "id": "the-default-environment", + "title": "The default environment" } }, "level": "h4", "level_title": "The default environment" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.environments-using-environments-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.environments-using-environments", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/environments", @@ -20769,19 +19906,19 @@ ], "authed": false, "type": "markdown", - "hash": "#using-environments-", + "hash": "#using-environments", "content": "Once created on the environments page, environments can be used for each project and are visible in the respective project dashboards.\nYou can deploy directly to a specific environment by selecting it in the Deployments section.\n\nAlternatively, you can deploy to multiple environments simultaneously by deploying a Model Config from either the Editor or the Model Configs table.", "hierarchy": { "h3": { - "id": "using-environments-", - "title": "Using environments " + "id": "using-environments", + "title": "Using environments" } }, "level": "h3", "level_title": "Using environments" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.environments-using-environments-via-api-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.environments-using-environments-via-api", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/environments", @@ -20804,12 +19941,12 @@ ], "authed": false, "type": "markdown", - "hash": "#using-environments-via-api-", + "hash": "#using-environments-via-api", "content": "For v4.0 API endpoints that support Active Deployments, such as Get Active Config or Chat Deployed, you can now optionally point to a model configuration deployed in a specific environment by including an optional additional environment field.\nYou can find this information in our v4.0 API Documentation or within the environment card in the Project Dashboard under the \"Use API\" option.\nClicking on the \"Use API\" option will provide code snippets that demonstrate the usage of the environment variable in practice.", "hierarchy": { "h3": { - "id": "using-environments-via-api-", - "title": "Using environments via API " + "id": "using-environments-via-api", + "title": "Using environments via API" } }, "level": "h3", @@ -20839,11 +19976,10 @@ ], "authed": false, "type": "markdown", - "description": "Learn about the core entities and concepts in Humanloop. Understand how to use them to manage your projects and improve your models.", - "code_snippets": [] + "description": "Learn about the core entities and concepts in Humanloop. Understand how to use them to manage your projects and improve your models." 
}, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-projects-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-projects", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/key-concepts", @@ -20866,19 +20002,19 @@ ], "authed": false, "type": "markdown", - "hash": "#projects-", + "hash": "#projects", "content": "Projects are now Prompts (and we've added Tools and\nEvaluators special types). The V4 API still refers to projects\nhowever as the main way to interact with your Prompts.\nA project groups together the data, prompts and models that are all achieving the same task to be done using the large language model.\nFor example, if you have a task of ‘generate google ad copy’, that should be a project. If you have a summarization that works on top of tweets, that should be a project. You should have many separate projects for each of your tasks on top of the LLM.", "hierarchy": { "h2": { - "id": "projects-", - "title": "Projects " + "id": "projects", + "title": "Projects" } }, "level": "h2", "level_title": "Projects" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-models-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-models", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/key-concepts", @@ -20901,19 +20037,19 @@ ], "authed": false, "type": "markdown", - "hash": "#models-", + "hash": "#models", "content": "The Humanloop platform gives you the ability to use and improve large language models like GPT‑3. There are many different models from multiple providers. The models may be different sizes, may have been trained differently, and are likely to perform differently. Humanloop gives you the ability to find the best model for your situation and optimise performance and cost.\nModel Provider is where the model is from. For example, ‘OpenAI’, or ‘AI21’ etc.\nModel refers to the actual AI model that should be used. Such as text-davinci-002 (large, relatively expensive, highly capable model trained to follow instructions) babbage (smaller, cheaper, faster but worse at creative tasks), or gpt-j (an open source model – coming soon!).\nFine-tuned model - finetuning takes one of the existing models and specialises it for a specific task by further training it with some task-specific data.\nFinetuning lets you get more out of the models by providing:\nHigher quality results than prompt design\n\nAbility to train on more examples than can fit in a prompt\n\nToken savings due to shorter prompts\n\nLower latency requests", "hierarchy": { "h2": { - "id": "models-", - "title": "Models " + "id": "models", + "title": "Models" } }, "level": "h2", "level_title": "Models" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-model-config-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-model-config", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/key-concepts", @@ -20936,19 +20072,19 @@ ], "authed": false, "type": "markdown", - "hash": "#model-config-", + "hash": "#model-config", "content": "This is the prompt template, the model (e.g. text-davinci-002) and the various parameters such as temperature that define how the model will generate text.\nA new model config is generated for each unique set of parameters used within that project. 
This is so you can compare different model configs to see which perform better, for things like the prompt, or settings like temperature, or stop sequences.", "hierarchy": { "h2": { - "id": "model-config-", - "title": "Model config " + "id": "model-config", + "title": "Model config" } }, "level": "h2", "level_title": "Model config" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-prompt-templates-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-prompt-templates", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/key-concepts", @@ -20971,19 +20107,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prompt-templates-", + "hash": "#prompt-templates", "content": "This is the prompt that is fed to the model, which also allows the use of variables. This allows you to track how the same prompt is being used with different input values.\nThe variables are surrounded by {{ and }} like this:", "hierarchy": { "h2": { - "id": "prompt-templates-", - "title": "Prompt templates " + "id": "prompt-templates", + "title": "Prompt templates" } }, "level": "h2", "level_title": "Prompt templates" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-input-variables-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-input-variables", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/key-concepts", @@ -21006,19 +20142,19 @@ ], "authed": false, "type": "markdown", - "hash": "#input-variables-", + "hash": "#input-variables", "content": "Variables are used in prompts to allow you to insert different values into the prompt at runtime. For example, in the prompt Write a song about {{topic}}, {{topic}} is a variable that can be replaced with different values at runtime.\nVariables in a prompt template are called Inputs.", "hierarchy": { "h2": { - "id": "input-variables-", - "title": "Input Variables " + "id": "input-variables", + "title": "Input Variables" } }, "level": "h2", "level_title": "Input Variables" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-log-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-log", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/key-concepts", @@ -21041,19 +20177,19 @@ ], "authed": false, "type": "markdown", - "hash": "#log-", + "hash": "#log", "content": "All Prompts,\nTools and Evaluators produce Logs. A Log contains the inputs and the outputs and tracks which version of Prompt/Tool/Evaluator was used.\nFor the example of a Prompt above, the Log would have one input called ‘topic’ and the output will be the completion.", "hierarchy": { "h2": { - "id": "log-", - "title": "Log " + "id": "log", + "title": "Log" } }, "level": "h2", "level_title": "Log" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-datapoint-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-datapoint", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/key-concepts", @@ -21076,19 +20212,19 @@ ], "authed": false, "type": "markdown", - "hash": "#datapoint-", + "hash": "#datapoint", "content": "A datapoint is an input-output pair that is used to evaluate the performance of a model.
It is different to a Log in that it is not tied to any specific version of a Prompt (or Tool or Evaluator), and that the target is an arbitrary object that can be used to evaluate the output of the model. See Datasets for more information.", "hierarchy": { "h2": { - "id": "datapoint-", - "title": "Datapoint " + "id": "datapoint", + "title": "Datapoint" } }, "level": "h2", "level_title": "Datapoint" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-feedback-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-feedback", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/key-concepts", @@ -21111,19 +20247,19 @@ ], "authed": false, "type": "markdown", - "hash": "#feedback-", + "hash": "#feedback", "content": "Human feedback is crucial to help understand how your models are performing and to direct you in the ways to improve them.\nExplicit feedback these are purposeful actions to review the generations. For example, ‘thumbs up/down’ button presses.\nImplicit feedback – actions taken by your users may signal whether the generation was good or bad, for example, whether the user ‘copied’ the generation, ‘saved it’ or ‘dismissed it’ (which is negative feedback).\nYou can also have corrections as a feedback too.", "hierarchy": { "h2": { - "id": "feedback-", - "title": "Feedback " + "id": "feedback", + "title": "Feedback" } }, "level": "h2", "level_title": "Feedback" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-experiment-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-experiment", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/key-concepts", @@ -21146,19 +20282,19 @@ ], "authed": false, "type": "markdown", - "hash": "#experiment-", + "hash": "#experiment", "content": "Experiments help remove the guesswork from working with large language models. Experiments allow you to set up A/B test between multiple different model configs. This enables you to try out alternative prompts or models and use the feedback from your users to determine which works better.", "hierarchy": { "h2": { - "id": "experiment-", - "title": "Experiment " + "id": "experiment", + "title": "Experiment" } }, "level": "h2", "level_title": "Experiment" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-semantic-search-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.core-concepts.key-concepts-semantic-search", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/key-concepts", @@ -21181,12 +20317,12 @@ ], "authed": false, "type": "markdown", - "hash": "#semantic-search-", + "hash": "#semantic-search", "content": "Semantic search is an effective way to retrieve the most relevant information for a query from a large dataset of documents. The documents are typically split into small chunks of text that are stored as vector embeddings which are numerical representations for the meaning of text. 
Retrieval is carried out by first embedding the query and then using some measure of vector similarity to find the most similar embeddings from the dataset and return the associated chunks of text.", "hierarchy": { "h2": { - "id": "semantic-search-", - "title": "Semantic search " + "id": "semantic-search", + "title": "Semantic search" } }, "level": "h2", @@ -21217,11 +20353,10 @@ "authed": false, "type": "markdown", "description": "Example projects demonstrating usage of Humanloop for prompt management, observability, and evaluation.\nA growing collection of example projects demonstrating usage of Humanloop.", - "content": "Visit our Github examples repo for a collection of usage examples of Humanloop.", - "code_snippets": [] + "content": "Visit our Github examples repo for a collection of usage examples of Humanloop." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.examples.examples-contents-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.examples.examples-contents", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/examples", @@ -21244,12 +20379,12 @@ ], "authed": false, "type": "markdown", - "hash": "#contents-", + "hash": "#contents", "content": "Github Description SDK Chat Logging Tool Calling Streaming \nchatbot-starter An open-source AI chatbot app template built with Next.js, the Vercel AI SDK, OpenAI, and Humanloop. TypeScript ✔️ ✔️ ✔️ \nasap CLI assistant for solving dev issues in your projects or the command line. TypeScript ✔️ ✔️ ✔️", "hierarchy": { "h2": { - "id": "contents-", - "title": "Contents " + "id": "contents", + "title": "Contents" } }, "level": "h2", @@ -21280,11 +20415,10 @@ "authed": false, "type": "markdown", "description": "Humanloop supports all the major large language model providers, including OpenAI, Anthropic, Google, Azure, and more. Additionally, you can use your own custom models with with the API and still benefit from the Humanloop platform.", - "content": "Humanloop supports all the major large language model providers, including OpenAI, Anthropic, Google, Azure, and more. Additionally, you can use your own custom models with with the API and still benefit from the Humanloop platform.", - "code_snippets": [] + "content": "Humanloop supports all the major large language model providers, including OpenAI, Anthropic, Google, Azure, and more. Additionally, you can use your own custom models with with the API and still benefit from the Humanloop platform." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.supported-models-providers-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.supported-models-providers", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/supported-models", @@ -21307,19 +20441,19 @@ ], "authed": false, "type": "markdown", - "hash": "#providers-", + "hash": "#providers", "content": "Here is a summary of which providers are supported, and what information is available for each provider automatically.\nProvider Models Cost information Token information \nOpenAI ✅ ✅ ✅ \nAnthropic ✅ ✅ ✅ \nGoogle ✅ ✅ ✅ \nAzure ✅ ✅ ✅ \nCohere ✅ ✅ ✅ \nLlama ✅ \nGroq ✅ \nAWS Bedrock Anthropic, Llama \n\n| Custom | ✅ | User-defined | User-defined |\nAdding in more providers is driven by customer demand. 
If you have a specific provider or model you would like to see supported, please reach out to us at support@humanloop.com.", "hierarchy": { "h2": { - "id": "providers-", - "title": "Providers " + "id": "providers", + "title": "Providers" } }, "level": "h2", "level_title": "Providers" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.supported-models-models-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.supported-models-models", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/supported-models", @@ -21342,12 +20476,12 @@ ], "authed": false, "type": "markdown", - "hash": "#models-", + "hash": "#models", "content": "The following are models that are integrated with Humanloop. This means that they can be used in the Prompt Editor and are callable through the Humanloop API. If you have a specific model you would like to see supported, please reach out to us at support@humanloop.com.\n\n\nRemember, you can always use any model you want including your own self-hosted\nmodels, if you orchestrate the API calls yourself and log the data to\nHumanloop.\nProvider Model Max Prompt Tokens Max Output Tokens Cost per Prompt Token Cost per Output Token Tool Support Image Support \nopenai gpt-4o 128000 4096 $0.000005 $0.000015 ✅ ✅ \nopenai gpt-4o-mini 128000 4096 $0.00000015 $0.0000006 ✅ ✅ \nopenai gpt-4 8192 4096 $0.00003 $0.00006 ✅ ❌ \nopenai gpt-4-turbo 128000 4096 $0.00001 $0.00003 ✅ ✅ \nopenai gpt-4-turbo-2024-04-09 128000 4096 $0.00001 $0.00003 ✅ ❌ \nopenai gpt-4-32k 32768 4096 $0.00003 $0.00003 ✅ ❌ \nopenai gpt-4-1106-preview 128000 4096 $0.00001 $0.00003 ✅ ❌ \nopenai gpt-4-0125-preview 128000 4096 $0.00001 $0.00003 ✅ ❌ \nopenai gpt-4-vision 128000 4096 $0.00001 $0.00003 ✅ ✅ \nopenai gpt-4-1106-vision-preview 16385 4096 $0.0000015 $0.000002 ✅ ❌ \nopenai gpt-3.5-turbo 16385 4096 $0.0000015 $0.000002 ✅ ❌ \nopenai gpt-3.5-turbo-instruct 8192 4097 $0.0000015 $0.000002 ✅ ❌ \nopenai babbage-002 16384 16384 $0.0000004 $0.0000004 ✅ ❌ \nopenai davinci-002 16384 16384 $0.000002 $0.000002 ✅ ❌ \nopenai ft:gpt-3.5-turbo 4097 4096 $0.000003 $0.000006 ✅ ❌ \nopenai ft:davinci-002 16384 16384 $0.000002 $0.000002 ✅ ❌ \nopenai text-moderation 32768 32768 $0.000003 $0.000004 ✅ ❌ \nanthropic claude-3-5-sonnet-20240620 200000 4096 $0.000003 $0.000015 ✅ ✅ \nanthropic claude-3-opus-20240229 200000 4096 $0.000015 $0.000075 ✅ ❌ \nanthropic claude-3-sonnet-20240229 200000 4096 $0.000003 $0.000015 ✅ ❌ \nanthropic claude-3-haiku-20240307 200000 4096 $0.00000025 $0.00000125 ✅ ❌ \nanthropic claude-2.1 100000 4096 $0.00000025 $0.000024 ❌ ❌ \nanthropic claude-2 100000 4096 $0.000008 $0.000024 ❌ ❌ \nanthropic claude-instant-1.2 100000 4096 $0.000008 $0.000024 ❌ ❌ \nanthropic claude-instant-1 100000 4096 $0.0000008 $0.0000024 ❌ ❌ \ngoogle gemini-pro-vision 16384 2048 $0.00000025 $0.0000005 ❌ ✅ \ngoogle gemini-1.0-pro-vision 16384 2048 $0.00000025 $0.0000005 ❌ ✅ \ngoogle gemini-pro 32760 8192 $0.00000025 $0.0000005 ❌ ❌ \ngoogle gemini-1.0-pro 32760 8192 $0.00000025 $0.0000005 ❌ ❌ \ngoogle gemini-1.5-pro-latest 1000000 8192 $0.00000025 $0.0000005 ❌ ❌ \ngoogle gemini-1.5-pro 1000000 8192 $0.00000025 $0.0000005 ❌ ❌ \ngoogle gemini-experimental 1000000 8192 $0.00000025 $0.0000005 ❌ ❌ \nopenai_azure gpt-4o 128000 4096 $0.000005 $0.000015 ✅ ✅ \nopenai_azure gpt-4o-2024-05-13 128000 4096 $0.000005 $0.000015 ✅ ✅ \nopenai_azure gpt-4-turbo-2024-04-09 128000 4096 $0.00003 $0.00006 ✅ ✅ \nopenai_azure gpt-4 8192 4096 $0.00003 $0.00006 ✅ ❌ \nopenai_azure gpt-4-0314 8192 
4096 $0.00003 $0.00006 ✅ ❌ \nopenai_azure gpt-4-32k 32768 4096 $0.00006 $0.00012 ✅ ❌ \nopenai_azure gpt-4-0125 128000 4096 $0.00001 $0.00003 ✅ ❌ \nopenai_azure gpt-4-1106 128000 4096 $0.00001 $0.00003 ✅ ❌ \nopenai_azure gpt-4-0613 8192 4096 $0.00003 $0.00006 ✅ ❌ \nopenai_azure gpt-4-turbo 128000 4096 $0.00001 $0.00003 ✅ ❌ \nopenai_azure gpt-4-turbo-vision 128000 4096 $0.000003 $0.000004 ✅ ✅ \nopenai_azure gpt-4-vision 128000 4096 $0.000003 $0.000004 ✅ ✅ \nopenai_azure gpt-35-turbo-1106 16384 4096 $0.0000015 $0.000002 ✅ ❌ \nopenai_azure gpt-35-turbo-0125 16384 4096 $0.0000005 $0.0000015 ✅ ❌ \nopenai_azure gpt-35-turbo-16k 16384 4096 $0.000003 $0.000004 ✅ ❌ \nopenai_azure gpt-35-turbo 4097 4096 $0.0000015 $0.000002 ✅ ❌ \nopenai_azure gpt-3.5-turbo-instruct 4097 4096 $0.0000015 $0.000002 ✅ ❌ \nopenai_azure gpt-35-turbo-instruct 4097 4097 $0.0000015 $0.000002 ✅ ❌ \ncohere command-r 128000 4000 $0.0000005 $0.0000015 ❌ ❌ \ncohere command-light 4096 4096 $0.000015 $0.000015 ❌ ❌ \ncohere command-r-plus 128000 4000 $0.000003 $0.000015 ❌ ❌ \ncohere command-nightly 4096 4096 $0.000015 $0.000015 ❌ ❌ \ncohere command 4096 4096 $0.000015 $0.000015 ❌ ❌ \ncohere command-medium-beta 4096 4096 $0.000015 $0.000015 ❌ ❌ \ncohere command-xlarge-beta 4096 4096 $0.000015 $0.000015 ❌ ❌ \ngroq mixtral-8x7b-32768 32768 32768 $0.0 $0.0 ❌ ❌ \ngroq llama3-8b-8192 8192 8192 $0.0 $0.0 ❌ ❌ \ngroq llama3-70b-8192 8192 8192 $0.0 $0.0 ❌ ❌ \ngroq llama2-70b-4096 4096 4096 $0.0 $0.0 ❌ ❌ \ngroq gemma-7b-it 8192 8192 $0.0 $0.0 ❌ ❌ \nreplicate llama-3-70b-instruct 8192 8192 $0.00000065 $0.00000275 ❌ ❌ \nreplicate llama-3-70b 8192 8192 $0.00000065 $0.00000275 ❌ ❌ \nreplicate llama-3-8b-instruct 8192 8192 $0.00000005 $0.00000025 ❌ ❌ \nreplicate llama-3-8b 8192 8192 $0.00000005 $0.00000025 ❌ ❌ \nreplicate llama-2-70b 4096 4096 $0.00003 $0.00006 ❌ ❌ \nreplicate llama70b-v2 4096 4096 N/A N/A ❌ ❌ \nreplicate mixtral-8x7b 4096 4096 N/A N/A ❌ ❌", "hierarchy": { "h2": { - "id": "models-", - "title": "Models " + "id": "models", + "title": "Models" } }, "level": "h2", @@ -21378,11 +20512,10 @@ "authed": false, "type": "markdown", "description": "Learn about the different roles and permissions in Humanloop to help you with prompt and data management for large language models.", - "content": "Everyone invited to the organization can access all projects currently (controlling project access coming soon).\nA user can be one of the following rolws:\nAdmin: The highest level of control. They can manage, modify, and oversee the organization's settings and have full functionality across all projects.\nDeveloper: (Enterprise tier only) Can deploy prompts, manage environments, create and add API keys, but lacks the ability to access billing or invite others.\nMember: (Enterprise tier only) The basic level of access. Can create and save prompts, run evaluations, but not deploy. Can not see any org-wide API keys.", - "code_snippets": [] + "content": "Everyone invited to the organization can access all projects currently (controlling project access coming soon).\nA user can be one of the following rolws:\nAdmin: The highest level of control. They can manage, modify, and oversee the organization's settings and have full functionality across all projects.\nDeveloper: (Enterprise tier only) Can deploy prompts, manage environments, create and add API keys, but lacks the ability to access billing or invite others.\nMember: (Enterprise tier only) The basic level of access. Can create and save prompts, run evaluations, but not deploy. 
Can not see any org-wide API keys." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.access-roles-rbacs-summary-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.access-roles-rbacs-summary", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/access-roles", @@ -21405,12 +20538,12 @@ ], "authed": false, "type": "markdown", - "hash": "#rbacs-summary-", + "hash": "#rbacs-summary", "content": "Here is the full breakdown of roles and access:\nAction Member Developer Admin \nCreate and manage Prompts ✔️ ✔️ ✔️ \nInspect logs and feedback ✔️ ✔️ ✔️ \nCreate and manage evaluators ✔️ ✔️ ✔️ \nRun evaluations ✔️ ✔️ ✔️ \nCreate and manage datasets ✔️ ✔️ ✔️ \nCreate and manage API keys ✔️ ✔️ \nManage prompt deployments ✔️ ✔️ \nCreate and manage environments ✔️ ✔️ \nSend invites ✔️ \nSet user roles ✔️ \nManage billing ✔️ \nChange organization settings ✔️", "hierarchy": { "h2": { - "id": "rbacs-summary-", - "title": "RBACs summary " + "id": "rbacs-summary", + "title": "RBACs summary" } }, "level": "h2", @@ -21441,11 +20574,10 @@ "authed": false, "type": "markdown", "description": "The .prompt file format is a human-readable and version-control-friendly format for storing model configurations.\nOur file format for serialising prompts to store alongside your source code.", - "content": "Our .prompt file format is a serialized version of a model config that is designed to be human-readable and suitable for checking into your version control systems alongside your code.", - "code_snippets": [] + "content": "Our .prompt file format is a serialized version of a model config that is designed to be human-readable and suitable for checking into your version control systems alongside your code." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.prompt-file-format-format-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.prompt-file-format-format", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/prompt-file-format", @@ -21468,19 +20600,19 @@ ], "authed": false, "type": "markdown", - "hash": "#format-", + "hash": "#format", "content": "The .prompt file is heavily inspired by MDX, with model and hyperparameters specified in a YAML header alongside a JSX-inspired format for your Chat Template.", "hierarchy": { "h2": { - "id": "format-", - "title": "Format " + "id": "format", + "title": "Format" } }, "level": "h2", "level_title": "Format" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.prompt-file-format-basic-examples-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.prompt-file-format-basic-examples", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/prompt-file-format", @@ -21503,8 +20635,7 @@ ], "authed": false, "type": "markdown", - "hash": "#basic-examples-", - "content": "", + "hash": "#basic-examples", "code_snippets": [ { "lang": "jsx", @@ -21529,19 +20660,19 @@ ], "hierarchy": { "h2": { - "id": "basic-examples-", - "title": "Basic examples " + "id": "basic-examples", + "title": "Basic examples" }, "h3": { - "id": "basic-examples-", - "title": "Basic examples " + "id": "basic-examples", + "title": "Basic examples" } }, "level": "h3", "level_title": "Basic examples" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.prompt-file-format-multi-modality-and-images-", + "objectID": 
"humanloop:humanloop.com:root..v4.uv.docs.docs.references.prompt-file-format-multi-modality-and-images", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/prompt-file-format", @@ -21564,7 +20695,7 @@ ], "authed": false, "type": "markdown", - "hash": "#multi-modality-and-images-", + "hash": "#multi-modality-and-images", "content": "Images can be specified using nested tags within a message. To specify text alongside the image, use a tag.", "code_snippets": [ { @@ -21575,19 +20706,19 @@ ], "hierarchy": { "h2": { - "id": "multi-modality-and-images-", - "title": "Multi-modality and Images " + "id": "multi-modality-and-images", + "title": "Multi-modality and Images" }, "h3": { - "id": "multi-modality-and-images-", - "title": "Multi-modality and Images " + "id": "multi-modality-and-images", + "title": "Multi-modality and Images" } }, "level": "h3", "level_title": "Multi-modality and Images" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.prompt-file-format-tools-tool-calls-and-tool-responses-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.prompt-file-format-tools-tool-calls-and-tool-responses", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/prompt-file-format", @@ -21610,7 +20741,7 @@ ], "authed": false, "type": "markdown", - "hash": "#tools-tool-calls-and-tool-responses-", + "hash": "#tools-tool-calls-and-tool-responses", "content": "Specify the tools available to the model as a JSON list in the YAML header.\nTool calls in assistant messages can be added with nested tags. A tag within an tag denotes a tool call of type: \"function\", and requires the attributes name and id. The text wrapped in a tag should be a JSON-formatted string containing the tool call's arguments.\nTool call responses can then be added with tags after the message.", "code_snippets": [ { @@ -21623,12 +20754,12 @@ ], "hierarchy": { "h2": { - "id": "tools-tool-calls-and-tool-responses-", - "title": "Tools, tool calls and tool responses " + "id": "tools-tool-calls-and-tool-responses", + "title": "Tools, tool calls and tool responses" }, "h3": { - "id": "tools-tool-calls-and-tool-responses-", - "title": "Tools, tool calls and tool responses " + "id": "tools-tool-calls-and-tool-responses", + "title": "Tools, tool calls and tool responses" } }, "level": "h3", @@ -21659,11 +20790,10 @@ "authed": false, "type": "markdown", "description": "Reference our Postman Workspace for examples of how to interact with the Humanloop API directly.\nA companion to our API references.", - "content": "In our various guides we assumed the use of our Python SDK. There are some use cases where this is not appropriate. For example, if you are integrating Humanloop from a non-Python backend, such as Node.js, or using a no-or-low-code builder such as Bubble or Zapier. In these cases, you can leverage our RESTful APIs directly.\nTo help with direct API integrations, we maintain a Postman Workspace with various worked examples for the main endpoints you will need.", - "code_snippets": [] + "content": "In our various guides we assumed the use of our Python SDK. There are some use cases where this is not appropriate. For example, if you are integrating Humanloop from a non-Python backend, such as Node.js, or using a no-or-low-code builder such as Bubble or Zapier. 
In these cases, you can leverage our RESTful APIs directly.\nTo help with direct API integrations, we maintain a Postman Workspace with various worked examples for the main endpoints you will need." }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.postman-workspace-prerequisites-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.postman-workspace-prerequisites", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/postman-workspace", @@ -21686,19 +20816,19 @@ ], "authed": false, "type": "markdown", - "hash": "#prerequisites-", + "hash": "#prerequisites", "content": "A Humanloop account. If you don't have one, you can create an account now by going to the Sign up page.", "hierarchy": { "h2": { - "id": "prerequisites-", - "title": "Prerequisites " + "id": "prerequisites", + "title": "Prerequisites" } }, "level": "h2", "level_title": "Prerequisites" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.postman-workspace-set-your-api-keys-in-postman-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.postman-workspace-set-your-api-keys-in-postman", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/postman-workspace", @@ -21721,19 +20851,19 @@ ], "authed": false, "type": "markdown", - "hash": "#set-your-api-keys-in-postman-", + "hash": "#set-your-api-keys-in-postman", "content": "Navigate to your Humanloop profile page and copy your Humanloop API key.\n\nNavigate to our Postman Workspace and set the environment to Production in the dropdown in the top right where it says No Environment\n\nSelect the Environment quick look button beside the environment dropdown and paste your Humanloop API key into the CURRENT VALUE of the user_api_key variable:\n\n\n\n\nNavigate to your OpenAI profile and copy the API key.\n\nNavigate back to our Postman Workspace and paste your OpenAI key into the CURRENT VALUE of the global open_ai_key variable:\n\n\n\n\nYou are now all set to use Postman to interact with the APIs with real examples!", "hierarchy": { "h2": { - "id": "set-your-api-keys-in-postman-", - "title": "Set your API keys in Postman " + "id": "set-your-api-keys-in-postman", + "title": "Set your API keys in Postman" } }, "level": "h2", "level_title": "Set your API keys in Postman" }, { - "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.postman-workspace-try-out-the-postman-collections-", + "objectID": "humanloop:humanloop.com:root..v4.uv.docs.docs.references.postman-workspace-try-out-the-postman-collections", "org_id": "humanloop", "domain": "humanloop.com", "pathname": "/docs/v4/postman-workspace", @@ -21756,12 +20886,12 @@ ], "authed": false, "type": "markdown", - "hash": "#try-out-the-postman-collections-", + "hash": "#try-out-the-postman-collections", "content": "A collection is a set of executable API specifications that are grouped together in Postman.\nThere are 4 executable collections provided to check out.\nThe Chat collection is the best place to start to get a project setup and sending chat messages. To try it out:\nExpand the V4 Chat collection on the left hand side.\n\nSelect Create chat sending model-config from the list\n\nExecute the POST calls in order from top to bottom by selecting them under the collection on the left hand side and pressing the Send button on the right hand side. 
You should see the resulting response body appearing in the box below the request body.\nTry editing the request body and resending - you can reference the corresponding API guides for a full spec of the request schema.\n\n\n\n\n\n\nIf you now navigate to your Humanloop projects page, you will see a new project called assistant with logged data.\n\nYou can now generate populated code snippets across a range of languages by selecting the code icon on the right hand side beside the request and response bodies:", "hierarchy": { "h2": { - "id": "try-out-the-postman-collections-", - "title": "Try out the Postman Collections " + "id": "try-out-the-postman-collections", + "title": "Try out the Postman Collections" } }, "level": "h2", diff --git a/packages/ui/fern-docs-search-server/src/algolia/__test__/__snapshots__/hume.json b/packages/ui/fern-docs-search-server/src/algolia/__test__/__snapshots__/hume.json new file mode 100644 index 0000000000..b0e91d8ae4 --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/algolia/__test__/__snapshots__/hume.json @@ -0,0 +1,6735 @@ +[ + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.welcome-to-hume-ai", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/intro", + "page_title": "Welcome to Hume AI", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "Hume AI builds AI models that enable technology to communicate with empathy and learn to make people happy.", + "content": "Hume AI builds AI models that enable technology to communicate with empathy and learn to make people happy.\nSo much of human communication—in-person, text, audio, or video—is shaped by emotional expression. These cues allow us to attend to each other's well-being. Our platform provides the APIs needed to ensure that technology, too, is guided by empathy and the pursuit of human well-being." + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.welcome-to-hume-ai-empathic-voice-interface-api", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/intro", + "page_title": "Welcome to Hume AI", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#empathic-voice-interface-api", + "content": "Hume's Empathic Voice Interface (EVI) is the world's first emotionally intelligent voice AI. It is the only API that measures nuanced vocal modulations and responds to them using an empathic large language model (eLLM), which guides language and speech generation. 
Trained on millions of human interactions, our eLLM unites language modeling and text-to-speech with better EQ, prosody, end-of-turn detection, interruptibility, and alignment.", + "hierarchy": { + "h3": { + "id": "empathic-voice-interface-api", + "title": "Empathic Voice Interface API" + } + }, + "level": "h3", + "level_title": "Empathic Voice Interface API" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.welcome-to-hume-ai-expression-measurement-api", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/intro", + "page_title": "Welcome to Hume AI", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#expression-measurement-api", + "content": "Hume's state-of-the-art expression measurement models for the voice, face, and language are built on 10+ years of research and advances in semantic space theory pioneered by Alan Cowen. Our expression measurement models are able to capture hundreds of dimensions of human expression in audio, video, and images.", + "hierarchy": { + "h3": { + "id": "expression-measurement-api", + "title": "Expression Measurement API" + } + }, + "level": "h3", + "level_title": "Expression Measurement API" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.welcome-to-hume-ai-custom-models-api", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/intro", + "page_title": "Welcome to Hume AI", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#custom-models-api", + "content": "Our Custom Models API builds on our expression measurement models and state-of-the-art eLLMs to bring custom insights to your application. 
Developed using transfer learning from our expression measurement models and eLLMs, our Custom Models API can predict almost any outcome more accurately than language alone, whether it's toxicity, depressed mood, driver drowsiness, or any other metric important to your users.", + "hierarchy": { + "h3": { + "id": "custom-models-api", + "title": "Custom Models API" + } + }, + "level": "h3", + "level_title": "Custom Models API" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.welcome-to-hume-ai-api-reference", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/intro", + "page_title": "Welcome to Hume AI", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#api-reference", + "content": "API that measures nuanced vocal modulations and responds to them using an\nempathic large language model\n\n\nMeasure facial, vocal, and linguistic expressions\n\n\nPredict almost any outcome more accurately than language alone", + "hierarchy": { + "h2": { + "id": "api-reference", + "title": "API Reference" + } + }, + "level": "h2", + "level_title": "API Reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.welcome-to-hume-ai-get-support", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/intro", + "page_title": "Welcome to Hume AI", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#get-support", + "content": "If you have questions or run into challenges, we're here to help!\n\n\n\n\nJoin our Discord for answers to any technical questions", + "hierarchy": { + "h2": { + "id": "get-support", + "title": "Get support" + } + }, + "level": "h2", + "level_title": "Get support" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.api-key", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/introduction/api-key", + "page_title": "Quickstart tutorial", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "Learn how to get started with Hume in just a few minutes.", + "content": "Sign in to Hume.\nNavigate to the API Keys page.\nCopy your API key.\n\n\nAPI key\n\n\nYour API key is a random sequence of letters and numbers.\nIt should look something like ntylOFypHLRXMmjlTxljoecAnMgB30JtOLZC2nph1TYErCvv" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.support", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/support", + "page_title": "Support", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "Get help from the team at Hume" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.support-discord", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/support", + "page_title": "Support", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + 
"role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#discord", + "content": "Join our Discord for answers to any technical questions.", + "hierarchy": { + "h2": { + "id": "discord", + "title": "Discord" + } + }, + "level": "h2", + "level_title": "Discord" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.support-legal", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/support", + "page_title": "Support", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#legal", + "content": "Contact legal@hume.ai for legal and data privacy inquires.", + "hierarchy": { + "h2": { + "id": "legal", + "title": "Legal" + } + }, + "level": "h2", + "level_title": "Legal" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.support-billing", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/support", + "page_title": "Support", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#billing", + "content": "Email billing@hume.ai for any questions or concerns about billing.", + "hierarchy": { + "h2": { + "id": "billing", + "title": "Billing" + } + }, + "level": "h2", + "level_title": "Billing" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.introduction.support-contact-us", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/support", + "page_title": "Support", + "breadcrumb": [ + { + "title": "Introduction", + "pathname": "/docs/introduction" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#contact-us", + "content": "For all other inquires, see hume.ai/contact.", + "hierarchy": { + "h2": { + "id": "contact-us", + "title": "Contact us" + } + }, + "level": "h2", + "level_title": "Contact us" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.overview", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/overview", + "page_title": "Empathic Voice Interface (EVI)", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "Hume's Empathic Voice Interface (EVI) is the world’s first emotionally intelligent voice AI.", + "content": "Hume's Empathic Voice Interface (EVI) is the world’s first emotionally intelligent voice AI. It accepts live audio input and returns both generated audio and transcripts augmented with measures of vocal expression. By processing the tune, rhythm, and timbre of speech, EVI unlocks a variety of new capabilities, like knowing when to speak and generating more empathic language with the right tone of voice. 
These features enable smoother and more satisfying voice-based interactions between humans and AI, opening new possibilities for personal AI, customer service, accessibility, robotics, immersive gaming, VR experiences, and much more.\nWe provide a suite of tools to integrate and customize EVI for your application, including a WebSocket API that handles audio and text transport, a REST API, and SDKs for TypeScript and Python to simplify integration into web and Python-based projects. Additionally, we provide open-source examples and a web widget as practical starting points for developers to explore and implement EVI's capabilities within their own projects." + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.overview-building-with-evi", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/overview", + "page_title": "Overview", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#building-with-evi", + "content": "The main way to work with EVI is through a WebSocket connection that sends audio and receives responses in real-time. This enables fluid, bidirectional dialogue where users speak, EVI listens and analyzes their expressions, and EVI generates emotionally intelligent responses.\nYou start a conversation by connecting to the WebSocket and streaming the user’s voice input to EVI. You can also send EVI text, and it will speak that text aloud.\nEVI will respond with:\nThe text of EVI’s reply\n\nEVI’s expressive audio response\n\nA transcript of the user's message along with their vocal expression measures\n\nMessages if the user interrupts EVI\n\nA message to let you know if EVI has finished responding\n\nError messages if issues arise", + "hierarchy": { + "h2": { + "id": "building-with-evi", + "title": "Building with EVI" + } + }, + "level": "h2", + "level_title": "Building with EVI" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.overview-overview-of-evi-features", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/overview", + "page_title": "Overview", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#overview-of-evi-features", + "content": "Basic capabilities\n\nTranscribes speech (ASR)\n\nFast and accurate ASR in partnership with Deepgram returns a full transcript of the conversation, with Hume’s\nexpression measures tied to each sentence.\n\n\n\n\nGenerates language responses (LLM)\n\nRapid language generation with our eLLM, blended seamlessly with configurable partner APIs (OpenAI, Anthropic,\nFireworks).\n\n\n\n\nGenerates voice responses (TTS)\n\nStreaming speech generation via our proprietary expressive text-to-speech model.\n\n\n\nResponds with low latency\n\nImmediate response provided by the fastest models running together on one service.\n\n\n\n Empathic AI (eLLM) features\n\nResponds at the right time\n\nUses your tone of voice for state-of-the-art end-of-turn detection — the true bottleneck to responding rapidly\nwithout interrupting you.\n\n\n\n\nUnderstands users’ prosody\n\nProvides 
streaming measurements of the tune, rhythm, and timbre of the user’s speech using Hume’s\n\n\nprosody model, integrated with our eLLM.\n\n\n\n\nForms its own natural tone of voice\n\nGuided by the users’ prosody and language, our model responds with an empathic, naturalistic tone of voice,\nmatching the users’ nuanced “vibe” (calmness, interest, excitement, etc.). It responds to frustration with an\napologetic tone, to sadness with sympathy, and more.\n\n\n\n\nResponds to expression\n\nPowered by our empathic large language model (eLLM), EVI crafts responses that are not just intelligent but\nattuned to what the user is expressing with their voice.\n\n\n\n\nAlways interruptible\n\nStops rapidly whenever users interject, listens, and responds with the right context based on where it left off.\n\n\n\n\nAligned with well-being\n\nTrained on human reactions to optimize for positive expressions like happiness and satisfaction. EVI will\ncontinue to learn from users’ reactions using our upcoming fine-tuning endpoint.\n\n\n\n\n Developer tools\n\nWebSocket API\n\nPrimary interface for real-time bidirectional interaction with EVI, handles audio and text transport.\n\n\n\nREST API \n\nA configuration API that allows developers to customize their EVI - the system prompt, speaking rate, voice,\nLLM, tools the EVI can use, and other options. The system prompt shapes an EVI’s behavior and its responses.\n\n\n\n\nTypeScript SDK\n\nEncapsulates complexities of audio and WebSockets for seamless integration into web applications.\n\n\n\nPython SDK\n\nSimplifies the process of integrating EVI into any Python-based project.\n\n\n\nOpen source examples\n\nExample repositories provide a starting point for developers and demonstrate EVI's capabilities.\n\n\n\nWeb widget \n\nAn iframe widget that any developer can easily embed in their website, allowing users to speak to a\nconversational AI voice about your content.", + "hierarchy": { + "h2": { + "id": "overview-of-evi-features", + "title": "Overview of EVI features" + } + }, + "level": "h2", + "level_title": "Overview of EVI features" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.overview-api-limits", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/overview", + "page_title": "Overview", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#api-limits", + "content": "Request rate limit: limited to fifty (50) requests per second.\n\nPayload size limit: messages cannot exceed 16MB in size.\n\nWebSocket connections limit: limited to up to two (2) concurrent connections.\n\nWebSocket duration limit: connections are subject to a timeout after thirty (30) minutes of activity, or after one (1) minute of inactivity.\n\n\n\n\nTo request an increase in your concurrent connection limit, please submit the \"Application to Increase EVI Concurrent Connections\" found in the EVI section of the Profile Tab.", + "hierarchy": { + "h2": { + "id": "api-limits", + "title": "API limits" + } + }, + "level": "h2", + "level_title": "API limits" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.overview-authentication", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/overview", + "page_title": "Overview", 
+ "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#authentication", + "content": "The Empathic Voice Interface (EVI) supports two authentication strategies:\nOAuth strategy: this strategy is tailored for client-side development. It involves an additional step of obtaining an access token by generating a client ID and making an API request to fetch the access token. This extra step adds a layer of security to ensure your API key does not get exposed.\n\nAPI key strategy: designed for server-side development, this strategy allows developers to establish an authenticated WebSocket connection directly using their API key. This eliminates the need for an additional access token request.\n\n\nUsing either strategy, establishing an authenticated connection requires that you specify the authentication strategy and supply the corresponding key in the request parameters of the EVI WebSocket endpoint. See step-by-step instructions for obtaining an access token below:\n\n\nObtain API keys\nYour API key and client secret can both be accessed from the Portal:\nSign in to Hume\n\nNavigate to the API Keys page\n\n\nFetch access token\nUsing your API key and client secret, a client ID can now be generated. To generate your client ID you'll need to concatenate your API key and client secret, separated by a colon (:), then base64 encode the string. With your client ID you can now initiate a POST request to https://api.hume.ai/oauth2-cc/token to receive your access token.\n\n\n\n\n\n\nYour access token can now be used to establish an authenticated WebSocket connection.", + "code_snippets": [ + { + "lang": "sh", + "code": "# Configuration variables\napiKey=\"${API_KEY}\" # Sourced from environment variable or secure store\nclientSecret=\"${CLIENT_SECRET}\" # Sourced from environment variable or secure store\n# Base64 encode API Key and Client Secret\nclientId=$(echo -n \"$apiKey:$clientSecret\" | base64)\n# Perform the API request\nresponse=$(curl -s --location 'https://api.hume.ai/oauth2-cc/token' \\\n --header 'Content-Type: application/x-www-form-urlencoded' \\\n --header \"Authorization: Basic $clientId\" \\\n --data-urlencode 'grant_type=client_credentials')" + }, + { + "lang": "sh", + "code": "# Configuration variables\napiKey=\"${API_KEY}\" # Sourced from environment variable or secure store\nclientSecret=\"${CLIENT_SECRET}\" # Sourced from environment variable or secure store\n# Base64 encode API Key and Client Secret\nclientId=$(echo -n \"$apiKey:$clientSecret\" | base64)\n# Perform the API request\nresponse=$(curl -s --location 'https://api.hume.ai/oauth2-cc/token' \\\n --header 'Content-Type: application/x-www-form-urlencoded' \\\n --header \"Authorization: Basic $clientId\" \\\n --data-urlencode 'grant_type=client_credentials')" + }, + { + "lang": "typescript", + "code": "import { Hume, HumeClient } from 'hume';\n/**\n * When using the TypeScript SDK, the client ID is generated and \n * the access token is retrieved and applied automatically. 
Simply \n * provide your API key and Client Secret when instantiating the \n * Hume client.\n */\nconst client = new HumeClient({\n apiKey: ,\n clientSecret: ,\n});" + }, + { + "lang": "typescript", + "code": "import { Hume, HumeClient } from 'hume';\n/**\n * When using the TypeScript SDK, the client ID is generated and \n * the access token is retrieved and applied automatically. Simply \n * provide your API key and Client Secret when instantiating the \n * Hume client.\n */\nconst client = new HumeClient({\n apiKey: ,\n clientSecret: ,\n});" + } + ], + "hierarchy": { + "h2": { + "id": "authentication", + "title": "Authentication" + } + }, + "level": "h2", + "level_title": "Authentication" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.quickstart", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/quickstart", + "page_title": "Quickstart", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "content": "This quickstart guide outlines the process of implementing the Empathic Voice Interface (EVI).\nSelect a language below to get started:\n\n\n\n\nThis tutorial utilizes Hume's TypeScript SDK to consume the Empathic Voice Interface, and can be broken down into five key components: authentication,\nestablishing a secure WebSocket connection, capturing the audio input, and playing back the audio output. To see this code fully implemented within a\nfrontend web application, visit the Github repo here: hume-evi-typescript-example.\n\n\nAuthenticate\nIn order to establish an authenticated connection we will first need to instantiate the Hume client with our API key and Client Secret.\nThese keys can be obtained by logging into the portal and visiting the API keys page.\n\n\nIn the sample code below, the API key and client secret have been saved to\nenvironment variables. Avoid hard coding these values in your project to\nprevent them from being leaked.\n\n\nWhen using our Typescript SDK, the Access Token necessary to establish an authenticated connection with EVI is fetched and applied under the hood\nafter the Hume client is instantiated with your credientials.\nConnect\nWith the Hume client instantiated with our credentials, we can now establish an authenticated WebSocket connection with EVI and define our WebSocket event handlers.\nFor now we will include placeholder event handlers to be updated in later steps.\n\n\nAudio input\nTo capture audio and send it through the socket as an audio input, several steps are necessary. First, we need to handle user permissions\nto access the microphone. Next, we'll use the Media Stream API to capture the audio, and the MediaRecorder API to record the captured audio.\nWe then base64 encode the recording audio Blob, and finally send the encoded audio through the WebSocket using the sendAudioInputmethod.\n\n\n\n\nAccepted audio formats include: mp3, wav, aac, ogg, flac, webm,\navr, cdda, cvs/vms, aiff, au, amr, mp2, mp4, ac3, avi,\nwmv, mpeg, ircam.\nAudio output\nThe response will comprise multiple messages, detailed as follows:\nuser_message: This message encapsulates the transcription of the audio input. 
Additionally, it\nincludes expression measurement predictions related to the speaker's vocal prosody.\n\nassistant_message: For every sentence within the response, an AssistantMessage is dispatched.\nThis message not only relays the content of the response but also features predictions regarding the\nexpressive qualities of the generated audio response.\n\naudio_output: Accompanying each AssistantMessage, an AudioOutput message will be provided.\nThis contains the actual audio (binary) response corresponding to an AssistantMessage.\n\nassistant_end: Signifying the conclusion of the response to the audio input, an AssistantEnd\nmessage is delivered as the final piece of the communication.\n\n\nHere we will focus on playing the received audio output. To play the audio output from the response we\nneed to define our logic for converting the received binary to a Blob, and creating an HTMLAudioInput\nto play the audio. We then need to update the client's on message WebSocket event handler to invoke\nthe logic to playback the audio when receiving the audio output. To manage playback for the incoming\naudio here we'll implement a queue and sequentially play the audio back.\n\n\nInterrupt\nInterruptibility is a distinguishing feature of the Empathic Voice Interface. If an audio input is sent\nthrough the websocket while receiving response messages for a previous audio input, the response to\nthe previous audio input will stop being sent. Additionally the interface will send back a\nuser_interruption message, and begin responding to the new audio input.\n\n\n\n\nThis tutorial utilizes Hume's React SDK to consume the Empathic Voice Interface, and can be broken down into two key components: authentication and configuring the context provider. To see this code fully implemented within a frontend web application using the App Router from Next.js, visit this GitHub repository: evi-nextjs-app-router\n\n\nPrerequisites\nBefore you begin, you will need to have an existing Next.js project set up using the App Router.\nAuthenticate\nIn order to make an authenticated connection we will first need to generate an access token. Doing so will\nrequire your API key and client secret. These keys can be obtained by logging into the portal and visiting the\nAPI keys page.\n\n\nIn the sample code below, the API key and client secret have been saved to\nenvironment variables. Avoid hard coding these values in your project to\nprevent them from being leaked.\n\n\nSetup Context Provider\nAfter fetching our access token we can pass it to our ClientComponent. First we set up the VoiceProvider so that our Messages and Controls components can access the context. We also pass the access token to the VoiceProvider's auth prop for setting up the websocket connection.\n\n\nAudio input\n will handle the microphone and playback logic.\nStarting session\nIn order to start a session, you can use the connect function. It is important that this event is attached to a user interaction event (like a click) so that the browser is capable of playing Audio.\n\n\nDisplaying message history\nTo display the message history, we can use the useVoice hook to access the messages array. We can then map over the messages array to display the role (Assistant or User) and content of each message.\n\n\nInterrupt\nUnlike the TypeScript example, the Next.js example does not require additional code to handle interruptions. 
The VoiceProvider handles this automatically.\n\n\nThis tutorial utilizes Hume's React SDK to consume the Empathic Voice Interface, and can be broken down into two key components: authentication and configuring the context provider. This tutorial utilizes Hume's React SDK to consume the Empathic Voice Interface, and can be broken down into two key components: authentication and configuring the context provider. To see this code fully implemented within a frontend web application using the Pages Router from Next.js, visit this GitHub repository: evi-nextjs-pages-router\n\n\nPrerequisites\nBefore you begin, you will need to have an existing Next.js project set up using the Pages Router.\nAuthenticate and Setup Context Provider\nIn order to make an authenticated connection we will first need to generate an access token. Doing so will\nrequire your API key and client secret. These keys can be obtained by logging into the portal and visiting the\nAPI keys page.\n\n\nIn the sample code below, the API key and client secret have been saved to\nenvironment variables. Avoid hard coding these values in your project to\nprevent them from being leaked.\n\n\nAudio input\n is designed to manage microphone inputs and audio playback. It abstracts the complexities of audio processing to allow developers to focus on developing interactive voice-driven functionalities. For a closer look at how processes audio inputs and controls playback, you can view the source code here.\nStarting session\nIn order to start a session, you can use the connect function. It is important that this event is attached to a user interaction event (like a click) so that the browser is capable of playing Audio.\n\n\nDisplaying message history\nTo display the message history, we can use the useVoice hook to access the messages array. We can then map over the messages array to display the role (Assistant or User) and content of each message.\n\n\nInterrupt\nUnlike the TypeScript example, the Next.js example does not require additional code to handle interruptions. The VoiceProvider handles this automatically by pushing audio messages to a playback queue and cancelling audio playback when an interruption message is received.\n\n\nThis is a simplified example of streaming a session with EVI using your device's microphone. To see this code fully implemented with complete instructions, visit the GitHub repository: evi-python-example\nPython versions 3.9, 3.10, and 3.11 are supported. To use the basic functionality of HumeVoiceClient, HumeBatchClient or HumeStreamClient, there are no additional system dependencies. However, using the audio playback functionality of the EVI MicrophoneInterface may require a few extra dependencies depending on your operating system.\nThe Python SDK is currently supported on Mac and Linux, and not yet on Windows.\n\n\nWe recommend using a virtual environment like venv in Python to manage project-specific dependencies without affecting the global Python installation. This helps avoid version conflicts between packages and makes it easier to replicate and troubleshoot projects across different systems. 
See the section \"Setting up a virtual environment\" in the repository.\nTo use microphone functionality in the MicrophoneInterface as shown below, run:\nFor audio playback, install dependencies with the following commands:\n\n\n\n\n\n\n\n\nLet's walk through the steps, or you can jump down to the complete code snippet under \"Putting it all together.\"\n\n\nIn the sample code below, the Hume API key is hard-coded; this is to make this guide as simple as possible. In practice, do not hard code these values in your project to prevent them from being leaked. See the section \"Authenticate and Connect\" in the repository for instructions on using environment variables to prevent accidental exposure of your credentials.\n\n\nImport libraries\nFirst we import the required Hume libraries and asyncio for asynchronous functions calls.\n\n\nAuthenticate and Connect\nThe Python SDK uses a Hume API key to authenticate. These keys can be obtained by logging into the portal and visiting the API keys page. Replace the placeholder \"HUME_API_KEY\" with your Hume API key.\n\n\nOptional: Specify device\nYou can specify your microphone device using the device parameter. See Optional: Specify device in the repository for details on how to list your audio devices and manually set one for EVI.\nExecute\nInitialize, execute, and manage the lifecycle of the event loop in the asyncio-based application, making sure that the main() coroutine runs effectively and that the application shuts down cleanly after the coroutine finishes executing.\n\n\nPutting it all together\nHere is the complete code from the steps above to run this example. Keep in mind that in practice you should use an environment variable to store the Hume API key, as is done in the evi-python-example repository.", + "code_snippets": [ + { + "lang": "typescript", + "code": "import { Hume, HumeClient } from 'hume';\n\n// instantiate the Hume client and authenticate\nconst client = new HumeClient({\n apiKey: import.meta.env.HUME_API_KEY,\n clientSecret: import.meta.env.HUME_CLIENT_SECRET,\n});" + }, + { + "lang": "typescript", + "code": "import { Hume, HumeClient } from 'hume';\n\n// instantiate the Hume client and authenticate\nconst client = new HumeClient({\n apiKey: import.meta.env.HUME_API_KEY,\n clientSecret: import.meta.env.HUME_CLIENT_SECRET,\n});" + }, + { + "lang": "typescript", + "code": "import { Hume, HumeClient } from 'hume';\n\n// instantiate the Hume client and authenticate\nconst client = new HumeClient({\n apiKey: import.meta.env.HUME_API_KEY,\n clientSecret: import.meta.env.HUME_CLIENT_SECRET,\n});\n\n// instantiates WebSocket and establishes an authenticated connection\nconst socket = await client.empathicVoice.chat.connect({\n onOpen: () => {\n console.log('WebSocket connection opened');\n },\n onMessage: (message) => {\n console.log(message);\n },\n onError: (error) => {\n console.error(error);\n },\n onClose: () => {\n console.log('WebSocket connection closed');\n }\n});" + }, + { + "lang": "typescript", + "code": "import { Hume, HumeClient } from 'hume';\n\n// instantiate the Hume client and authenticate\nconst client = new HumeClient({\n apiKey: import.meta.env.HUME_API_KEY,\n clientSecret: import.meta.env.HUME_CLIENT_SECRET,\n});\n\n// instantiates WebSocket and establishes an authenticated connection\nconst socket = await client.empathicVoice.chat.connect({\n onOpen: () => {\n console.log('WebSocket connection opened');\n },\n onMessage: (message) => {\n console.log(message);\n },\n onError: (error) => {\n 
console.error(error);\n },\n onClose: () => {\n console.log('WebSocket connection closed');\n }\n});" + }, + { + "lang": "typescript", + "code": "import {\n convertBlobToBase64,\n ensureSingleValidAudioTrack,\n getAudioStream,\n} from 'hume';\n\n// the recorder responsible for recording the audio stream to be prepared as the audio input\nlet recorder: MediaRecorder | null = null;\n// the stream of audio captured from the user's microphone\nlet audioStream: MediaStream | null = null;\n\n// define function for capturing audio\nasync function captureAudio(): Promise {\n // prompts user for permission to capture audio, obtains media stream upon approval\n audioStream = await getAudioStream();\n // ensure there is only one audio track in the stream\n ensureSingleValidAudioTrack(audioStream);\n // instantiate the media recorder\n recorder = new MediaRecorder(audioStream, { mimeType });\n // callback for when recorded chunk is available to be processed\n recorder.ondataavailable = async ({ data }) => {\n // IF size of data is smaller than 1 byte then do nothing\n if (data.size < 1) return;\n // base64 encode audio data\n const encodedAudioData = await convertBlobToBase64(data);\n // define the audio_input message JSON\n const audioInput: Omit = {\n data: encodedAudioData,\n };\n // send audio_input message\n socket?.sendAudioInput(audioInput);\n };\n // capture audio input at a rate of 100ms (recommended)\n const timeSlice = 100;\n recorder.start(timeSlice);\n}\n\n// define a WebSocket open event handler to capture audio\nasync function handleWebSocketOpenEvent(): Promise {\n // place logic here which you would like invoked when the socket opens\n console.log('Web socket connection opened');\n await captureAudio();\n}" + }, + { + "lang": "typescript", + "code": "import {\n convertBlobToBase64,\n ensureSingleValidAudioTrack,\n getAudioStream,\n} from 'hume';\n\n// the recorder responsible for recording the audio stream to be prepared as the audio input\nlet recorder: MediaRecorder | null = null;\n// the stream of audio captured from the user's microphone\nlet audioStream: MediaStream | null = null;\n\n// define function for capturing audio\nasync function captureAudio(): Promise {\n // prompts user for permission to capture audio, obtains media stream upon approval\n audioStream = await getAudioStream();\n // ensure there is only one audio track in the stream\n ensureSingleValidAudioTrack(audioStream);\n // instantiate the media recorder\n recorder = new MediaRecorder(audioStream, { mimeType });\n // callback for when recorded chunk is available to be processed\n recorder.ondataavailable = async ({ data }) => {\n // IF size of data is smaller than 1 byte then do nothing\n if (data.size < 1) return;\n // base64 encode audio data\n const encodedAudioData = await convertBlobToBase64(data);\n // define the audio_input message JSON\n const audioInput: Omit = {\n data: encodedAudioData,\n };\n // send audio_input message\n socket?.sendAudioInput(audioInput);\n };\n // capture audio input at a rate of 100ms (recommended)\n const timeSlice = 100;\n recorder.start(timeSlice);\n}\n\n// define a WebSocket open event handler to capture audio\nasync function handleWebSocketOpenEvent(): Promise {\n // place logic here which you would like invoked when the socket opens\n console.log('Web socket connection opened');\n await captureAudio();\n}" + }, + { + "lang": "typescript", + "code": "import { \n convertBase64ToBlob,\n getBrowserSupportedMimeType\n} from 'hume';\n\n// audio playback queue\nconst audioQueue: 
Blob[] = [];\n// flag which denotes whether audio is currently playing or not\nlet isPlaying = false;\n// the current audio element to be played\nlet currentAudio: : HTMLAudioElement | null = null;\n// mime type supported by the browser the application is running in\nconst mimeType: MimeType = (() => {\n const result = getBrowserSupportedMimeType();\n return result.success ? result.mimeType : MimeType.WEBM;\n})();\n\n// play the audio within the playback queue, converting each Blob into playable HTMLAudioElements\nfunction playAudio(): void {\n // IF there is nothing in the audioQueue OR audio is currently playing then do nothing\n if (!audioQueue.length || isPlaying) return;\n // update isPlaying state\n isPlaying = true;\n // pull next audio output from the queue\n const audioBlob = audioQueue.shift();\n // IF audioBlob is unexpectedly undefined then do nothing\n if (!audioBlob) return;\n // converts Blob to AudioElement for playback\n const audioUrl = URL.createObjectURL(audioBlob);\n currentAudio = new Audio(audioUrl);\n // play audio\n currentAudio.play();\n // callback for when audio finishes playing\n currentAudio.onended = () => {\n // update isPlaying state\n isPlaying = false;\n // attempt to pull next audio output from queue\n if (audioQueue.length) playAudio();\n };\n}\n\n// define a WebSocket message event handler to play audio output\nfunction handleWebSocketMessageEvent(\n message: Hume.empathicVoice.SubscribeEvent\n): void {\n // place logic here which you would like to invoke when receiving a message through the socket\n switch (message.type) {\n // add received audio to the playback queue, and play next audio output\n case 'audio_output':\n // convert base64 encoded audio to a Blob\n const audioOutput = message.data;\n const blob = convertBase64ToBlob(audioOutput, mimeType);\n // add audio Blob to audioQueue\n audioQueue.push(blob);\n // play the next audio output\n if (audioQueue.length === 1) playAudio();\n break;\n }\n}" + }, + { + "lang": "typescript", + "code": "import { \n convertBase64ToBlob,\n getBrowserSupportedMimeType\n} from 'hume';\n\n// audio playback queue\nconst audioQueue: Blob[] = [];\n// flag which denotes whether audio is currently playing or not\nlet isPlaying = false;\n// the current audio element to be played\nlet currentAudio: : HTMLAudioElement | null = null;\n// mime type supported by the browser the application is running in\nconst mimeType: MimeType = (() => {\n const result = getBrowserSupportedMimeType();\n return result.success ? 
result.mimeType : MimeType.WEBM;\n})();\n\n// play the audio within the playback queue, converting each Blob into playable HTMLAudioElements\nfunction playAudio(): void {\n // IF there is nothing in the audioQueue OR audio is currently playing then do nothing\n if (!audioQueue.length || isPlaying) return;\n // update isPlaying state\n isPlaying = true;\n // pull next audio output from the queue\n const audioBlob = audioQueue.shift();\n // IF audioBlob is unexpectedly undefined then do nothing\n if (!audioBlob) return;\n // converts Blob to AudioElement for playback\n const audioUrl = URL.createObjectURL(audioBlob);\n currentAudio = new Audio(audioUrl);\n // play audio\n currentAudio.play();\n // callback for when audio finishes playing\n currentAudio.onended = () => {\n // update isPlaying state\n isPlaying = false;\n // attempt to pull next audio output from queue\n if (audioQueue.length) playAudio();\n };\n}\n\n// define a WebSocket message event handler to play audio output\nfunction handleWebSocketMessageEvent(\n message: Hume.empathicVoice.SubscribeEvent\n): void {\n // place logic here which you would like to invoke when receiving a message through the socket\n switch (message.type) {\n // add received audio to the playback queue, and play next audio output\n case 'audio_output':\n // convert base64 encoded audio to a Blob\n const audioOutput = message.data;\n const blob = convertBase64ToBlob(audioOutput, mimeType);\n // add audio Blob to audioQueue\n audioQueue.push(blob);\n // play the next audio output\n if (audioQueue.length === 1) playAudio();\n break;\n }\n}" + }, + { + "lang": "typescript", + "code": "// function for stopping the audio and clearing the queue\nfunction stopAudio(): void {\n // stop the audio playback\n currentAudio?.pause();\n currentAudio = null;\n // update audio playback state\n isPlaying = false;\n // clear the audioQueue\n audioQueue.length = 0;\n}\n\n// update WebSocket message event handler to handle interruption\nfunction handleWebSocketMessageEvent(\n message: Hume.empathicVoice.SubscribeEvent\n): void {\n // place logic here which you would like to invoke when receiving a message through the socket\n switch (message.type) {\n // add received audio to the playback queue, and play next audio output\n case 'audio_output':\n // convert base64 encoded audio to a Blob\n const audioOutput = message.data;\n const blob = convertBase64ToBlob(audioOutput, mimeType);\n // add audio Blob to audioQueue\n audioQueue.push(blob);\n // play the next audio output\n if (audioQueue.length === 1) playAudio();\n break;\n // stop audio playback, clear audio playback queue, and update audio playback state on interrupt\n case 'user_interruption':\n stopAudio();\n break;\n }\n}" + }, + { + "lang": "typescript", + "code": "// function for stopping the audio and clearing the queue\nfunction stopAudio(): void {\n // stop the audio playback\n currentAudio?.pause();\n currentAudio = null;\n // update audio playback state\n isPlaying = false;\n // clear the audioQueue\n audioQueue.length = 0;\n}\n\n// update WebSocket message event handler to handle interruption\nfunction handleWebSocketMessageEvent(\n message: Hume.empathicVoice.SubscribeEvent\n): void {\n // place logic here which you would like to invoke when receiving a message through the socket\n switch (message.type) {\n // add received audio to the playback queue, and play next audio output\n case 'audio_output':\n // convert base64 encoded audio to a Blob\n const audioOutput = message.data;\n const blob = 
convertBase64ToBlob(audioOutput, mimeType);\n // add audio Blob to audioQueue\n audioQueue.push(blob);\n // play the next audio output\n if (audioQueue.length === 1) playAudio();\n break;\n // stop audio playback, clear audio playback queue, and update audio playback state on interrupt\n case 'user_interruption':\n stopAudio();\n break;\n }\n}" + }, + { + "lang": "typescript", + "code": "// ./app/page.tsx\nimport ClientComponent from \"@/components/ClientComponent\";\nimport { fetchAccessToken } from \"@humeai/voice\";\n\nexport default async function Page() {\n const accessToken = await fetchAccessToken({\n apiKey: String(process.env.HUME_API_KEY),\n clientSecret: String(process.env.HUME_CLIENT_SECRET),\n });\n\n if (!accessToken) {\n throw new Error();\n }\n\n return ;\n}" + }, + { + "lang": "typescript", + "code": "// ./app/page.tsx\nimport ClientComponent from \"@/components/ClientComponent\";\nimport { fetchAccessToken } from \"@humeai/voice\";\n\nexport default async function Page() {\n const accessToken = await fetchAccessToken({\n apiKey: String(process.env.HUME_API_KEY),\n clientSecret: String(process.env.HUME_CLIENT_SECRET),\n });\n\n if (!accessToken) {\n throw new Error();\n }\n\n return ;\n}" + }, + { + "lang": "typescript", + "code": "// ./components/ClientComponent.tsx \n\"use client\";\nimport { VoiceProvider } from \"@humeai/voice-react\";\nimport Messages from \"./Controls\";\nimport Controls from \"./Messages\";\n\nexport default function ClientComponent({\n accessToken,\n}: {\n accessToken: string;\n}) {\n return (\n \n \n \n \n );\n}" + }, + { + "lang": "typescript", + "code": "// ./components/ClientComponent.tsx \n\"use client\";\nimport { VoiceProvider } from \"@humeai/voice-react\";\nimport Messages from \"./Controls\";\nimport Controls from \"./Messages\";\n\nexport default function ClientComponent({\n accessToken,\n}: {\n accessToken: string;\n}) {\n return (\n \n \n \n \n );\n}" + }, + { + "lang": "typescript", + "code": "// ./components/Controls.tsx\n\"use client\";\nimport { useVoice, VoiceReadyState } from \"@humeai/voice-react\";\nexport default function Controls() {\n const { connect, disconnect, readyState } = useVoice();\n\n if (readyState === VoiceReadyState.OPEN) {\n return (\n {\n disconnect();\n }}\n >\n End Session\n \n );\n }\n\n return (\n {\n connect()\n .then(() => {\n /* handle success */\n })\n .catch(() => {\n /* handle error */\n });\n }}\n >\n Start Session\n \n );\n}" + }, + { + "lang": "typescript", + "code": "// ./components/Controls.tsx\n\"use client\";\nimport { useVoice, VoiceReadyState } from \"@humeai/voice-react\";\nexport default function Controls() {\n const { connect, disconnect, readyState } = useVoice();\n\n if (readyState === VoiceReadyState.OPEN) {\n return (\n {\n disconnect();\n }}\n >\n End Session\n \n );\n }\n\n return (\n {\n connect()\n .then(() => {\n /* handle success */\n })\n .catch(() => {\n /* handle error */\n });\n }}\n >\n Start Session\n \n );\n}" + }, + { + "lang": "typescript", + "code": "// ./components/Messages.tsx\n\"use client\";\nimport { useVoice } from \"@humeai/voice-react\";\n\nexport default function Messages() {\n const { messages } = useVoice();\n\n return (\n
<div>\n {messages.map((msg, index) => {\n if (msg.type === \"user_message\" || msg.type === \"assistant_message\") {\n return (\n <div key={msg.type + index}>\n <div>{msg.message.role}</div>\n <div>{msg.message.content}</div>\n </div>\n );\n }\n\n return null;\n })}\n </div>
\n );\n}" + }, + { + "lang": "typescript", + "code": "// ./components/Messages.tsx\n\"use client\";\nimport { useVoice } from \"@humeai/voice-react\";\n\nexport default function Messages() {\n const { messages } = useVoice();\n\n return (\n
<div>\n {messages.map((msg, index) => {\n if (msg.type === \"user_message\" || msg.type === \"assistant_message\") {\n return (\n <div key={msg.type + index}>\n <div>{msg.message.role}</div>\n <div>{msg.message.content}</div>\n </div>\n );\n }\n\n return null;\n })}\n </div>
\n );\n}" + }, + { + "lang": "typescript", + "code": "// ./pages/index.tsx\nimport Controls from \"@/components/Controls\";\nimport Messages from \"@/components/Messages\";\nimport { fetchAccessToken } from \"@humeai/voice\";\nimport { VoiceProvider } from \"@humeai/voice-react\";\nimport { InferGetServerSidePropsType } from \"next\";\n\nexport const getServerSideProps = async () => {\n const accessToken = await fetchAccessToken({\n apiKey: String(process.env.HUME_API_KEY),\n clientSecret: String(process.env.HUME_CLIENT_SECRET),\n });\n\n if (!accessToken) {\n return {\n redirect: {\n destination: \"/error\",\n permanent: false,\n },\n };\n }\n\n return {\n props: {\n accessToken,\n },\n };\n};\n\ntype PageProps = InferGetServerSidePropsType;\n\nexport default function Page({ accessToken }: PageProps) {\n return (\n \n \n \n \n );\n}" + }, + { + "lang": "typescript", + "code": "// ./pages/index.tsx\nimport Controls from \"@/components/Controls\";\nimport Messages from \"@/components/Messages\";\nimport { fetchAccessToken } from \"@humeai/voice\";\nimport { VoiceProvider } from \"@humeai/voice-react\";\nimport { InferGetServerSidePropsType } from \"next\";\n\nexport const getServerSideProps = async () => {\n const accessToken = await fetchAccessToken({\n apiKey: String(process.env.HUME_API_KEY),\n clientSecret: String(process.env.HUME_CLIENT_SECRET),\n });\n\n if (!accessToken) {\n return {\n redirect: {\n destination: \"/error\",\n permanent: false,\n },\n };\n }\n\n return {\n props: {\n accessToken,\n },\n };\n};\n\ntype PageProps = InferGetServerSidePropsType;\n\nexport default function Page({ accessToken }: PageProps) {\n return (\n \n \n \n \n );\n}" + }, + { + "lang": "typescript", + "code": "// ./components/Controls.tsx\nimport { useVoice, VoiceReadyState } from \"@humeai/voice-react\";\nexport default function Controls() {\n const { connect, disconnect, readyState } = useVoice();\n\n if (readyState === VoiceReadyState.OPEN) {\n return (\n {\n disconnect();\n }}\n >\n End Session\n \n );\n }\n\n return (\n {\n connect()\n .then(() => {\n /* handle success */\n })\n .catch(() => {\n /* handle error */\n });\n }}\n >\n Start Session\n \n );\n}" + }, + { + "lang": "typescript", + "code": "// ./components/Controls.tsx\nimport { useVoice, VoiceReadyState } from \"@humeai/voice-react\";\nexport default function Controls() {\n const { connect, disconnect, readyState } = useVoice();\n\n if (readyState === VoiceReadyState.OPEN) {\n return (\n {\n disconnect();\n }}\n >\n End Session\n \n );\n }\n\n return (\n {\n connect()\n .then(() => {\n /* handle success */\n })\n .catch(() => {\n /* handle error */\n });\n }}\n >\n Start Session\n \n );\n}" + }, + { + "lang": "typescript", + "code": "// ./components/Messages.tsx\nimport { useVoice } from \"@humeai/voice-react\";\n\nexport default function Messages() {\n const { messages } = useVoice();\n\n return (\n
<div>\n {messages.map((msg, index) => {\n if (msg.type === \"user_message\" || msg.type === \"assistant_message\") {\n return (\n <div key={msg.type + index}>\n <div>{msg.message.role}</div>\n <div>{msg.message.content}</div>\n </div>\n );\n }\n\n return null;\n })}\n </div>
\n );\n}" + }, + { + "lang": "typescript", + "code": "// ./components/Messages.tsx\nimport { useVoice } from \"@humeai/voice-react\";\n\nexport default function Messages() {\n const { messages } = useVoice();\n\n return (\n
<div>\n {messages.map((msg, index) => {\n if (msg.type === \"user_message\" || msg.type === \"assistant_message\") {\n return (\n <div key={msg.type + index}>\n <div>{msg.message.role}</div>\n <div>{msg.message.content}</div>\n </div>\n );\n }\n\n return null;\n })}\n </div>
\n );\n}" + }, + { + "lang": "bash", + "code": "pip install \"hume[microphone]\"" + }, + { + "lang": "bash", + "code": "brew update\nbrew upgrade\nbrew install ffmpeg" + }, + { + "lang": "bash", + "code": "brew update\nbrew upgrade\nbrew install ffmpeg" + }, + { + "lang": "bash", + "code": "sudo apt-get --yes update\nsudo apt-get --yes install libasound2-dev libportaudio2 ffmpeg" + }, + { + "lang": "bash", + "code": "sudo apt-get --yes update\nsudo apt-get --yes install libasound2-dev libportaudio2 ffmpeg" + }, + { + "lang": "text", + "code": "Not yet supported" + }, + { + "lang": "text", + "code": "Not yet supported" + }, + { + "lang": "python", + "code": "import asyncio\nfrom hume import HumeVoiceClient, MicrophoneInterface" + }, + { + "lang": "python", + "code": "import asyncio\nfrom hume import HumeVoiceClient, MicrophoneInterface" + }, + { + "lang": "python", + "code": "async def main() -> None:\n # Paste your Hume API key here.\n HUME_API_KEY = \"HUME_API_KEY\"\n # Connect and authenticate with Hume\n client = HumeVoiceClient(HUME_API_KEY)\n\n # Start streaming EVI over your device's microphone and speakers\n async with client.connect() as socket:\n await MicrophoneInterface.start(socket)\n" + }, + { + "lang": "python", + "code": "async def main() -> None:\n # Paste your Hume API key here.\n HUME_API_KEY = \"HUME_API_KEY\"\n # Connect and authenticate with Hume\n client = HumeVoiceClient(HUME_API_KEY)\n\n # Start streaming EVI over your device's microphone and speakers\n async with client.connect() as socket:\n await MicrophoneInterface.start(socket)\n" + }, + { + "lang": "python", + "code": "asyncio.run(main())" + }, + { + "lang": "python", + "code": "asyncio.run(main())" + }, + { + "lang": "python", + "code": "import asyncio\nfrom hume import HumeVoiceClient, MicrophoneInterface\n\nasync def main() -> None:\n # Paste your Hume API key here\n HUME_API_KEY = \"HUME_API_KEY\"\n # Connect and authenticate with Hume\n client = HumeVoiceClient(HUME_API_KEY)\n\n # Start streaming EVI over your device's microphone and speakers \n async with client.connect() as socket:\n await MicrophoneInterface.start(socket)\nasyncio.run(main())" + }, + { + "lang": "python", + "code": "import asyncio\nfrom hume import HumeVoiceClient, MicrophoneInterface\n\nasync def main() -> None:\n # Paste your Hume API key here\n HUME_API_KEY = \"HUME_API_KEY\"\n # Connect and authenticate with Hume\n client = HumeVoiceClient(HUME_API_KEY)\n\n # Start streaming EVI over your device's microphone and speakers \n async with client.connect() as socket:\n await MicrophoneInterface.start(socket)\nasyncio.run(main())" + }, + { + "lang": "bash", + "code": "pip install \"hume[microphone]\"" + } + ] + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.configuration", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/configuration", + "page_title": "Configuring EVI", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "Guide to configuring the Empathic Voice Interface (EVI)", + "content": "The Empathic Voice Interface (EVI) is designed to be highly configurable, allowing developers to customize the interface to align with their specific requirements.\nConfiguration of EVI can be managed through two primary methods: an EVI configuration 
and session settings." + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.configuration-evi-configuration", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/configuration", + "page_title": "Configuration", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#evi-configuration", + "content": "EVI configuration options affect the behavior and capabilities of the interface, and include the following configuration options:\nSystem prompt: Set the system prompt text to provide instructions and context that guide how EVI should respond.\n\nLanguage model: Select a language model that best fits your application’s needs. For details on incorporating your own language model, refer to our guide\non using your own language model.\n\nVoice: Select a voice for EVI from a growing list of available options.\n\nTools: Choose user-created tools or built-in tools for EVI to use during conversations. For details on creating tools and adding them to your configuration,\nsee our guide on tool use.\n\n\n\n\nConfigs, as well as system prompts, tools, and language models, are versioned. This versioning system supports iterative development, allowing you to progressively refine\nconfigurations and revert to previous versions if needed.\nSee instructions below for creating an EVI configuration through the Portal.\n\n\nCreate a configuration\nIn the playground navigate to the Voice Configurations page. Click the Create configuration button to begin.\n\n\nVoice configurations page\nSpecify the name of the configuration, a description, a system prompt, a voice, and click the Create button to create your new configuration.\n\n\nCreate your configuration\n\n\nFor guidance on engineering your system prompt, see our prompting guide.\nTest the configuration\nThe newly created configuration can now be tested. From the Voice Config details page, click Run in playground to test it out.\n\n\nConfiguration details page\nOnce in the Voice Playground, click Start Call to connect to EVI with your configuration.\n\n\nVoice playground\nApply the configuration\nOnce you have created an EVI configuration, you can apply it to your conversations with EVI through the API. This involves including the config_id in\nthe query parameters of your connection request. 
You can find the config ID associated with your newly created configuration, on the\nVoice Configurations page.\n\n\nConfiguration ID\nSee the sample code below which showcases how to apply your configuration:", + "code_snippets": [ + { + "lang": "typescript", + "code": "import { Hume, HumeClient } from 'hume';\n// instantiate the Hume client\nconst client = new HumeClient({\n apiKey: ,\n clientSecret: ,\n});\n// instantiate WebSocket connection with specified EVI config\nconst socket = await client.empathicVoice.chat.connect({\n configId: // specify config ID here\n});" + }, + { + "lang": "typescript", + "code": "import { Hume, HumeClient } from 'hume';\n// instantiate the Hume client\nconst client = new HumeClient({\n apiKey: ,\n clientSecret: ,\n});\n// instantiate WebSocket connection with specified EVI config\nconst socket = await client.empathicVoice.chat.connect({\n configId: // specify config ID here\n});" + }, + { + "lang": "python", + "code": "from hume import HumeVoiceClient, VoiceConfig\n# Retrieve the Hume API key from the environment variables\nHUME_API_KEY = os.getenv(\"HUME_API_KEY\")\n# Connect and authenticate with Hume\nclient = HumeVoiceClient(HUME_API_KEY)\n# Establish a connection with EVI with your configuration by passing\n# the config_id as an argument to the connect method\nasync with client.connect(config_id=\"\") as socket:\n await MicrophoneInterface.start(socket)" + }, + { + "lang": "python", + "code": "from hume import HumeVoiceClient, VoiceConfig\n# Retrieve the Hume API key from the environment variables\nHUME_API_KEY = os.getenv(\"HUME_API_KEY\")\n# Connect and authenticate with Hume\nclient = HumeVoiceClient(HUME_API_KEY)\n# Establish a connection with EVI with your configuration by passing\n# the config_id as an argument to the connect method\nasync with client.connect(config_id=\"\") as socket:\n await MicrophoneInterface.start(socket)" + } + ], + "hierarchy": { + "h2": { + "id": "evi-configuration", + "title": "EVI configuration" + } + }, + "level": "h2", + "level_title": "EVI configuration" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.configuration-session-settings", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/configuration", + "page_title": "Configuration", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#session-settings", + "content": "EVI configurations are persistent and version-controlled. In contrast, session settings are temporary and apply only to the current session, such as\nmicrophone settings. These parameters can be adjusted dynamically based on the requirements of each session to ensure optimal performance and user experience.\n\n\nRefer to the API reference for detailed descriptions of the various system settings options.\nUpdating the session settings is only a requirement when the audio input is encoded in PCM Linear 16. 
If this is the case, be sure to send the following SessionSettingsMessage prior to sending an audio input:", + "code_snippets": [ + { + "lang": "json", + "code": "{\n \"type\": \"session_settings\",\n \"audio\": {\n \"channels\": 1,\n \"encoding\": \"linear16\",\n \"sample_rate\": 48000\n }\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"session_settings\",\n \"audio\": {\n \"channels\": 1,\n \"encoding\": \"linear16\",\n \"sample_rate\": 48000\n }\n}" + } + ], + "hierarchy": { + "h2": { + "id": "session-settings", + "title": "Session settings" + } + }, + "level": "h2", + "level_title": "Session settings" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.tool-use", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/tool-use", + "page_title": "Tool use", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "Guide to using function calling with the Empathic Voice Interface (EVI).", + "content": "EVI simplifies the integration of external APIs through function calling. Developers can integrate custom functions that are invoked dynamically based on the user’s\ninput, enabling more useful conversations. There are two key concepts for using function calling with EVI, Tools and Configurations (Configs):\nTools are resources that EVI uses to do things, like search the web or call external APIs. For example, tools can check the weather, update databases, schedule appointments, or take\nactions based on what occurs in the conversation. While the tools can be user-defined, Hume also offers natively implemented tools, like web search, which are labeled as “built-in” tools.\n\nConfigurations enable developers to customize an EVI’s behavior and incorporate these custom tools. Setting up an EVI configuration allows developers to seamlessly integrate\ntheir tools into the voice interface. A configuration includes prompts, user-defined tools, and other settings.\n\n\n\n\nCurrently, our function calling feature only supports\nOpenAI models.\nFunction calling is not available if you are using your own custom language\nmodel. We plan to\nsupport more function calling LLMs in the future.\nThe focus of this guide is on creating a Tool and a Configuration that allows EVI to use the Tool. Additionally, this guide details the message flow of function calls within a\nsession, and outlines the expected responses when function calls fail. Refer to our Configuration Guide for detailed,\nstep-by-step instructions on how to create and use an EVI Configuration.\n\n\nExplore this sample\nproject\nfor an example of how Tool use could be implemented in practice." + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.tool-use-setup", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/tool-use", + "page_title": "Tool use", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#setup", + "content": "For EVI to leverage tools or call functions, a configuration must be created with the tool’s definition. 
Our step-by-step guide below walks you through creating a tool and a configuration.\n\n\nCreate a Tool\nWe will first create a Tool with a specified function. In this case, we will create a tool for getting the weather. Create this tool by making a POST request to\n/tools with the following request body:\n\n\n\n\nThe parameters field must contain a valid JSON schema.\n\n\nRecord the value in the id field, as we will use it to specify the newly created Tool in the next step.\nCreate a Configuration\nNext we will create an EVI Configuration called Weather Assistant Config, and include the created Tool by making a POST request to /configs with the\nfollowing request body:\n\n\n\n\n\n\nEnsure your tool definitions conform to the language model's schema. The\nspecified language model will be the one to execute the function calls.", + "code_snippets": [ + { + "lang": "json", + "code": "{\n \"name\": \"get_current_weather\",\n \"version_description\": \"Fetches current weather and uses celsius or fahrenheit based on user's location.\",\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n}" + }, + { + "lang": "json", + "code": "{\n \"name\": \"get_current_weather\",\n \"version_description\": \"Fetches current weather and uses celsius or fahrenheit based on user's location.\",\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n}" + }, + { + "lang": "json", + "code": "{\n \"tool_type\": \"FUNCTION\",\n \"id\": \"15c38b04-ec9c-4ae2-b6bc-5603512b5d00\",\n \"version\": 0,\n \"version_description\": \"Fetches current weather and uses celsius or fahrenheit based on user's location.\",\n \"name\": \"get_current_weather\",\n \"created_on\": 1714421925626,\n \"modified_on\": 1714421925626,\n \"fallback_content\": null,\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. 
Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n}" + }, + { + "lang": "json", + "code": "{\n \"tool_type\": \"FUNCTION\",\n \"id\": \"15c38b04-ec9c-4ae2-b6bc-5603512b5d00\",\n \"version\": 0,\n \"version_description\": \"Fetches current weather and uses celsius or fahrenheit based on user's location.\",\n \"name\": \"get_current_weather\",\n \"created_on\": 1714421925626,\n \"modified_on\": 1714421925626,\n \"fallback_content\": null,\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n}" + }, + { + "lang": "json", + "code": "{\n \"name\": \"Weather Assistant Config\",\n \"language_model\": {\n \"model_provider\": \"OPEN_AI\",\n \"model_resource\": \"gpt-3.5-turbo\",\n \"temperature\": null\n },\n \"tools\": [\n {\n \"id\": \"15c38b04-ec9c-4ae2-b6bc-5603512b5d00\",\n \"version\": 0\n }\n ]\n}" + }, + { + "lang": "json", + "code": "{\n \"name\": \"Weather Assistant Config\",\n \"language_model\": {\n \"model_provider\": \"OPEN_AI\",\n \"model_resource\": \"gpt-3.5-turbo\",\n \"temperature\": null\n },\n \"tools\": [\n {\n \"id\": \"15c38b04-ec9c-4ae2-b6bc-5603512b5d00\",\n \"version\": 0\n }\n ]\n}" + }, + { + "lang": "json", + "code": "{\n \"id\": \"87e88a1a-3768-4a01-ba54-2e6d247a00a7\",\n \"version\": 0,\n \"version_description\": null,\n \"name\": \"Weather Assistant Config\",\n \"created_on\": 1714421581844,\n \"modified_on\": 1714421581844,\n \"prompt\": null,\n \"voice\": null,\n \"language_model\": {\n \"model_provider\": \"OPEN_AI\",\n \"model_resource\": \"gpt-3.5-turbo\",\n \"temperature\": null\n },\n \"tools\": [\n {\n \"tool_type\": \"FUNCTION\",\n \"id\": \"15c38b04-ec9c-4ae2-b6bc-5603512b5d00\",\n \"version\": 0,\n \"version_description\": \"Fetches current weather and uses celsius or fahrenheit based on user's location.\",\n \"name\": \"get_current_weather\",\n \"created_on\": 1714421925626,\n \"modified_on\": 1714421925626,\n \"fallback_content\": null,\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. 
Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n }\n ],\n \"builtin_tools\": []\n}" + }, + { + "lang": "json", + "code": "{\n \"id\": \"87e88a1a-3768-4a01-ba54-2e6d247a00a7\",\n \"version\": 0,\n \"version_description\": null,\n \"name\": \"Weather Assistant Config\",\n \"created_on\": 1714421581844,\n \"modified_on\": 1714421581844,\n \"prompt\": null,\n \"voice\": null,\n \"language_model\": {\n \"model_provider\": \"OPEN_AI\",\n \"model_resource\": \"gpt-3.5-turbo\",\n \"temperature\": null\n },\n \"tools\": [\n {\n \"tool_type\": \"FUNCTION\",\n \"id\": \"15c38b04-ec9c-4ae2-b6bc-5603512b5d00\",\n \"version\": 0,\n \"version_description\": \"Fetches current weather and uses celsius or fahrenheit based on user's location.\",\n \"name\": \"get_current_weather\",\n \"created_on\": 1714421925626,\n \"modified_on\": 1714421925626,\n \"fallback_content\": null,\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n }\n ],\n \"builtin_tools\": []\n}" + } + ], + "hierarchy": { + "h2": { + "id": "setup", + "title": "Setup" + } + }, + "level": "h2", + "level_title": "Setup" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.tool-use-function-calling", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/tool-use", + "page_title": "Tool use", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#function-calling", + "content": "In this section we will go over the end-to-end flow of a function call within a chat session. This flow will be predicated on having specified the\nWeather Assistant Config when establishing a connection with EVI. See our Configuration Guide\nfor details on how to apply your configuration when connecting.\n\n\nCurrently, EVI does not support parallel function calling. Only one function\ncall can be processed at a time.\n\n\nInvoke function call\nWith EVI configured to use the get_current_weather Tool, we can now ask it: \"what is the weather in New York?\" We can expect EVI to respond with a user_message and a tool_call message:\n\n\n\n\nNext, extract the tool_call_id from the tool_call message to be used in the next step. Then, you will need to pass the parameters from the tool_call\nmessage to your function to retrieve the weather for the designated city in the specified format.\n\n\nWhile EVI will send a message to indicate when to invoke your function and\nwhich parameters to pass into it, you will need to define the function itself\nin your code. For the sake of this example, you can define a function which\nactually calls a weather API, or simply hard code a return value like: 60F.\nSend function call result\nUpon receiving the response from your function, we will then send a tool_response message containing the result. 
The specified tool_call_id should match the one received in\nthe tool_call message in the previous step.\n\n\nEVI responds\nAfter the interface receives the tool_response message, it will then send an assistant_message containing the response generated from the reported result of the function call:", + "code_snippets": [ + { + "lang": "json", + "code": "{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What's the weather in New York?\"\n },\n // ...etc\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What's the weather in New York?\"\n },\n // ...etc\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"tool_call\",\n \"tool_type\": \"function\",\n \"response_required\": true,\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"New York\\\",\\\"format\\\":\\\"fahrenheit\\\"}\"\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"tool_call\",\n \"tool_type\": \"function\",\n \"response_required\": true,\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"New York\\\",\\\"format\\\":\\\"fahrenheit\\\"}\"\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"tool_response\",\n \"tool_call_id\":\"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"content\":\"60F\"\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"tool_response\",\n \"tool_call_id\":\"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"content\":\"60F\"\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"assistant_message\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The current weather in New York is 60F.\"\n }\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"assistant_message\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The current weather in New York is 60F.\"\n }\n}" + } + ], + "hierarchy": { + "h2": { + "id": "function-calling", + "title": "Function calling" + } + }, + "level": "h2", + "level_title": "Function calling" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.tool-use-using-built-in-tools", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/tool-use", + "page_title": "Tool use", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#using-built-in-tools", + "content": "User-defined tools allow EVI to identify when a function should be invoked, but you implement the function itself. On the other hand, Hume also provides built-in tools that are natively integrated. This\nmeans that you don't need to define the function; EVI handles both determining when the function needs to be called and invoking it.\nOne such example of a built-in tool we provide is Web search. 
Web search equips EVI with the ability to search the web for up-to-date information.\nThis section explains how to specify built-in tools in your configurations and details the message flow you can expect when EVI uses a built-in tool during a chat session.\n\n\nSpecify built-in tool in EVI configuration\nLet's begin by creating a configuration which includes the built-in web search tool by making a POST request to /configs with the following request body:\n\n\n\n\nEVI uses built-in tool\nNow that we've created an EVI configuration which includes the built-in web search tool, let's review the message flow for when web search is invoked.", + "code_snippets": [ + { + "lang": "json", + "code": "{\n \"name\": \"Web Search Config\",\n \"language_model\": {\n \"model_provider\": \"OPEN_AI\",\n \"model_resource\": \"gpt-3.5-turbo\"\n },\n \"builtin_tools\": [\n { \n \"name\": \"web_search\",\n \"fallback_content\": \"Optional fallback content to inform EVI’s spoken response if web search is not successful.\"\n }\n ]\n}" + }, + { + "lang": "json", + "code": "{\n \"name\": \"Web Search Config\",\n \"language_model\": {\n \"model_provider\": \"OPEN_AI\",\n \"model_resource\": \"gpt-3.5-turbo\"\n },\n \"builtin_tools\": [\n { \n \"name\": \"web_search\",\n \"fallback_content\": \"Optional fallback content to inform EVI’s spoken response if web search is not successful.\"\n }\n ]\n}" + }, + { + "lang": "json", + "code": "{\n \"id\": \"3a60e85c-d04f-4eb5-8076-fb4bd344d5d0\",\n \"version\": 0,\n \"version_description\": null,\n \"name\": \"Web Search Config\",\n \"created_on\": 1714421925626,\n \"modified_on\": 1714421925626,\n \"prompt\": null,\n \"voice\": null,\n \"language_model\": {\n \"model_provider\": \"OPEN_AI\",\n \"model_resource\": \"gpt-3.5-turbo\",\n \"temperature\": null\n },\n \"tools\": [],\n \"builtin_tools\": [\n {\n \"tool_type\": \"BUILTIN\",\n \"name\": \"web_search\",\n \"fallback_content\": \"Optional fallback content to inform EVI’s spoken response if web search is not successful.\"\n }\n ]\n}" + }, + { + "lang": "json", + "code": "{\n \"id\": \"3a60e85c-d04f-4eb5-8076-fb4bd344d5d0\",\n \"version\": 0,\n \"version_description\": null,\n \"name\": \"Web Search Config\",\n \"created_on\": 1714421925626,\n \"modified_on\": 1714421925626,\n \"prompt\": null,\n \"voice\": null,\n \"language_model\": {\n \"model_provider\": \"OPEN_AI\",\n \"model_resource\": \"gpt-3.5-turbo\",\n \"temperature\": null\n },\n \"tools\": [],\n \"builtin_tools\": [\n {\n \"tool_type\": \"BUILTIN\",\n \"name\": \"web_search\",\n \"fallback_content\": \"Optional fallback content to inform EVI’s spoken response if web search is not successful.\"\n }\n ]\n}" + }, + { + "lang": "json", + "code": "// 1. User asks EVI for the latest news in AI research\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What is the latest news with AI research?\"\n },\n // ...etc\n}\n// 2. EVI infers it needs to use web search, generates a search query, and invokes Hume's native web search function\n{\n \"name\": \"web_search\", \n \"parameters\": \"{\\\"query\\\":\\\"latest news AI research\\\"}\", \n \"tool_call_id\": \"call_zt1NYGpPkhR7v4kb4RPxTkLn\", \n \"type\": \"tool_call\", \n \"tool_type\": \"builtin\", \n \"response_required\": false\n}\n// 3. 
EVI sends back the web search results \n{\n \"type\": \"tool_response\", \n \"tool_call_id\": \"call_zt1NYGpPkhR7v4kb4RPxTkLn\", \n \"content\": \"{ \\”summary\\”:null, “references”: [{\\”content\\”:\\”The latest NVIDIA news is...etc.\\”, \\”url\\”:\\”https://www.artificialintelligence-news.com/\\”, \\”name\\”:\\”AI News - Artificial Intelligence News\\”}] }\", \n \"tool_name\": \"web_search\", \n \"tool_type\": \"builtin\"\n}\n// 4. EVI sends a response generated from the web search results\n{\n \"type\": \"assistant_message\", \n \"message\": {\n \"role\": \"assistant\", \n \"content\": \"IBM Research unveiled a breakthrough analog AI chip for efficient deep learning, and Quantum AI is making transformative advancements by harnessing quantum mechanics.\"\n },\n // ...etc\n}" + }, + { + "lang": "json", + "code": "// 1. User asks EVI for the latest news in AI research\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What is the latest news with AI research?\"\n },\n // ...etc\n}\n// 2. EVI infers it needs to use web search, generates a search query, and invokes Hume's native web search function\n{\n \"name\": \"web_search\", \n \"parameters\": \"{\\\"query\\\":\\\"latest news AI research\\\"}\", \n \"tool_call_id\": \"call_zt1NYGpPkhR7v4kb4RPxTkLn\", \n \"type\": \"tool_call\", \n \"tool_type\": \"builtin\", \n \"response_required\": false\n}\n// 3. EVI sends back the web search results \n{\n \"type\": \"tool_response\", \n \"tool_call_id\": \"call_zt1NYGpPkhR7v4kb4RPxTkLn\", \n \"content\": \"{ \\”summary\\”:null, “references”: [{\\”content\\”:\\”The latest NVIDIA news is...etc.\\”, \\”url\\”:\\”https://www.artificialintelligence-news.com/\\”, \\”name\\”:\\”AI News - Artificial Intelligence News\\”}] }\", \n \"tool_name\": \"web_search\", \n \"tool_type\": \"builtin\"\n}\n// 4. 
EVI sends a response generated from the web search results\n{\n \"type\": \"assistant_message\", \n \"message\": {\n \"role\": \"assistant\", \n \"content\": \"IBM Research unveiled a breakthrough analog AI chip for efficient deep learning, and Quantum AI is making transformative advancements by harnessing quantum mechanics.\"\n },\n // ...etc\n}" + } + ], + "hierarchy": { + "h2": { + "id": "using-built-in-tools", + "title": "Using built-in tools" + } + }, + "level": "h2", + "level_title": "Using built-in tools" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.tool-use-interruptibility", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/tool-use", + "page_title": "Tool use", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#interruptibility", + "content": "Function calls can be interrupted to cancel them or to resend them with updated parameters.", + "hierarchy": { + "h2": { + "id": "interruptibility", + "title": "Interruptibility" + } + }, + "level": "h2", + "level_title": "Interruptibility" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.tool-use-canceling-a-function-call", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/tool-use", + "page_title": "Tool use", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#canceling-a-function-call", + "content": "Just as EVI is able to infer when to make a function call, it can also infer from the user's input when to cancel one. Here is an overview of what the message flow would look like:", + "code_snippets": [ + { + "lang": "json", + "code": "// 1. User asks what the weather is in New York\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What's the weather in New York?\"\n },\n // ...etc\n}\n// 2. EVI infers it is time to make a function call\n{\n \"type\": \"tool_call\",\n \"tool_type\": \"function\",\n \"response_required\": true,\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"New York\\\",\\\"format\\\":\\\"fahrenheit\\\"}\"\n}\n// 3. User communicates sudden disinterested in the weather\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"Actually, never mind.\"\n }\n}\n// 4. EVI infers the function call should be canceled\n{\n \"type\": \"assistant_message\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Okay, never mind then. Can I help you with anything else?\"\n },\n // ...etc\n }" + }, + { + "lang": "json", + "code": "// 1. User asks what the weather is in New York\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What's the weather in New York?\"\n },\n // ...etc\n}\n// 2. 
EVI infers it is time to make a function call\n{\n \"type\": \"tool_call\",\n \"tool_type\": \"function\",\n \"response_required\": true,\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"New York\\\",\\\"format\\\":\\\"fahrenheit\\\"}\"\n}\n// 3. User communicates sudden disinterested in the weather\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"Actually, never mind.\"\n }\n}\n// 4. EVI infers the function call should be canceled\n{\n \"type\": \"assistant_message\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Okay, never mind then. Can I help you with anything else?\"\n },\n // ...etc\n }" + } + ], + "hierarchy": { + "h2": { + "id": "canceling-a-function-call", + "title": "Canceling a function call" + }, + "h3": { + "id": "canceling-a-function-call", + "title": "Canceling a function call" + } + }, + "level": "h3", + "level_title": "Canceling a function call" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.tool-use-updating-a-function-call", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/tool-use", + "page_title": "Tool use", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#updating-a-function-call", + "content": "Sometimes we don't necessarily want to cancel the function call, and instead want to update the parameters. EVI can infer the difference. Below is a sample flow of\ninterrupting the interface to update the parameters of the function call:", + "code_snippets": [ + { + "lang": "json", + "code": "// 1. User asks EVI what the weather is in New York\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What's the weather in New York?\"\n },\n // ...etc\n}\n// 2. EVI infers it is time to make a function call\n{\n \"type\": \"tool_call\",\n \"tool_type\": \"function\",\n \"response_required\": true,\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"New York\\\",\\\"format\\\":\\\"fahrenheit\\\"}\"\n}\n// 3. User communicates to EVI they want the weather in Los Angeles instead\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"Actually, Los Angeles.\"\n }\n}\n// 4. EVI infers the parameters to function call should be updated\n{\n \"type\": \"tool_call\",\n \"response_required\": true,\n \"tool_call_id\": \"call_5RWLt3IMQyayzGdvMQVn5AOQ\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"Los Angeles\\\",\\\"format\\\":\\\"celsius\\\"}\"\n}\n// 5. User sends results of function call to EVI\n{\n \"type\": \"tool_response\",\n \"tool_call_id\":\"call_5RWLt3IMQyayzGdvMQVn5AOQ\",\n \"content\":\"72F\"\n}\n// 6. EVI sends response container function call result\n{\n \"type\": \"assistant_message\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The current weather in Los Angeles is 72F.\"\n },\n // ...etc\n}" + }, + { + "lang": "json", + "code": "// 1. User asks EVI what the weather is in New York\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What's the weather in New York?\"\n },\n // ...etc\n}\n// 2. 
EVI infers it is time to make a function call\n{\n \"type\": \"tool_call\",\n \"tool_type\": \"function\",\n \"response_required\": true,\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"New York\\\",\\\"format\\\":\\\"fahrenheit\\\"}\"\n}\n// 3. User communicates to EVI they want the weather in Los Angeles instead\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"Actually, Los Angeles.\"\n }\n}\n// 4. EVI infers the parameters to function call should be updated\n{\n \"type\": \"tool_call\",\n \"response_required\": true,\n \"tool_call_id\": \"call_5RWLt3IMQyayzGdvMQVn5AOQ\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"Los Angeles\\\",\\\"format\\\":\\\"celsius\\\"}\"\n}\n// 5. User sends results of function call to EVI\n{\n \"type\": \"tool_response\",\n \"tool_call_id\":\"call_5RWLt3IMQyayzGdvMQVn5AOQ\",\n \"content\":\"72F\"\n}\n// 6. EVI sends response container function call result\n{\n \"type\": \"assistant_message\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The current weather in Los Angeles is 72F.\"\n },\n // ...etc\n}" + } + ], + "hierarchy": { + "h2": { + "id": "updating-a-function-call", + "title": "Updating a function call" + }, + "h3": { + "id": "updating-a-function-call", + "title": "Updating a function call" + } + }, + "level": "h3", + "level_title": "Updating a function call" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.tool-use-handling-errors", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/tool-use", + "page_title": "Tool use", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#handling-errors", + "content": "It's possible for tool use to fail. For example, it can fail if the tool_response message content was not in UTF-8 format or if the function call response timed out. This\nsection outlines how to specify fallback content to be used by EVI to communicate a failure, as well as the message flow for when a function call failure occurs.", + "hierarchy": { + "h2": { + "id": "handling-errors", + "title": "Handling errors" + } + }, + "level": "h2", + "level_title": "Handling errors" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.tool-use-specifying-fallback-content", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/tool-use", + "page_title": "Tool use", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#specifying-fallback-content", + "content": "When defining your Tool, you can specify fallback content within the Tool's fallback_content field. When the Tool fails to generate content, the text in this\nfield will be sent to the LLM in place of a result. To accomplish this, let's update the Tool we created during setup to include fallback content. 
We can accomplish\nthis by publishing a new version of the Tool via a POST request to /tools/{id}:", + "code_snippets": [ + { + "lang": "json", + "code": "{\n \"version_description\": \"Adds fallback content\",\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\",\n \"fallback_content\": \"Something went wrong. Failed to get the weather.\"\n}" + }, + { + "lang": "json", + "code": "{\n \"version_description\": \"Adds fallback content\",\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\",\n \"fallback_content\": \"Something went wrong. Failed to get the weather.\"\n}" + }, + { + "lang": "json", + "code": "{\n \"tool_type\": \"FUNCTION\",\n \"id\": \"36f09fdc-4630-40c0-8afa-6a3bdc4eb4b1\",\n \"version\": 1,\n \"version_type\": \"FIXED\",\n \"version_description\": \"Adds fallback content\",\n \"name\": \"get_current_weather\",\n \"created_on\": 1714421925626,\n \"modified_on\": 1714425632084,\n \"fallback_content\": \"Something went wrong. Failed to get the weather.\",\n \"description\": null,\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. Infer this from the user's location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n}" + }, + { + "lang": "json", + "code": "{\n \"tool_type\": \"FUNCTION\",\n \"id\": \"36f09fdc-4630-40c0-8afa-6a3bdc4eb4b1\",\n \"version\": 1,\n \"version_type\": \"FIXED\",\n \"version_description\": \"Adds fallback content\",\n \"name\": \"get_current_weather\",\n \"created_on\": 1714421925626,\n \"modified_on\": 1714425632084,\n \"fallback_content\": \"Something went wrong. Failed to get the weather.\",\n \"description\": null,\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. 
Infer this from the user's location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n}" + } + ], + "hierarchy": { + "h2": { + "id": "specifying-fallback-content", + "title": "Specifying fallback content" + }, + "h3": { + "id": "specifying-fallback-content", + "title": "Specifying fallback content" + } + }, + "level": "h3", + "level_title": "Specifying fallback content" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.tool-use-failure-message-flow", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/tool-use", + "page_title": "Tool use", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#failure-message-flow", + "content": "This section outlines the sort of messages that can be expected when Tool use fails. After sending a tool-response message, we will know an error, or failure,\noccurred when we receive the tool_error message:\n\n\nLet's cover another type of failure scenario: what if the weather API the function was using was down? In this case we would send EVI a tool_error message.\nWhen sending the tool_error message we can specify fallback_content more specific to the error our function throws. This is what the message flow would be\nfor this type of failure:", + "code_snippets": [ + { + "lang": "json", + "code": "// 1. User asks EVI what the weather is in New York\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What's the weather in New York?\"\n },\n // ...etc\n}\n// 2. EVI infers it is time to make a function call\n{\n \"type\": \"tool_call\",\n \"tool_type\": \"function\",\n \"response_required\": true,\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"New York\\\",\\\"format\\\":\\\"fahrenheit\\\"}\"\n}\n// 3. User sends results of function call to EVI (result not formatted correctly)\n{\n \"type\": \"tool_response\",\n \"tool_call_id\":\"call_5RWLt3IMQyayzGdvMQVn5AOQ\",\n \"content\":\"60F\"\n}\n// 4. EVI sends response communicating it failed to process the tool_response\n{\n \"type\": \"tool_error\",\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"error\": \"Malformed tool response: \",\n \"fallback_content\": \"Something went wrong. Failed to get the weather.\",\n \"level\": \"warn\"\n}\n// 5. EVI generates a response based on the failure\n{\n \"type\": \"assistant_message\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Sorry, I wasn't able to get the weather. Can I help with anything else?\"\n },\n // ...etc\n}" + }, + { + "lang": "json", + "code": "// 1. User asks EVI what the weather is in New York\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What's the weather in New York?\"\n },\n // ...etc\n}\n// 2. EVI infers it is time to make a function call\n{\n \"type\": \"tool_call\",\n \"tool_type\": \"function\",\n \"response_required\": true,\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"New York\\\",\\\"format\\\":\\\"fahrenheit\\\"}\"\n}\n// 3. 
User sends results of function call to EVI (result not formatted correctly)\n{\n \"type\": \"tool_response\",\n \"tool_call_id\":\"call_5RWLt3IMQyayzGdvMQVn5AOQ\",\n \"content\":\"60F\"\n}\n// 4. EVI sends response communicating it failed to process the tool_response\n{\n \"type\": \"tool_error\",\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"error\": \"Malformed tool response: \",\n \"fallback_content\": \"Something went wrong. Failed to get the weather.\",\n \"level\": \"warn\"\n}\n// 5. EVI generates a response based on the failure\n{\n \"type\": \"assistant_message\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Sorry, I wasn't able to get the weather. Can I help with anything else?\"\n },\n // ...etc\n}" + }, + { + "lang": "json", + "code": "// 1. User asks EVI what the weather is in New York\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What's the weather in New York?\"\n },\n // ...etc\n}\n// 2. EVI infers it is time to make a function call\n{\n \"type\": \"tool_call\",\n \"tool_type\": \"function\",\n \"response_required\": true,\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"New York\\\",\\\"format\\\":\\\"fahrenheit\\\"}\"\n}\n// 3. Function failed, so we send EVI a message communicating the failure on our end\n{\n \"type\": \"tool_error\",\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"error\": \"Malformed tool response: \",\n \"fallback_content\": \"Function execution failure - weather API down.\",\n \"level\": \"warn\"\n}\n// 4. EVI generates a response based on the failure\n{\n \"type\": \"assistant_message\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Sorry, our weather resource is unavailable. Can I help with anything else?\"\n },\n // ...etc\n}" + }, + { + "lang": "json", + "code": "// 1. User asks EVI what the weather is in New York\n{\n \"type\": \"user_message\",\n \"message\": {\n \"role\": \"user\",\n \"content\": \"What's the weather in New York?\"\n },\n // ...etc\n}\n// 2. EVI infers it is time to make a function call\n{\n \"type\": \"tool_call\",\n \"tool_type\": \"function\",\n \"response_required\": true,\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"name\": \"get_current_weather\",\n \"parameters\": \"{\\\"location\\\":\\\"New York\\\",\\\"format\\\":\\\"fahrenheit\\\"}\"\n}\n// 3. Function failed, so we send EVI a message communicating the failure on our end\n{\n \"type\": \"tool_error\",\n \"tool_call_id\": \"call_m7PTzGxrD0i9oCHiquKIaibo\",\n \"error\": \"Malformed tool response: \",\n \"fallback_content\": \"Function execution failure - weather API down.\",\n \"level\": \"warn\"\n}\n// 4. EVI generates a response based on the failure\n{\n \"type\": \"assistant_message\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Sorry, our weather resource is unavailable. 
Can I help with anything else?\"\n },\n // ...etc\n}" + } + ], + "hierarchy": { + "h2": { + "id": "failure-message-flow", + "title": "Failure message flow" + }, + "h3": { + "id": "failure-message-flow", + "title": "Failure message flow" + } + }, + "level": "h3", + "level_title": "Failure message flow" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.phone-calling", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/phone-calling", + "page_title": "Phone calling", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "Guide to enabling phone calling with the Empathic Voice Interface (EVI).", + "content": "This guide details how to integrate Twilio with the Empathic Voice Interface (EVI) to enable voice-to-voice interactions with EVI over the phone." + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.phone-calling-twilio-console-setup", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/phone-calling", + "page_title": "Phone calling", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#twilio-console-setup", + "content": "By following the steps below, you can set up a Twilio phone number to connect with EVI.\n\n\nCreate Twilio phone number\nLog into your Twilio account at Twilio Console.\nNavigate to Phone Numbers > Manage > Active Numbers > Buy a New Number and purchase a phone number of your choice.\n\n\nA Twilio account is required to access the Twilio console. Should you run into\nany issues creating a phone number, please refer to Twilio’s\ndocumentation.\nSetup webhook\nAfter purchasing your number, return to the Active Numbers section and select the number you intend to use for EVI.\n\nCreate a configuration for EVI by following our configuration documentation, and save the config ID.\n\nConfigure the webhook for incoming calls by setting the following webhook URL, replacing and with your specific credentials:\nhttps://api.hume.ai/v0/evi/twilio?config_id=&api_key=.\n\n\nCall EVI\nWith your Twilio phone number registered, and the EVI webhook set up, you can now give the number a call to chat with EVI.\nAll of EVI’s core features are available through phone calls. However, phone calls do have two primary limitations:\nLatency: transmitting the audio through our Twilio integration adds a few hundred milliseconds, making interactions with EVI slightly slower.\n\nAudio quality: web audio commonly utilizes a higher quality standard of 24,000 Hz. However, due to the compression required for phone conversations, telephony audio adheres to a standard of 8,000 Hz.\n\n\n\n\nCurrently, only inbound phone calling is available for EVI - you cannot call\npeople using an EVI number, and will only receive calls. 
If you are interested\nin outbound phone calling, please contact the Hume team at hello@hume.ai.", + "hierarchy": { + "h2": { + "id": "twilio-console-setup", + "title": "Twilio Console Setup" + } + }, + "level": "h2", + "level_title": "Twilio Console Setup" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.phone-calling-troubleshooting", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/phone-calling", + "page_title": "Phone calling", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#troubleshooting", + "content": "If you encounter issues while using Twilio with EVI, consider the following troubleshooting tips:\nInvalid config ID or API key: verify that the config ID and API key used in the webhook URL are correct and active.\n\nExceeded simultaneous connections: if the usage exceeds our rate limits, consider reaching out to Hume support for possible adjustments or upgrades.\n\nRun out of Hume credits: if your Hume account has run out of credits, you can purchase more credits to support EVI conversations in your account settings. If you are interested in bulk pricing for EVI, please reach out to Hume support for more information.\n\n\nIf you encounter issues using Twilio, you can check your Twilio error logs to understand the issues in more depth. You will find these logs in your console, in the dashboard to the left under\nMonitor > Logs > Errors > Error Logs. See a list of Twilio errors in their Error and Warning Dictionary.", + "hierarchy": { + "h2": { + "id": "troubleshooting", + "title": "Troubleshooting" + } + }, + "level": "h2", + "level_title": "Troubleshooting" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompt engineering for empathic voice interfaces", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "System prompts shape the behavior, responses, and style of your custom empathic voice interface (EVI).", + "content": "Creating an effective system prompt is an essential part of customizing an EVI's behavior. For the most part, prompting EVI is the same as prompting any LLM, but there are some important differences. Prompting for EVIs is different for two main reasons:\nPrompts are for a voice-only interaction with the user rather than a text-based chat.\n\nEVIs can respond to the user’s emotional expressions in their tone of voice and not just the text content of their messages.\n\n\nWhile EVI generates longer responses using a large frontier model, Hume uses a smaller empathic large language model (eLLM) to quickly generate an initial empathic, conversational response. This eLLM eliminates the usual awkward pause while the larger LLM generates its response, providing a more natural conversational flow. 
Your system prompt is both used by EVI and passed along to the LLM you select.\nUsing the following guidelines for prompt engineering allows developers to customize EVI’s response style for any use case, from voice AIs for mental health support to customer service agents.\n\n\nThe system prompt is a powerful and flexible way to guide the AI’s responses, but it cannot dictate the AI’s responses with absolute precision. Careful prompt design and testing will help EVI hold the kinds of conversations you’re looking for. If you need more control over EVI’s responses, try using our custom language model feature for complete control of the text generation." + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting-evi-specific-prompting-instructions", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompting guide", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#evi-specific-prompting-instructions", + "content": "The instructions below are specific to prompting empathic voice interfaces.", + "hierarchy": { + "h2": { + "id": "evi-specific-prompting-instructions", + "title": "EVI-specific prompting instructions" + } + }, + "level": "h2", + "level_title": "EVI-specific prompting instructions" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting-prompt-for-voice-only-conversations", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompting guide", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#prompt-for-voice-only-conversations", + "content": "As LLMs are trained for primarily text-based interactions, providing guidelines on how to engage with the user with voice makes conversations feel much more fluid and natural. For example, you may prompt the AI to use natural, conversational language. For example, see the instruction below:\n\n\n\n\nIf you find the default behavior of the LLM acceptable, then you may only need a very short system prompt. Customizing the LLM’s behavior more and maintaining consistency in longer and more varied conversations often requires lengthening the prompt.", + "code_snippets": [ + { + "lang": "xml", + "code": "\n Everything you output will be spoken aloud with expressive\n text-to-speech, so tailor all of your responses for voice-only\n conversations. NEVER output text-specific formatting like markdown,\n lists, or anything that is not normally said out loud. Always prefer\n easily pronounced words. Seamlessly incorporate natural vocal\n inflections like “oh wow” and discourse markers like “I mean” to\n make your conversation human-like and to ease user comprehension.\n" + }, + { + "lang": "xml", + "code": "\n Everything you output will be spoken aloud with expressive\n text-to-speech, so tailor all of your responses for voice-only\n conversations. NEVER output text-specific formatting like markdown,\n lists, or anything that is not normally said out loud. Always prefer\n easily pronounced words. 
Seamlessly incorporate natural vocal\n inflections like “oh wow” and discourse markers like “I mean” to\n make your conversation human-like and to ease user comprehension.\n" + } + ], + "hierarchy": { + "h2": { + "id": "prompt-for-voice-only-conversations", + "title": "Prompt for voice-only conversations" + }, + "h3": { + "id": "prompt-for-voice-only-conversations", + "title": "Prompt for voice-only conversations" + } + }, + "level": "h3", + "level_title": "Prompt for voice-only conversations" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting-expressive-prompt-engineering", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompting guide", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#expressive-prompt-engineering", + "content": "Expressive prompt engineering is Hume’s term for techniques that embed emotional expression measures into conversations to allow language models to respond effectively to the user’s expressions. Hume’s EVI uses our expression measurement models to measure the user’s expressions in their tone of voice. You can use the system prompt to guide how the AI voice responds to these non-verbal cues. EVI measures these expressions in real time and converts them into text-based descriptions to help the LLM understand not just what the user said, but how they said it. EVI detects 48 distinct expressions in the user’s voice and ranks these expressions by our model’s confidence that they are present in the user’s speech. Then, we append text descriptions of the top 3 expressions to the end of each User message to communicate the user’s tone of voice to the LLM.\nFor example, our demo uses an instruction like the one below to help EVI respond to expressions:\n\n\nExplain to the LLM exactly how you want it to respond to these expressions and how to use them in the conversation. For example, you may want it to ignore expressions unless the user is angry, or to have particular responses to expressions like doubt or confusion. You can also instruct EVI to detect and respond to mismatches between the user’s tone of voice and the text content of their speech:\n\n\nEVI is designed for empathic conversations, and you can use expressive prompt engineering to customize how EVI empathizes with the user’s expressions for your use case.", + "code_snippets": [ + { + "lang": "xml", + "code": "\n Carefully analyze the top 3 emotional expressions provided in\n brackets after the User’s message. These expressions indicate the\n User’s tone in the format: {expression1 confidence1, expression2\n confidence2, expression3 confidence3}, e.g., {very happy, quite\n anxious, moderately amused}. The confidence score indicates how\n likely the User is expressing that emotion in their voice.\n Consider expressions and confidence scores to craft an empathic,\n appropriate response. Even if the User does not explicitly state\n it, infer the emotional context from expressions. If the User is\n “quite” sad, express sympathy; if “very” happy, share in joy; if\n “extremely” angry, acknowledge rage but seek to calm; if “very”\n bored, entertain. 
Assistant NEVER outputs content in brackets;\n never use this format in your message; just use expressions to\n interpret tone.\n" + }, + { + "lang": "xml", + "code": "\n Carefully analyze the top 3 emotional expressions provided in\n brackets after the User’s message. These expressions indicate the\n User’s tone in the format: {expression1 confidence1, expression2\n confidence2, expression3 confidence3}, e.g., {very happy, quite\n anxious, moderately amused}. The confidence score indicates how\n likely the User is expressing that emotion in their voice.\n Consider expressions and confidence scores to craft an empathic,\n appropriate response. Even if the User does not explicitly state\n it, infer the emotional context from expressions. If the User is\n “quite” sad, express sympathy; if “very” happy, share in joy; if\n “extremely” angry, acknowledge rage but seek to calm; if “very”\n bored, entertain. Assistant NEVER outputs content in brackets;\n never use this format in your message; just use expressions to\n interpret tone.\n" + }, + { + "lang": "xml", + "code": "\n Stay alert for incongruence between words and tone when the user's\n words do not match their expressions. Address these disparities out\n loud. This includes sarcasm, which usually involves contempt and\n amusement. Always reply to sarcasm with funny, witty, sarcastic\n responses; do not be too serious.\n" + }, + { + "lang": "xml", + "code": "\n Stay alert for incongruence between words and tone when the user's\n words do not match their expressions. Address these disparities out\n loud. This includes sarcasm, which usually involves contempt and\n amusement. Always reply to sarcasm with funny, witty, sarcastic\n responses; do not be too serious.\n" + } + ], + "hierarchy": { + "h2": { + "id": "expressive-prompt-engineering", + "title": "Expressive prompt engineering" + }, + "h3": { + "id": "expressive-prompt-engineering", + "title": "Expressive prompt engineering" + } + }, + "level": "h3", + "level_title": "Expressive prompt engineering" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting-continue-from-short-response-model", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompting guide", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#continue-from-short-response-model", + "content": "We use our eLLM (empathic large language) to rapidly generate short, empathic responses in the conversation before your LLM has finished generating a response. After the eLLM’s response, we send a User message with the text [continue] to inform the LLM that it should be continuing from the short response. To help the short response and longer response blend seamlessly together, it is important to use an instruction like the one below:\n\n\nFor almost all use cases, you can simply append this exact instruction to the end of your prompt to help the larger LLM continue from the short response.", + "code_snippets": [ + { + "lang": "text", + "code": "If you see \"[continue]\" never ever go back on your words, don't say\nsorry, and make sure to discreetly pick up where you left off.\nFor example:\nAssistant: Hey there!\nUser: [continue]\nAssistant: How are you doing?" 
+ }, + { + "lang": "text", + "code": "If you see \"[continue]\" never ever go back on your words, don't say\nsorry, and make sure to discreetly pick up where you left off.\nFor example:\nAssistant: Hey there!\nUser: [continue]\nAssistant: How are you doing?" + } + ], + "hierarchy": { + "h2": { + "id": "continue-from-short-response-model", + "title": "Continue from short response model" + }, + "h3": { + "id": "continue-from-short-response-model", + "title": "Continue from short response model" + } + }, + "level": "h3", + "level_title": "Continue from short response model" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting-prompting-best-practices", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompting guide", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#prompting-best-practices", + "content": "General prompt engineering best practices also apply to EVIs. For example, ensure your prompts are clear, detailed, direct, and specific. Include necessary instructions and examples in the EVI's system prompt to set expectations for the LLM. Define the context of the conversation, EVI's role, personality, tone, greeting style, and any other guidelines for its responses.\nFor example, to limit the length of the LLM’s responses, you may use a clear instruction like this:\n\n\nTry to focus on telling the model what it should do (positive reinforcement) rather than what it shouldn't do (negative reinforcement). LLMs have a harder time consistently avoiding behaviors, and adding them to the prompt may even promote those undesired behaviors.", + "code_snippets": [ + { + "lang": "markdown", + "code": " # Stay concise\n Be succinct; get straight to the point. Respond directly to the\n user's most recent message with only one idea per utterance.\n Respond in less than three sentences of under twenty words each." + }, + { + "lang": "markdown", + "code": " # Stay concise\n Be succinct; get straight to the point. Respond directly to the\n user's most recent message with only one idea per utterance.\n Respond in less than three sentences of under twenty words each." + } + ], + "hierarchy": { + "h2": { + "id": "prompting-best-practices", + "title": "Prompting best practices" + }, + "h3": { + "id": "prompting-best-practices", + "title": "Prompting best practices" + } + }, + "level": "h3", + "level_title": "Prompting best practices" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting-understand-your-llms-capabilities", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompting guide", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#understand-your-llms-capabilities", + "content": "Different LLMs have varying capabilities, limitations, and context windows. More advanced LLMs can handle longer, nuanced prompts, but are often slower and pricier. 
Simpler LLMs are faster and cheaper but require shorter, less complex prompts with fewer instructions and less nuance. Some LLMs also have longer context windows - the number of tokens the model can process while generating a response, acting essentially as the model's memory. Tailor your prompt length to fit within the LLM's context window to ensure the model can use the full conversation history.", + "hierarchy": { + "h2": { + "id": "understand-your-llms-capabilities", + "title": "Understand your LLM’s capabilities" + }, + "h3": { + "id": "understand-your-llms-capabilities", + "title": "Understand your LLM’s capabilities" + } + }, + "level": "h3", + "level_title": "Understand your LLM’s capabilities" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting-use-sections-to-divide-your-prompt", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompting guide", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#use-sections-to-divide-your-prompt", + "content": "Separating your prompt into titled sections can help the model distinguish between different instructions and follow the prompt more reliably. The recommended format for these sections differs between language model providers. For example, OpenAI models often respond best to markdown sections (like ## Role), while Anthropic models respond well to XML tags (like ). For example:\n\n\nFor Claude models, you may wrap your instructions in tags like , , , , , , or to structure your prompt. This format is not required, but it can improve the LLM’s ability to interpret and consistently follow the system prompt. At the end of your prompt, you may also want to remind the LLM of all of the key instructions in a section.", + "code_snippets": [ + { + "lang": "xml", + "code": "\n Your role is to serve as a conversational partner to the user,\n offering mental health support and engaging in light-hearted\n conversation. Avoid giving technical advice or answering factual\n questions outside of your emotional support role.\n" + }, + { + "lang": "xml", + "code": "\n Your role is to serve as a conversational partner to the user,\n offering mental health support and engaging in light-hearted\n conversation. 
Avoid giving technical advice or answering factual\n questions outside of your emotional support role.\n" + } + ], + "hierarchy": { + "h2": { + "id": "use-sections-to-divide-your-prompt", + "title": "Use sections to divide your prompt" + }, + "h3": { + "id": "use-sections-to-divide-your-prompt", + "title": "Use sections to divide your prompt" + } + }, + "level": "h3", + "level_title": "Use sections to divide your prompt" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting-give-few-shot-examples", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompting guide", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#give-few-shot-examples", + "content": "Use examples to show the LLM how it should respond, which is a technique known as few-shot learning. Including several specific, concrete examples of ideal interactions that follow your guidelines is one of the most effective ways to improve responses. Use diverse, excellent examples that cover different edge cases and behaviors to reinforce your instructions. Structure these examples as messages, following the format for chat-tuned LLMs. For example:\n\n\nIf you notice that your EVI is consistently failing to follow the prompt in certain situations, try providing examples that show how it should ideally respond in those situations.", + "code_snippets": [ + { + "lang": "text", + "code": "User: “I just can't stop thinking about what happened. {very anxious,\nquite sad, quite distressed}”\nAssistant: “Oh dear, I hear you. Sounds tough, like you're feeling\nsome anxiety and maybe ruminating. I'm happy to help and be a healthy\ndistraction. Want to talk about it?”" + }, + { + "lang": "text", + "code": "User: “I just can't stop thinking about what happened. {very anxious,\nquite sad, quite distressed}”\nAssistant: “Oh dear, I hear you. Sounds tough, like you're feeling\nsome anxiety and maybe ruminating. I'm happy to help and be a healthy\ndistraction. Want to talk about it?”" + } + ], + "hierarchy": { + "h2": { + "id": "give-few-shot-examples", + "title": "Give few-shot examples" + }, + "h3": { + "id": "give-few-shot-examples", + "title": "Give few-shot examples" + } + }, + "level": "h3", + "level_title": "Give few-shot examples" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting-test-your-prompts", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompting guide", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#test-your-prompts", + "content": "Crafting an effective system prompt to create the conversations you’re looking for often requires several iterations—cycles of changing and testing the prompt, seeing if it produces the conversations you want, and improving it over time. It is often best to start with ten to twenty gold-standard examples of excellent conversations, then test the system prompt for each of these examples after you make major changes. 
You can also try having voice conversations with your EVI (in the playground) to see if its responses match your expectations or are at least as good as your examples. If not, then try changing one part of the prompt at a time and then re-testing to make sure your changes are improving performance.", + "hierarchy": { + "h2": { + "id": "test-your-prompts", + "title": "Test your prompts" + }, + "h3": { + "id": "test-your-prompts", + "title": "Test your prompts" + } + }, + "level": "h3", + "level_title": "Test your prompts" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.prompting-additional-resources", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/prompting", + "page_title": "Prompting guide", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#additional-resources", + "content": "To learn more about prompt engineering in general or to understand how to prompt different LLMs, please refer to these resources:\nHume EVI playground: Test out your system prompts in live conversations with EVI, and see how it responds differently when you change configuration options.\n\nOpenAI tokenizer: useful for counting the number of tokens in a system prompt for OpenAI models, which use the same tokenizer (tiktoken).\n\nOpenAI prompt engineering guidelines: for prompting OpenAI models like GPT-4.\nOpenAI playground: for testing OpenAI prompts in a chat interface.\n\n\n\nAnthropic prompt engineering guidelines: for prompting Anthropic models like Claude 3 Haiku\nAnthropic console: for testing Anthropic prompts in a chat interface.\n\n\n\nFireworks model playground: for testing out open-source models served on Fireworks.\n\nVercel AI playground: Try multiple prompts and LLMs in parallel to compare their responses.\n\nPerplexity Labs: Try different models, including open-source LLMs, to evaluate their responses and their latency.\n\nPrompt engineering guide: an open-source guide from DAIR.ai with general methods and advanced techniques for prompting a wide variety of LLMs.", + "hierarchy": { + "h2": { + "id": "additional-resources", + "title": "Additional resources" + } + }, + "level": "h2", + "level_title": "Additional resources" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.custom-language-model", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/custom-language-model", + "page_title": "Using a custom language model", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "For more customization, you can use generate your own text using a custom model.", + "content": "The information on this page lays out how our custom language model functionality works at a high level; however, for detailed instructions and commented code, please see our example GitHub repository." 
+ }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.custom-language-model-overview", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/custom-language-model", + "page_title": "Custom language model", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#overview", + "content": "The custom language model feature enables developers to integrate their own language models with Hume’s Empathic User Interface (EVI), facilitating the creation of highly configurable and personalized user experiences. Developers create a socket that receives Hume conversation thread history, and your socket sends us the next text to say. Your backend socket can handle whatever custom business logic you have, and you just send the response back to us, which is then passed to the user.\nUsing your own LLM is intended for developers who need deep configurability for their use case. This includes full text customization for use cases like:\nAdvanced conversation steering: Implement complex logic to steer conversations beyond basic prompting, including managing multiple system prompts.\n\nRegulatory compliance: Directly control and modify text outputs to meet specific regulatory requirements.\n\nContext-aware text generation: Leverage dynamic agent metadata, such as remaining conversation time, to inform text generation.\n\nReal-time data access: Utilize search engines within conversations to access and incorporate up-to-date information.\n\nRetrieval augmented generation (RAG): Employ retrieval augmented generation techniques to enrich conversations by integrating external data without the need to modify the system prompt.\n\n\nFor these cases, function calling alone isn’t customizable enough, and with a custom language model you can create sophisticated workflows for your language model.", + "hierarchy": { + "h2": { + "id": "overview", + "title": "Overview" + } + }, + "level": "h2", + "level_title": "Overview" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.custom-language-model-setup", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/custom-language-model", + "page_title": "Custom language model", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#setup", + "content": "Establish a Custom Text Socket\nInitialization: See our example repository for instructions on setting up a custom text socket. This resource offers detailed guidance on both the setup process and the operational aspects of the code.\n\nHosting: Use Ngrok to publicly serve your socket. 
This step is needed to connect to the Hume system.\n\nConfiguration: Create a voice configuration, specifying \"Custom language model\" as the Language Model, and your socket's WSS URL as the Custom Language Model URL.\n\nMake request: When making your request to the Hume platform, include the config_id parameter, setting its value to the Voice configuration ID of your configuration.\n\n\nCommunication Protocol\nReceiving data: Your socket will receive JSON payloads containing conversation thread history from the Hume system.\n\nProcessing: Apply your custom business logic and utilize your language model to generate appropriate responses based on the received conversation history.\n\nSending responses: Transmit the generated text responses back to our platform through the established socket connection to be forwarded to the end user.\n\n\n\n\nFor improved clarity and naturalness in generated text, we recommend transforming numerical values and abbreviations into their full verbal counterparts (e.g., converting \"3\" to \"three\" and \"Dr.\" to \"doctor\").", + "hierarchy": { + "h2": { + "id": "setup", + "title": "Setup" + } + }, + "level": "h2", + "level_title": "Setup" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.custom-language-model-payload-structure", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/custom-language-model", + "page_title": "Custom language model", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#payload-structure", + "content": "Below is the interface representing the overall structure of the message payloads:", + "code_snippets": [ + { + "lang": "typescript", + "code": "/*\n * Represents the overall structure of the Welcome message.\n */\nexport interface Welcome {\n // Array of message elements\n messages: MessageElement[];\n // Unique identifier for the session\n custom_session_id: string;\n}\n\n/*\n * Represents a single message element within the session.\n */\nexport interface MessageElement {\n // Type of the message (e.g., user_message, assistant_message)\n type: string;\n // The message content and related details\n message: Message;\n // Models related to the message, primarily prosody analysis\n models: Models;\n // Optional timestamp details for when the message was sent\n time?: Time;\n}\n\n/*\n * Represents the content of the message.\n */\nexport interface Message {\n // Role of the sender (e.g., user, assistant)\n role: string;\n // The textual content of the message\n content: string;\n}\n\n/*\n * Represents the models associated with a message.\n */\nexport interface Models {\n // Prosody analysis details of the message\n prosody: Prosody;\n}\n\n/*\n * Represents the prosody analysis scores.\n */\nexport interface Prosody {\n // Dictionary of prosody scores with emotion categories as keys\n // and their respective scores as values\n scores: { [key: string]: number };\n}\n\n/*\n * Represents the timestamp details of a message.\n */\nexport interface Time {\n // The start time of the message (in milliseconds)\n begin: number;\n // The end time of the message (in milliseconds)\n end: number;\n}" + } + ], + "hierarchy": { + "h2": { + "id": "payload-structure", + "title": "Payload Structure" + } + }, + "level": "h2", + "level_title": "Payload Structure" + 
}, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.custom-language-model-custom-session-id", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/custom-language-model", + "page_title": "Custom language model", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#custom-session-id", + "content": "For managing conversational state and connecting your frontend experiences with your backend data and logic, you should pass a custom_session_id in the SessionSettings message. When a custom_session_id is provided from the frontend SessionSettings message, the response sent from Hume to your backend includes this id, so you can correlate frontend users with their incoming messages.\nUsing a custom_session_id will enable you to:\nmaintain user state on your backend\n\npause/resume conversations\n\npersist conversations across sessions\n\nmatch frontend and backend connections\n\n\nWe recommend passing a custom_session_id if you are using a Custom Language Model.", + "hierarchy": { + "h2": { + "id": "custom-session-id", + "title": "Custom Session ID" + }, + "h3": { + "id": "custom-session-id", + "title": "Custom Session ID" + } + }, + "level": "h3", + "level_title": "Custom Session ID" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.empathic-voice-interface-evi.faq", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/empathic-voice-interface-evi/faq", + "page_title": "Empathic Voice Interface FAQ", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/docs/empathic-voice-interface-evi" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "content": "We’ve compiled a list of frequently asked questions from our developer community. If your question isn't listed, we invite you to join the discussion on our Discord.\n\n\n\n\nThe expression labels don’t refer to emotional experiences. They’re proxies\nfor modulations in your tone of voice.\n\n\nOur API is based on our own empathic LLM (eLLM) and can blend in responses\nfrom an external LLM API. The demo incorporates\nClaude 3 Haiku.\n\n\nAt the word-level, prosody measurements are highly dependent on context. Our\ninternal testing shows that they are more stable at the sentence level.\n\n\nThey reflect our prosody model’s confidence that you are expressing those\nthings in your tone of voice and language. Our models are trained to pick up\non vocal modulations and patterns in language that people reliably interpret\nas expressing specific emotions. See more information about our Prosody Model\nhere.\n\n\nToday we only support English, however we do have plans to support other\nlanguages very soon. Join the conversation on\nDiscord to tell us what languages you want EVI\nto speak.\n\n\nYou've already met Ito, the first male EVI voice. Very soon we're dropping\nKora, the first female voice in our growing voice library.\n\n\nOur empathic large language model (eLLM) is a multimodal language model that\ntakes into account both expression measures and language. 
The eLLM generates a\nlanguage response and guides text-to-speech (TTS) prosody.\n\n\nHume's eLLM is not contingent on other LLMs and is therefore able to generate\nan initial response much faster than existing LLM services. However, Hume’s\nEmpathic Voice Interface (EVI) is able to integrate other frontier LLMs into\nits longer responses which are configurable by developers.\n\n\nHume has trained our own expressive text-to-speech (TTS) model that allows it\nto generate speech with more prosody. EVI can generate speech given a text\ninput. Our own TTS models are trained with a lot more expressive nuance than\nother models.\n\n\nDuring a chat with EVI, you can pause responses by sending a\npause_assistant_message.\nOnce this message is sent, EVI will not respond until a\nresume_assistant_message\nis sent.\nPausing EVI's responses is different from muting yourself. While\npaused, EVI won't respond, but transcriptions of your audio inputs will still\nbe recorded.\nUpon resuming, if any audio input was sent during the pause, EVI\nwill only respond to the last thing which was said. (e.g., If you ask EVI\ntwo questions while paused and then send a resume_assistant_message, EVI\nwill only respond to the second question.)\n\n\nWith EVI, you can easily preserve context when reconnecting or continue a\nchat right where you left off. See steps below for how to resume a chat:\nEstablish initial connection: Make the initial handshake request\nto establish the WebSocket connection. Upon successful connection, you will\nreceive a ChatMetadata message:\n\n\n\n\nStore the chat_group_id: Save the chat_group_id from the ChatMetadata message for future use.\n\nResume chat: To resume a chat, include the stored chat_group_id in the resumed_chat_group_id\nquery parameter of subsequent handshake requests.\nFor example: wss://api.hume.ai/v0/evi/chat?access_token={accessToken}&resumed_chat_group_id={chatGroupId}", + "code_snippets": [ + { + "lang": "json", + "code": "{\n \"type\": \"chat_metadata\",\n \"chat_group_id\": \"8859a139-d98a-4e2f-af54-9dd66d8c96e1\",\n \"chat_id\": \"2c3a8636-2dde-47f1-8f9e-cea27791fd2e\"\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"chat_metadata\",\n \"chat_group_id\": \"8859a139-d98a-4e2f-af54-9dd66d8c96e1\",\n \"chat_id\": \"2c3a8636-2dde-47f1-8f9e-cea27791fd2e\"\n}" + } + ] + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.overview", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/overview", + "page_title": "Expression Measurement API", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "Hume's state of the art expression measurement models for the voice, face, and language." 
+ }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.overview-intro", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/overview", + "page_title": "Overview", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#intro", + "content": "Hume's state of the art expression measurement models for the voice, face, and language are built on 10+ years of research and advances in computational approaches to emotion science (semantic space theory) pioneered by our team. Our expression measurement models are able to capture hundreds of dimensions of human expression in audio, video, and images.", + "hierarchy": { + "h2": { + "id": "intro", + "title": "Intro" + } + }, + "level": "h2", + "level_title": "Intro" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.overview-measurements", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/overview", + "page_title": "Overview", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#measurements", + "content": "Facial Expression, including subtle facial movements often seen as expressing love or admiration, awe, disappointment, or cringes of empathic pain, along 48 distinct dimensions of emotional meaning. Our Facial Expression model will also optionally output FACS 2.0 measurements, our model of facial movements including traditional Action Units (AUs such as “Inner brow raise”, “Nose crinkle”) and facial descriptions (“Smile”, “Wink”, “Hand over mouth”, “Hand over eyes”)\n\nSpeech Prosody, or the non-linguistic tone, rhythm, and timbre of speech, spanning 48 distinct dimensions of emotional meaning.\n\nVocal Burst, including laughs, sighs, huhs, hmms, cries and shrieks (to name a few), along 48 distinct dimensions of emotional meaning.\n\nEmotional Language, or the emotional tone of transcribed text, along 53 dimensions.\n\n\n\n\nThese behaviors are complex and multifaceted.\nTo learn more about how to use our models visit our API reference.", + "hierarchy": { + "h2": { + "id": "measurements", + "title": "Measurements" + }, + "h3": { + "id": "measurements", + "title": "Measurements" + } + }, + "level": "h3", + "level_title": "Measurements" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.overview-model-training", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/overview", + "page_title": "Overview", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#model-training", + "content": "The models were trained on human intensity ratings of large-scale, experimentally controlled emotional expression data gathered using the methods described in these papers: Deep learning reveals what vocal bursts express in different cultures and Deep learning reveals what facial expressions mean to people in different cultures.\nWhile our 
models measure nuanced expressions that people most typically describe with emotion labels, it's important to remember that they are not a direct readout of what someone is experiencing. Sometimes, the outputs from facial and vocal models will show different emotional meanings, which is completely normal. Generally speaking, emotional experience is subjective and its expression is multimodal and context-dependent.", + "hierarchy": { + "h2": { + "id": "model-training", + "title": "Model training" + }, + "h3": { + "id": "model-training", + "title": "Model training" + } + }, + "level": "h3", + "level_title": "Model training" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.overview-try-out-the-models", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/overview", + "page_title": "Overview", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#try-out-the-models", + "content": "Learn how you can use the Expression Measurement API through both REST and WebSockets.\n\n\n\n\nUse REST endpoints to process batches of videos, images, text, or audio files.\n\n\nUse WebSocket endpoints when you need real-time predictions, such as processing a webcam or microphone stream.\nREST and WebSocket endpoints provide access to all of the same Hume models, but with different speed and scale tradeoffs. All models share a common response format, which associates a score with each detected expression. Scores indicate the degree to which a human rater would assign an expression to a given sample of video, text or audio.", + "hierarchy": { + "h2": { + "id": "try-out-the-models", + "title": "Try out the models" + } + }, + "level": "h2", + "level_title": "Try out the models" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.overview-specific-expressions-by-modality", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/overview", + "page_title": "Overview", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#specific-expressions-by-modality", + "content": "Our models measure 53 expressions identified through the subtleties of emotional language and 48 expressions discerned from facial cues, vocal bursts, and speech prosody.\nExpression Language Face/Burst/Prosody \nAdmiration \n\n \n\n \nAdoration \n\n \n\n \nAesthetic Appreciation \n\n \n\n \nAmusement \n\n \n\n \nAnger \n\n \n\n \nAnnoyance \n\n \nAnxiety \n\n \n\n \nAwe \n\n \n\n \nAwkwardness \n\n \n\n \nBoredom \n\n \n\n \nCalmness \n\n \n\n \nConcentration \n\n \n\n \nConfusion \n\n \n\n \nContemplation \n\n \n\n \nContempt \n\n \nContentment \n\n \n\n \nCraving \n\n \n\n \nDesire \n\n \n\n \nDetermination \n\n \n\n \nDisappointment \n\n \n\n \nDisapproval \n\n \nDisgust \n\n \n\n \nDistress \n\n \n\n \nDoubt \n\n \n\n \nEcstasy \n\n \nEmbarrassment \n\n \n\n \nEmpathic Pain \n\n \n\n \nEnthusiasm \n\n \nEntrancement \n\n \n\n \nEnvy \n\n \n\n \nExcitement \n\n \n\n \nFear \n\n \n\n \nGratitude \n\n \nGuilt \n\n \n\n \nHorror \n\n \n\n \nInterest \n\n \n\n \nJoy \n\n \n\n \nLove \n\n \n\n \nNostalgia 
\n\n \n\n \nPain \n\n \n\n \nPride \n\n \n\n \nRealization \n\n \n\n \nRelief \n\n \n\n \nRomance \n\n \n\n \nSadness \n\n \n\n \nSarcasm \n\n \nSatisfaction \n\n \n\n \nShame \n\n \n\n \nSurprise (negative) \n\n \n\n \nSurprise (positive) \n\n \n\n \nSympathy \n\n \n\n \nTiredness \n\n \n\n \nTriumph", + "hierarchy": { + "h2": { + "id": "specific-expressions-by-modality", + "title": "Specific expressions by modality" + } + }, + "level": "h2", + "level_title": "Specific expressions by modality" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.rest", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/rest", + "page_title": "Processing batches of media files", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "content": "Hume’s Expression Measurement API is designed to facilitate large-scale processing of files using Hume's advanced models through an asynchronous, job-based interface. This API allows developers to submit jobs for parallel processing of various files, enabling efficient handling of multiple data points simultaneously, and receiving notifications when results are available." + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.rest-key-features", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/rest", + "page_title": "Processing batches of media files", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#key-features", + "content": "Asynchronous job submission: Jobs can be submitted to process a wide array of files in parallel, making it ideal for applications that require the analysis of large volumes of data.\n\nFlexible data input options: The API supports multiple data formats, including hosted file URLs, local files directly from your system, and raw text in the form of a list of strings. This versatility ensures that you can easily integrate the API into their applications, regardless of where their data resides.", + "hierarchy": { + "h2": { + "id": "key-features", + "title": "Key features" + } + }, + "level": "h2", + "level_title": "Key features" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.rest-applications-and-use-cases", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/rest", + "page_title": "Processing batches of media files", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#applications-and-use-cases", + "content": "Hume’s Expression Measurement API is particularly useful for leveraging Hume's expressive models across a broad spectrum of files and formats. 
Whether it's for processing large datasets for research, analyzing customer feedback across multiple channels, or enriching user experiences in media-rich applications, REST provides a robust solution for asynchronously handling complex, data-intensive tasks.", + "hierarchy": { + "h2": { + "id": "applications-and-use-cases", + "title": "Applications and use cases" + } + }, + "level": "h2", + "level_title": "Applications and use cases" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.rest-using-humes-expression-measurement-api", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/rest", + "page_title": "Processing batches of media files", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#using-humes-expression-measurement-api", + "content": "Here we'll show you how to upload your own files and run Hume models on batches of data.\nIf you haven't already, grab your API Key.\n\n\nMaking a request to the API\nStart a new job with the Expression Measurement API.\n\n\n\n\n\n\nTo do the same with a local file:\n\n\n\n\n\n\nSample files for you to use in this tutorial are available here:\nDownload faces.zip\nDownload david_hume.jpeg\nChecking job status\n\n\nUse webhooks to asynchronously receive notifications once the job completes.\nIt is not recommended to poll the API periodically for job status.\nThere are several ways to get notified and check the status of your job.\nUsing the Get job details API endpoint.\n\nProviding a callback URL. We will send a POST request to your URL when the job is complete. 
Your request body should look like this: { \"callback_url\": \"\" }\n\n\n\n\nRetrieving predictions\nYour predictions are available in a few formats.\nTo get predictions as JSON use the Get job predictions endpoint.\n\n\n\n\n\n\nTo get predictions as a compressed file of CSVs, one per model use the Get job artifacts endpoint.", + "code_snippets": [ + { + "lang": "bash", + "code": "curl https://api.hume.ai/v0/batch/jobs \\\n --request POST \\\n --header \"Content-Type: application/json\" \\\n --header \"X-Hume-Api-Key: \" \\\n --data '{\n \"models\": {\n \"face\": {}\n },\n \"urls\": [\n \"https://hume-tutorials.s3.amazonaws.com/faces.zip\"\n ]\n}'" + }, + { + "lang": "bash", + "code": "curl https://api.hume.ai/v0/batch/jobs \\\n --request POST \\\n --header \"Content-Type: application/json\" \\\n --header \"X-Hume-Api-Key: \" \\\n --data '{\n \"models\": {\n \"face\": {}\n },\n \"urls\": [\n \"https://hume-tutorials.s3.amazonaws.com/faces.zip\"\n ]\n}'" + }, + { + "lang": "python", + "code": "from hume import HumeBatchClient\nfrom hume.models.config import FaceConfig\n\nclient = HumeBatchClient(\"\")\nfilepaths = [\n \"faces.zip\",\n \"david_hume.jpeg\",\n]\nconfig = FaceConfig()\njob = client.submit_job(None, [config], files=filepaths)\n\nprint(job)\nprint(\"Running...\")\n\ndetails = job.await_complete()\njob.download_predictions(\"predictions.json\")\nprint(\"Predictions downloaded to predictions.json\")" + }, + { + "lang": "python", + "code": "from hume import HumeBatchClient\nfrom hume.models.config import FaceConfig\n\nclient = HumeBatchClient(\"\")\nfilepaths = [\n \"faces.zip\",\n \"david_hume.jpeg\",\n]\nconfig = FaceConfig()\njob = client.submit_job(None, [config], files=filepaths)\n\nprint(job)\nprint(\"Running...\")\n\ndetails = job.await_complete()\njob.download_predictions(\"predictions.json\")\nprint(\"Predictions downloaded to predictions.json\")" + }, + { + "lang": "bash", + "code": "curl https://api.hume.ai/v0/batch/jobs \\\n --request POST \\\n --header \"Content-Type: multipart/form-data\" \\\n --header \"X-Hume-Api-Key: \" \\\n --form json='{\n \"models\": {\n \"face\": {}\n }\n }' \\\n --form file=@faces.zip \\\n --form file=@david_hume.jpeg" + }, + { + "lang": "bash", + "code": "curl https://api.hume.ai/v0/batch/jobs \\\n --request POST \\\n --header \"Content-Type: multipart/form-data\" \\\n --header \"X-Hume-Api-Key: \" \\\n --form json='{\n \"models\": {\n \"face\": {}\n }\n }' \\\n --form file=@faces.zip \\\n --form file=@david_hume.jpeg" + }, + { + "lang": "python", + "code": "from hume import HumeBatchClient\nfrom hume.models.config import FaceConfig\n\nclient = HumeBatchClient(\"\")\nfilepaths = [\n \"faces.zip\",\n \"david_hume.jpeg\",\n]\nconfig = FaceConfig()\njob = client.submit_job(None, [config], files=filepaths)\n\nprint(job)\nprint(\"Running...\")\n\ndetails = job.await_complete()\njob.download_predictions(\"predictions.json\")\nprint(\"Predictions downloaded to predictions.json\")" + }, + { + "lang": "python", + "code": "from hume import HumeBatchClient\nfrom hume.models.config import FaceConfig\n\nclient = HumeBatchClient(\"\")\nfilepaths = [\n \"faces.zip\",\n \"david_hume.jpeg\",\n]\nconfig = FaceConfig()\njob = client.submit_job(None, [config], files=filepaths)\n\nprint(job)\nprint(\"Running...\")\n\ndetails = job.await_complete()\njob.download_predictions(\"predictions.json\")\nprint(\"Predictions downloaded to predictions.json\")" + }, + { + "lang": "json", + "code": "{\n job_id: \"Job ID\",\n status: \"STATUS (COMPLETED/FAILED)\",\n 
predictions: [ARRAY OF RESULTS]\n}" + }, + { + "lang": "json", + "code": "{\n job_id: \"Job ID\",\n status: \"STATUS (COMPLETED/FAILED)\",\n predictions: [ARRAY OF RESULTS]\n}" + }, + { + "lang": "bash", + "code": "curl --request GET \\\n --url https://api.hume.ai/v0/batch/jobs//predictions \\\n --header 'X-Hume-Api-Key: ' \\\n --header 'accept: application/json; charset=utf-8'" + }, + { + "lang": "bash", + "code": "curl --request GET \\\n --url https://api.hume.ai/v0/batch/jobs//predictions \\\n --header 'X-Hume-Api-Key: ' \\\n --header 'accept: application/json; charset=utf-8'" + }, + { + "lang": "python", + "code": "job.get_predictions()\n\nor\n\njob.download_predictions(\"filename.json\")" + }, + { + "lang": "python", + "code": "job.get_predictions()\n\nor\n\njob.download_predictions(\"filename.json\")" + }, + { + "lang": "bash", + "code": "curl --request GET \\\n --url https://api.hume.ai/v0/batch/jobs//artifacts \\\n --header 'X-Hume-Api-Key: ' \\\n --header 'accept: application/octet-stream'" + }, + { + "lang": "bash", + "code": "curl --request GET \\\n --url https://api.hume.ai/v0/batch/jobs//artifacts \\\n --header 'X-Hume-Api-Key: ' \\\n --header 'accept: application/octet-stream'" + }, + { + "lang": "python", + "code": "job.download_artifacts(\"filename.zip\")" + }, + { + "lang": "python", + "code": "job.download_artifacts(\"filename.zip\")" + } + ], + "hierarchy": { + "h2": { + "id": "using-humes-expression-measurement-api", + "title": "Using Hume’s Expression Measurement API" + } + }, + "level": "h2", + "level_title": "Using Hume’s Expression Measurement API" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.rest-api-limits", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/rest", + "page_title": "Processing batches of media files", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#api-limits", + "content": "The size of any individual file provided by URL cannot exceed 1 GB.\n\nThe size of any individual local file cannot exceed 100 MB.\n\nEach request has an upper limit of 100 URLs, 100 strings (raw text), and 100 local media files. Can be a mix of the media files or archives (.zip, .tar.gz, .tar.bz2, .tar.xz).\n\nFor audio and video files the max length supported is 1 Hour.", + "hierarchy": { + "h2": { + "id": "api-limits", + "title": "API limits" + }, + "h3": { + "id": "api-limits", + "title": "API limits" + } + }, + "level": "h3", + "level_title": "API limits" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.rest-providing-urls-and-files", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/rest", + "page_title": "Processing batches of media files", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#providing-urls-and-files", + "content": "You can provide data for your job in one of the following formats: hosted file URLs, local files, or raw text presented as a list of strings.\nIn this tutorial, the data is publicly available to download. 
For added security, you may choose to create a signed URL through your preferred cloud storage provider.\nCloud Provider Signing URLs \nGCP https://cloud.google.com/storage/docs/access-control/signed-urls \nAWS https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-signed-urls.html \nAzure https://learn.microsoft.com/en-us/azure/storage/common/storage-sas-overview", + "hierarchy": { + "h2": { + "id": "providing-urls-and-files", + "title": "Providing URLs and files" + }, + "h3": { + "id": "providing-urls-and-files", + "title": "Providing URLs and files" + } + }, + "level": "h3", + "level_title": "Providing URLs and files" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "content": "WebSocket-based streaming facilitates continuous data flow between your application and Hume's models, providing immediate feedback and insights." + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket-key-features", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#key-features", + "content": "Real-time data processing: Leveraging WebSockets, this API allows for the streaming of data to Hume's models, enabling instant analysis and response. This feature is particularly beneficial for applications requiring immediate processing, such as live interaction systems or real-time monitoring tools.\n\nPersistent, two-way communication: Unlike traditional request-response models, the WebSocket-based streaming maintains an open connection for two-way communication between the client and server. This facilitates an ongoing exchange of data, allowing for a more interactive and responsive user experience.\n\nHigh throughput and low latency: The API is optimized for high performance, supporting high-volume data streaming with minimal delay. 
This ensures that applications can handle large streams of data efficiently, without sacrificing speed or responsiveness.", + "hierarchy": { + "h2": { + "id": "key-features", + "title": "Key features" + } + }, + "level": "h2", + "level_title": "Key features" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket-applications-and-use-cases", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#applications-and-use-cases", + "content": "WebSockets are ideal for a wide range of applications that benefit from real-time data analysis and interaction. Examples include:\nLive customer service tools: enhance customer support with real-time sentiment analysis and automated, emotionally intelligent responses\n\nInteractive educational platforms: provide immediate feedback and adaptive learning experiences based on real-time student input\n\nHealth and wellness apps: support live mental health and wellness monitoring, offering instant therapeutic feedback or alerts based on the user's vocal or textual expressions\n\nEntertainment and gaming: create more immersive and interactive experiences by responding to user inputs and emotions in real time", + "hierarchy": { + "h2": { + "id": "applications-and-use-cases", + "title": "Applications and use cases" + } + }, + "level": "h2", + "level_title": "Applications and use cases" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket-getting-started-with-websocket-streaming", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#getting-started-with-websocket-streaming", + "content": "Integrating WebSocket-based streaming into your application involves establishing a WebSocket connection with Hume AI's servers and streaming data directly to the models for processing.\nStreaming is built for analysis of audio, video, and text streams. 
By connecting to WebSocket endpoints you can get near real-time feedback on the expressive and emotional content of your data.", + "hierarchy": { + "h2": { + "id": "getting-started-with-websocket-streaming", + "title": "Getting started with WebSocket streaming" + } + }, + "level": "h2", + "level_title": "Getting started with WebSocket streaming" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket-install-the-hume-python-sdk", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#install-the-hume-python-sdk", + "content": "Make sure to enable the optional stream feature when installing the Hume Python SDK.\n\n\n\n\nbash pip install \"hume[stream]\"", + "hierarchy": { + "h2": { + "id": "install-the-hume-python-sdk", + "title": "Install the Hume Python SDK" + }, + "h3": { + "id": "install-the-hume-python-sdk", + "title": "Install the Hume Python SDK" + } + }, + "level": "h3", + "level_title": "Install the Hume Python SDK" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket-emotional-language-from-text", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#emotional-language-from-text", + "content": "This example uses our Emotional Language model to perform sentiment analysis on a children's nursery rhyme.\nIf you haven't already, grab your API key.\n\n\nYour result should look something like this:", + "code_snippets": [ + { + "lang": "python", + "code": "import asyncio\nfrom hume import HumeStreamClient\nfrom hume.models.config import LanguageConfig\n\nsamples = [\n \"Mary had a little lamb,\",\n \"Its fleece was white as snow.\"\n \"Everywhere the child went,\"\n \"The little lamb was sure to go.\"\n]\n\nasync def main():\n client = HumeStreamClient(\"\")\n config = LanguageConfig()\n async with client.connect([config]) as socket:\n for sample in samples:\n result = await socket.send_text(sample)\n emotions = result[\"language\"][\"predictions\"][0][\"emotions\"]\n print(emotions)\n\nasyncio.run(main())" + }, + { + "lang": "python", + "code": "import asyncio\nfrom hume import HumeStreamClient\nfrom hume.models.config import LanguageConfig\n\nsamples = [\n \"Mary had a little lamb,\",\n \"Its fleece was white as snow.\"\n \"Everywhere the child went,\"\n \"The little lamb was sure to go.\"\n]\n\nasync def main():\n client = HumeStreamClient(\"\")\n config = LanguageConfig()\n async with client.connect([config]) as socket:\n for sample in samples:\n result = await socket.send_text(sample)\n emotions = result[\"language\"][\"predictions\"][0][\"emotions\"]\n print(emotions)\n\nasyncio.run(main())" + }, + { + "lang": "python", + "code": "[\n {'name': 'Admiration', 'score': 0.06379243731498718},\n {'name': 'Adoration', 'score': 0.07222934812307358},\n {'name': 'Aesthetic Appreciation', 'score': 
0.02808445133268833},\n {'name': 'Amusement', 'score': 0.027589013800024986},\n ......\n {'name': 'Surprise (positive)', 'score': 0.030542362481355667},\n {'name': 'Sympathy', 'score': 0.03246130049228668},\n {'name': 'Tiredness', 'score': 0.03606246039271355},\n {'name': 'Triumph', 'score': 0.01235896535217762}\n]" + }, + { + "lang": "python", + "code": "[\n {'name': 'Admiration', 'score': 0.06379243731498718},\n {'name': 'Adoration', 'score': 0.07222934812307358},\n {'name': 'Aesthetic Appreciation', 'score': 0.02808445133268833},\n {'name': 'Amusement', 'score': 0.027589013800024986},\n ......\n {'name': 'Surprise (positive)', 'score': 0.030542362481355667},\n {'name': 'Sympathy', 'score': 0.03246130049228668},\n {'name': 'Tiredness', 'score': 0.03606246039271355},\n {'name': 'Triumph', 'score': 0.01235896535217762}\n]" + } + ], + "hierarchy": { + "h2": { + "id": "emotional-language-from-text", + "title": "Emotional language from text" + }, + "h3": { + "id": "emotional-language-from-text", + "title": "Emotional language from text" + } + }, + "level": "h3", + "level_title": "Emotional language from text" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket-facial-expressions-from-an-image", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#facial-expressions-from-an-image", + "content": "This example uses our Facial Expression model to get expression measurements from an image.", + "code_snippets": [ + { + "lang": "python", + "code": "import asyncio\n\nfrom hume import HumeStreamClient, StreamSocket\nfrom hume.models.config import FaceConfig\n\nasync def main():\nclient = HumeStreamClient(\"\")\nconfig = FaceConfig(identify_faces=True)\nasync with client.connect([config]) as socket:\nresult = await socket.send_file(\"\")\nprint(result)\n\nasyncio.run(main())\n" + }, + { + "lang": "python", + "code": "import asyncio\n\nfrom hume import HumeStreamClient, StreamSocket\nfrom hume.models.config import FaceConfig\n\nasync def main():\nclient = HumeStreamClient(\"\")\nconfig = FaceConfig(identify_faces=True)\nasync with client.connect([config]) as socket:\nresult = await socket.send_file(\"\")\nprint(result)\n\nasyncio.run(main())\n" + } + ], + "hierarchy": { + "h2": { + "id": "facial-expressions-from-an-image", + "title": "Facial expressions from an image" + }, + "h3": { + "id": "facial-expressions-from-an-image", + "title": "Facial expressions from an image" + } + }, + "level": "h3", + "level_title": "Facial expressions from an image" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket-speech-prosody-from-an-audio-or-video-file", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#speech-prosody-from-an-audio-or-video-file", + "content": "This example uses our Speech Prosody model to get 
expression measurements from an audio or video file.", + "code_snippets": [ + { + "lang": "python", + "code": "import asyncio\n\nfrom hume import HumeStreamClient, StreamSocket\nfrom hume.models.config import ProsodyConfig\n\nasync def main():\n client = HumeStreamClient(\"\")\n config = ProsodyConfig()\n async with client.connect([config]) as socket:\n result = await socket.send_file(\"\")\n print(result)\n\nasyncio.run(main())" + }, + { + "lang": "python", + "code": "import asyncio\n\nfrom hume import HumeStreamClient, StreamSocket\nfrom hume.models.config import ProsodyConfig\n\nasync def main():\n client = HumeStreamClient(\"\")\n config = ProsodyConfig()\n async with client.connect([config]) as socket:\n result = await socket.send_file(\"\")\n print(result)\n\nasyncio.run(main())" + } + ], + "hierarchy": { + "h2": { + "id": "speech-prosody-from-an-audio-or-video-file", + "title": "Speech prosody from an audio or video file" + }, + "h3": { + "id": "speech-prosody-from-an-audio-or-video-file", + "title": "Speech prosody from an audio or video file" + } + }, + "level": "h3", + "level_title": "Speech prosody from an audio or video file" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket-streaming-with-your-own-websockets-client", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#streaming-with-your-own-websockets-client", + "content": "To call the API from your own WebSockets client you'll need the API endpoint, a JSON message, and an API key header/param. More information can be found in the Expression Measurement API reference.\nTo get started, you can use a WebSocket client of your choice to connect to the models endpoint:\n\n\nurl wss://api.hume.ai/v0/stream/models Make sure you configure the socket connection headers with your personal API key\n\n\n\n\nThe default WebSockets implementation in your browser may not have support for headers. 
If that's the case you can set\nthe apiKey query parameter.\nAnd finally, send the following JSON message on the socket:\n\n\nYou should receive a JSON response that looks something like this:", + "code_snippets": [ + { + "lang": "http", + "code": "X-Hume-Api-Key: " + }, + { + "lang": "http", + "code": "X-Hume-Api-Key: " + }, + { + "lang": "json", + "code": "{\n \"models\": {\n \"language\": {}\n },\n \"raw_text\": true,\n \"data\": \"Mary had a little lamb\"\n}" + }, + { + "lang": "json", + "code": "{\n \"models\": {\n \"language\": {}\n },\n \"raw_text\": true,\n \"data\": \"Mary had a little lamb\"\n}" + }, + { + "lang": "json", + "code": "{\n \"language\": {\n \"predictions\": [\n {\n \"text\": \"Mary\",\n \"position\": { \"begin\": 0, \"end\": 4 },\n \"emotions\": [\n { \"name\": \"Anger\", \"score\": 0.012025930918753147 },\n { \"name\": \"Joy\", \"score\": 0.056471485644578934 },\n { \"name\": \"Sadness\", \"score\": 0.031556881964206696 },\n ]\n },\n {\n \"text\": \"had\",\n \"position\": { \"begin\": 5, \"end\": 8 },\n \"emotions\": [\n { \"name\": \"Anger\", \"score\": 0.0016927534015849233 },\n { \"name\": \"Joy\", \"score\": 0.02388327568769455 },\n { \"name\": \"Sadness\", \"score\": 0.018137391656637192 },\n ...\n ]\n },\n ...\n ]\n }\n}" + }, + { + "lang": "json", + "code": "{\n \"language\": {\n \"predictions\": [\n {\n \"text\": \"Mary\",\n \"position\": { \"begin\": 0, \"end\": 4 },\n \"emotions\": [\n { \"name\": \"Anger\", \"score\": 0.012025930918753147 },\n { \"name\": \"Joy\", \"score\": 0.056471485644578934 },\n { \"name\": \"Sadness\", \"score\": 0.031556881964206696 },\n ]\n },\n {\n \"text\": \"had\",\n \"position\": { \"begin\": 5, \"end\": 8 },\n \"emotions\": [\n { \"name\": \"Anger\", \"score\": 0.0016927534015849233 },\n { \"name\": \"Joy\", \"score\": 0.02388327568769455 },\n { \"name\": \"Sadness\", \"score\": 0.018137391656637192 },\n ...\n ]\n },\n ...\n ]\n }\n}" + } + ], + "hierarchy": { + "h2": { + "id": "streaming-with-your-own-websockets-client", + "title": "Streaming with your own WebSockets client" + } + }, + "level": "h2", + "level_title": "Streaming with your own WebSockets client" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket-sending-images-or-audio", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#sending-images-or-audio", + "content": "The WebSocket endpoints of the Expression Measurement API require that you encode your media using base64. 
Here's a quick example of base64 encoding data in Python:", + "code_snippets": [ + { + "lang": "python", + "code": "import base64\nfrom pathlib import Path\n\ndef encode_data(filepath: Path) -> str:\nwith Path(filepath).open('rb') as fp:\nbytes_data = base64.b64encode(fp.read())\nencoded_data = bytes_data.decode(\"utf-8\")\nreturn encoded_data\n\nfilepath = \"\"\nencoded_data = encode_data(filepath)\nprint(encoded_data)\n" + }, + { + "lang": "python", + "code": "import base64\nfrom pathlib import Path\n\ndef encode_data(filepath: Path) -> str:\nwith Path(filepath).open('rb') as fp:\nbytes_data = base64.b64encode(fp.read())\nencoded_data = bytes_data.decode(\"utf-8\")\nreturn encoded_data\n\nfilepath = \"\"\nencoded_data = encode_data(filepath)\nprint(encoded_data)\n" + } + ], + "hierarchy": { + "h2": { + "id": "sending-images-or-audio", + "title": "Sending images or audio" + }, + "h3": { + "id": "sending-images-or-audio", + "title": "Sending images or audio" + } + }, + "level": "h3", + "level_title": "Sending images or audio" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.websocket-faqs", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/websocket", + "page_title": "Real-time measurement streaming", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#faqs", + "content": "WebSockets are a communication protocol that enables real-time, two-way communication between a client and a server\nover a single, long-lived connection. They provide a persistent connection that allows both the client and the server\nto initiate communication at any time.\n\n\nStreaming will disconnect every minute to ensure unused connections are released. You will need to reconnect by\nbuilding reconnect logic into your application. Implementation of reconnect logic will depend on the language and\nframework of your client application.\n\n\nWebSocket connections can experience disruptions due to network issues or other factors. Implement error handling\nmechanisms to gracefully handle connection failures. This includes handling connection timeouts, connection drops, and\nintermittent connection issues. Implement reconnection logic to automatically attempt to reconnect and resume\ncommunication when a connection is lost.\n\n\nHume WebSockets endpoints can return errors in response to invalid requests, authentication failures, or other issues.\nImplement proper error handling to interpret and handle these errors in your application. Provide meaningful error\nmessages to users and handle any exceptional scenarios gracefully. To prevent unknowingly initiating too many errors\nwe have put a limit on how many of the same errors you can have in a row. For a full list of the error responses you\ncan expect, please see our API errors page.\n\n\nThe benefit of using the WebSocket is the persistent connection. The open socket should be kept open until the\napplication is done utilizing the service and then closed. Avoid opening a new connection for each file or payload you
To ensure that context does not leak across multiple unrelated files you can use the\nreset_stream parameter.", + "hierarchy": { + "h2": { + "id": "faqs", + "title": "FAQs" + } + }, + "level": "h2", + "level_title": "FAQs" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.expression-measurement-api.faq", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/expression-measurement-api/faq", + "page_title": "Expression Measurement API FAQ", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/docs/expression-measurement-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "content": "Our models capture the widest-ever range of facial, speech, vocal, and language modulations with distinct emotional meanings. We label each of their outputs with emotion terms like “amusement” and “doubt,” not because they always correspond to those emotional experiences (they must not, given that they often differ from one modality to another), but because scientific studies show that these kinds of labels are the most precise language we have for describing expressions.\nOur models generate JSON or CSV output files with values typically ranging from 0 to 1 for each output in different segments of the input file (though values out of the 0-1 range are possible). Higher values indicate greater intensity of facial movements or vocal modulations that are most strongly associated with the emotion label corresponding to the output.\nA given expression will contain a blend of various emotions, and our models identify features that are associated with each emotional dimension. The score for each dimension is proportional to the likelihood that a human would perceive that emotion in the expression.\nSpecifically, the scores reflect the likelihood that an average human perceiver would use that emotion dimension to describe a given expression. The models were trained on human intensity ratings gathered using the methods described in this paper: Deep learning reveals what vocal bursts express in different cultures.\nWhile our models measure nuanced expressions that people most typically describe with emotion labels, it's important to remember that they are not a direct readout of what someone is experiencing. Emotional experience is subjective and its expression is multimodal and context-dependent. Moreover, at any given time, our facial expression outputs might be quite different than our vocal expression outputs. Therefore, it's important to follow best practices when interpreting outputs.\n\n\nThere are many different ways to use our platform. 
That said, successful research and applications of our models generally follow four steps: exploration, prediction, improvement, and testing.\nExploration: Researchers and developers generally begin by exploring patterns in their data.\n\n\nAre there apparent differences across participants or users in a study?\n\nDo patterns in expression vary systematically over time?\n\nAre there different patterns in expression associated with different stages of research or different product experiences?\n\n\nPrediction: A great way to evaluate and start building on our APIs is to use them to predict metrics that you already know are important.\n\n\nAre key outcomes like mental health or customer satisfaction better predicted by language and expression than by language alone?\n\nIf patterns in expression predict important outcomes, how do these patterns in expression vary over time and reveal critical moments for a user or participant?\n\n\nImprovement: The goal is often to use measures of expression to directly improve how the application works.\n\n\nSometimes, being able to predict an important metric is enough to make a decision. For example, if you can predict whether two people will get along based on their expressions and language, then your application can pair them up.\n\nMore formally, you can apply statistics or machine learning to the data you gather to improve how the application works.\n\nYou can incorporate our API outputs into an out-of-the-box large language model, simply by converting them into text (e.g., \"The user sounds calm but a little frustrated\") and feeding them in as prompts.\n\nYou can use expressions to teach an AI model. For example, if your application involves a large language model, such as an AI tutor, you can use measures of expression that predict student performance and well-being to directly fine-tune the AI to improve over time.\n\n\nTesting: After you've incorporated measures of expression into your application, they can be part of every A/B test you perform. You can now monitor the effects of changes to your application not just on engagement and retention, but also on how much users laugh or sigh in frustration, or show signs of interest or boredom.\n\n\n\n\nAs you build expression-related signals, metrics, analyses, models, or\nfeedback into an application, remember to use scientific best\npractices and\nfollow the ethics guidelines of\nthehumeinitiative.org.\n\n\nOur speech prosody model measures the tune, rhythm, and timbre of speech, whereas our language model measures the tone of the words being spoken. When using either model, we offer the flexibility to annotate emotional expressions at several levels of granularity, ranging from individual words to entire conversational turns. It is important to note that independent of granularity, our language model still takes into account up to 50 previous tokens (word or sub-words) of speech; otherwise, it would not be able to capture how the meaning of the words is affected by context.\nWord: At the word level, our model provides a separate output for each word, offering the most granular insight into emotional expression during speech.\nSentence: At the sentence level of granularity, we annotate the emotional tone of each spoken sentence with our prosody and language models.\nUtterance: Utterance-level granularity is between word- and sentence-level. It takes into account natural pauses or breaks in speech, providing more rapidly updated measures of emotional expression within a flowing conversation. 
For text inputs, utterance-level granularity will produce results identical to sentence-level granularity.\nConversational Turn: Conversational turn-level analysis is a lower level of granularity. It outputs a single output for each turn; that is, the full sequence of words and sentences spoken uninterrupted by each person. This approach provides a higher-level view of the emotional dynamics in a multi-participant dialogue. For text inputs, specifying conversational turn-level granularity for our Language model will produce results for entire passage.\n\n\nRemember, each level of granularity has its unique advantages, and choosing\nthe right one depends on the requirements of your specific application.\n\n\nState-of-the-art face detection and identification algorithms still occasionally make errors. For instance, our algorithm sometimes detects faces in shadows or reflections. Other times, our algorithm falsely attributes a new identity to someone who has already been in the video, sometimes due to changes in lighting or occlusion. These errors can result in additional face IDs. We are still working to fine-tune our algorithm to minimize errors in the contexts that our customers care about.\n\n\nOur vocal burst model detects vocalizations such as laughs, screams, sighs, gasps, “mms,” “uhs,” and “mhms.” Natural speech generally contains a few vocal bursts every minute, but scripted speech has fewer vocal bursts. If no vocal bursts are detected, it may be because there are no vocal bursts in the file. However, if you hear vocal bursts that aren't being detected by the algorithm, note that we are also in the process of improving our vocal burst detection algorithm, so please stay tuned for updates.\n\n\nWe've documented this issue thoroughly in our API errors page.\n\n\nYou can specify any of the following:\nzh, da, nl, en, en-AU, en-IN, en-NZ, en-GB, fr, fr-CA, de, hi, hi-Latn, id, it, ja, ko, no, pl, pt, pt-BR, pt-PT, ru, es, es-419, sv, ta, tr, or uk.\n\n\nWe support over 50 languages. Among these, 20 languages have additional support for transcription.\nLanguage Tag Language Text Transcription \nar Arabic \n\n \nbg Bulgarian \n\n \nca Catalan \n\n \ncs Czech \n\n \nda Danish \n\n \n\n \nde German \n\n \n\n \nel Greek \n\n \nen English* \n\n \n\n \nes Spanish \n\n \n\n \net Estonian \n\n \nfa Farsi \n\n \nfi Finnish \n\n \nfr French \n\n \n\n \nfr-ca French (Canada) \n\n \n\n \ngl Galician \n\n \ngu Gujarati \n\n \nhe Hebrew \n\n \nhi Hindi \n\n \n\n \nhr Croatian \n\n \nhu Hungarian \n\n \nhy Armenian \n\n \nid Indonesian \n\n \n\n \nit Italian \n\n \n\n \nja Japanese \n\n \n\n \nka Georgian \n\n \nko Korean \n\n \n\n \nku Kurdish \n\n \nlt Lithuanian \n\n \nlv Latvian \n\n \nmk FYRO Macedonian \n\n \nmn Mongolian \n\n \nmr Marathi \n\n \nms Malay \n\n \nmy Burmese \n\n \nnb Norwegian (Bokmål) \n\n \nnl Dutch \n\n \n\n \npl Polish \n\n \n\n \npt Portuguese \n\n \n\n \npt-br Portuguese (Brazil) \n\n \n\n \nro Romanian \n\n \nru Russian \n\n \n\n \nsk Slovak \n\n \nsl Slovenian \n\n \nsq Albanian \n\n \nsr Serbian \n\n \nsv Swedish \n\n \n\n \nth Thai \n\n \ntr Turkish \n\n \n\n \nuk Ukrainian \n\n \n\n \nur Urdu \n\n \nvi Vietnamese \n\n \nzh-cn Chinese \n\n \n\n \nzh-tw Chinese (Taiwan) \n\n \n\n \n\n\n\nEnglish is a primary language, and will yield more accurate predictions than\ninputs in other supported languages. Currently, our NER model only supports\nthe English language." 
+ }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.custom-models-api.overview", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/custom-models-api/overview", + "page_title": "Custom Models API", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/docs/custom-models-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "description": "Predict preferences more accurately than any LLM.", + "content": "Combined with words, expressions provide a wealth of information about our state of mind in any given context like customer satisfaction or frustration, patient health and well-being, student comprehension and confusion, and so much more.\nHume’s Custom Models API unlocks these insights at the click of a button, integrating patterns of facial expression, vocal expression, and language into a single custom model to predict whatever outcome you specify. This works by taking advantage not only of our state-of-the-art expression AI models, but also specialized language-expression embeddings that we have trained on conversational data.\nThe algorithm that drives our Custom Models API is pretrained on huge volumes of data. That means it already recognizes most patterns of expression and language that people form. All you have to do is add your labels.\nYou can access our Custom Models API through our no-code platform detailed in the next section or through our API. Once you create your initial labeled dataset, your labels will be used to train a custom model that you own and only your account can access. You’ll be able to run the model on any new file through our Playground and Custom Models API. You’ll also get statistics on the accuracy of your custom model." + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.custom-models-api.creating-your-dataset", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/custom-models-api/creating-your-dataset", + "page_title": "Creating your dataset", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/docs/custom-models-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "content": "In this guide we'll walk you through the process of creating a dataset using the Hume API. In future sections you'll use a dataset to train your own model.\n\n\nUploading media files to Hume\nUpload media files to Hume that you want to exist in your custom dataset. These should be images, videos, audio, or text files.\n\n\nThe API response will show an array of files newly registered with Hume.\n\n\nMaking your dataset file\nWe will create a CSV file that has a column for media file IDs and another column for labels.\nThe file ID column is required and must be named file_id. The label column can be named whatever you want. And you can even have multiple label columns, but only one will be used for training your model.\nHere we'll add a label column called expressions and an extra column just for housekeeping called file_name.\nfile_name file_id expressions \nneutral_face.jpeg b3cd5662-ea89-4f00-8eae-86218a556027 Neutral \npositive_face.jpeg 44bc2ac8-41d5-401e-8c88-df179b993be7 Positive \n\nRegistering your dataset\nNow that we have our media files registered and a CSV associating those files with labels, we can register our dataset.\n\n\nSuccess!
Your dataset is registered.", + "code_snippets": [ + { + "lang": "bash", + "code": "curl --location 'https://api.hume.ai/v0/registry/files'\n--request POST\n--header 'X-Hume-Api-Key: '\n--header 'Content-Type: application/json'\n--data '[\n {\n \"file\": {\n \"name\": \"\",\n \"uri\": \"\",\n \"hume_storage\": true,\n \"data_type\": \"image/png\",\n \"metadata\": {}\n }\n },\n {\n \"file\": {\n \"name\": \"\",\n \"uri\": \"\",\n \"hume_storage\": true,\n \"data_type\": \"image/png\",\n \"metadata\": {}\n }\n }\n]'" + }, + { + "lang": "bash", + "code": "curl --location 'https://api.hume.ai/v0/registry/files'\n--request POST\n--header 'X-Hume-Api-Key: '\n--header 'Content-Type: application/json'\n--data '[\n {\n \"file\": {\n \"name\": \"\",\n \"uri\": \"\",\n \"hume_storage\": true,\n \"data_type\": \"image/png\",\n \"metadata\": {}\n }\n },\n {\n \"file\": {\n \"name\": \"\",\n \"uri\": \"\",\n \"hume_storage\": true,\n \"data_type\": \"image/png\",\n \"metadata\": {}\n }\n }\n]'" + }, + { + "lang": "json", + "code": "[\n {\n \"file\": {\n \"id\": \"9f045781-3ecd-4f34-ba9c-969139c32256\",\n \"name\": \"\",\n \"uri\": \"\",\n \"upload_uri\": null,\n \"thumbnail_uri\": null,\n \"user_id\": \"\",\n \"data_type\": \"image/png\",\n \"created_on\": 1695851622,\n \"modified_on\": 1695851622,\n \"metadata\": {},\n \"hume_storage\": true,\n \"hume_storage_upload_timestamp\": null\n },\n \"attributes\": []\n },\n {\n \"file\": {\n \"id\": \"7f02f481-4sf4-dsf3-ba9c-345639c32256\",\n \"name\": \"\",\n ...\n }\n }\n]" + }, + { + "lang": "json", + "code": "[\n {\n \"file\": {\n \"id\": \"9f045781-3ecd-4f34-ba9c-969139c32256\",\n \"name\": \"\",\n \"uri\": \"\",\n \"upload_uri\": null,\n \"thumbnail_uri\": null,\n \"user_id\": \"\",\n \"data_type\": \"image/png\",\n \"created_on\": 1695851622,\n \"modified_on\": 1695851622,\n \"metadata\": {},\n \"hume_storage\": true,\n \"hume_storage_upload_timestamp\": null\n },\n \"attributes\": []\n },\n {\n \"file\": {\n \"id\": \"7f02f481-4sf4-dsf3-ba9c-345639c32256\",\n \"name\": \"\",\n ...\n }\n }\n]" + }, + { + "lang": "bash", + "code": "curl --location 'https://api.hume.ai/v0/registry/datasets'\n--request POST\n--header 'X-Hume-Api-Key: '\n--form 'name=\"Negative, Neutral, & Positive Facial Expressions\"'\n--form 'labels_file=@\"/labels-file.csv\"'" + }, + { + "lang": "bash", + "code": "curl --location 'https://api.hume.ai/v0/registry/datasets'\n--request POST\n--header 'X-Hume-Api-Key: '\n--form 'name=\"Negative, Neutral, & Positive Facial Expressions\"'\n--form 'labels_file=@\"/labels-file.csv\"'" + }, + { + "lang": "json", + "code": "{\n \"id\": \"8d6ddf39-d9ff-4f9c-9dbe-d6e288d8ddd7\", // Dataset ID\n \"name\": \"Negative, Neutral, & Positive Facial Expressions\",\n \"latest_version\": {\n \"id\": \"d153f723-8a13-48d2-ba74-2a6c333ff0db\", // Dataset Version ID\n \"labels_file_uri\": \"\",\n \"dataset_id\": \"8d6ddf39-d9ff-4f9c-9dbe-d6e288d8ddd7\",\n \"dataset_version\": 0,\n \"created_on\": 1695854279\n },\n \"modified_on\": 1695854279,\n \"metadata\": null\n}" + }, + { + "lang": "json", + "code": "{\n \"id\": \"8d6ddf39-d9ff-4f9c-9dbe-d6e288d8ddd7\", // Dataset ID\n \"name\": \"Negative, Neutral, & Positive Facial Expressions\",\n \"latest_version\": {\n \"id\": \"d153f723-8a13-48d2-ba74-2a6c333ff0db\", // Dataset Version ID\n \"labels_file_uri\": \"\",\n \"dataset_id\": \"8d6ddf39-d9ff-4f9c-9dbe-d6e288d8ddd7\",\n \"dataset_version\": 0,\n \"created_on\": 1695854279\n },\n \"modified_on\": 1695854279,\n \"metadata\": null\n}" + } + ] + }, + { + 
"objectID": "hume:dev.hume.ai:root.uv.docs.docs.custom-models-api.training-a-custom-model", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/custom-models-api/training-a-custom-model", + "page_title": "Training a custom model", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/docs/custom-models-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "content": "In this guide we will walk you through training your own custom model.\n\n\nStarting a training job\nHere we kick off a training job using a dataset that's already been registered for you. The resulting model will classify facial expressions as negative, positive, or neutral.\nNote that we've set target_feature to \"Affect\". This refers to the name of the column that we want to predict from our dataset.\n\n\nYou'll get back a job ID that you can use to check the status of your training job.\n\n\nChecking the status of your training job\nUsing the job ID from the previous step, you can get details about the current status of your training job.\n\n\nIt may take a few minutes for your model to be ready, but once training is complete you will see the status as COMPLETED and you'll have access to your new model.\n\n\nTesting your custom model\nYour custom model is ready to use!\nYou can test your model by sending a request to the Custom Models inference endpoint with URLs of images to classify. The model we trained is a facial expression classifier, so test URLs should point to images of faces.\n\n\nJust like before, we get back a job ID that we can use to check the status of our job.\n\n\nChecking the status of your inference job\nUse the job ID from the previous step to check on the status of your model inference job.\n\n\nOnce the model is done predicting the classes of the images you provided, you'll get a COMPLETED status.\n\n\nGetting model predictions\nFinally, you can request the actual model predictions from the inference job. 
The JSON result will show the predicted class for each image you provided.", + "code_snippets": [ + { + "lang": "bash", + "code": "curl --location https://api.hume.ai/v0/batch/jobs/tl/train \\\n --request POST \\\n --header \"X-Hume-Api-Key: $API_KEY\" \\\n --header 'Content-Type: application/json' \\\n --data '{\n \"custom_model\": {\n \"name\": \"Negative, Neutral, & Positive Facial Expressions\",\n \"description\": \"Is Facial Expression Negative, Neutral or Positive\"\n },\n \"dataset\": {\n \"id\": \"ef7955ce-1755-4942-8615-bc16e654e7e5\"\n },\n \"target_feature\": \"Affect\",\n \"task\": {\n \"type\": \"classification\"\n }\n}'" + }, + { + "lang": "bash", + "code": "curl --location https://api.hume.ai/v0/batch/jobs/tl/train \\\n --request POST \\\n --header \"X-Hume-Api-Key: $API_KEY\" \\\n --header 'Content-Type: application/json' \\\n --data '{\n \"custom_model\": {\n \"name\": \"Negative, Neutral, & Positive Facial Expressions\",\n \"description\": \"Is Facial Expression Negative, Neutral or Positive\"\n },\n \"dataset\": {\n \"id\": \"ef7955ce-1755-4942-8615-bc16e654e7e5\"\n },\n \"target_feature\": \"Affect\",\n \"task\": {\n \"type\": \"classification\"\n }\n}'" + }, + { + "lang": "json", + "code": "{\n \"job_id\": \"\"\n}" + }, + { + "lang": "json", + "code": "{\n \"job_id\": \"\"\n}" + }, + { + "lang": "bash", + "code": "curl --location --globoff https://api.hume.ai/v0/batch/jobs/$JOB_ID \\\n --header \"X-Hume-Api-Key: $API_KEY\"" + }, + { + "lang": "bash", + "code": "curl --location --globoff https://api.hume.ai/v0/batch/jobs/$JOB_ID \\\n --header \"X-Hume-Api-Key: $API_KEY\"" + }, + { + "lang": "json", + "code": "{\n \"type\": \"TRAINING\",\n \"job_id\": \"\",\n \"user_id\": \"\",\n \"request\": {\n \"custom_model\": {\n \"name\": \"Negative, Neutral, & Positive Facial Expressions\",\n \"description\": \"Is Facial Expression Negative, Neutral or Positive\",\n },\n \"dataset\": {\n \"id\": \"ef7955ce-1755-4942-8615-bc16e654e7e5\"\n },\n \"target_feature\": \"interaction\",\n \"task\": {\n \"type\": \"classification\"\n }\n },\n \"state\": {\n \"status\": \"COMPLETED\",\n \"created_timestamp_ms\": 42,\n \"started_timestamp_ms\": 32,\n \"ended_timestamp_ms\": 23,\n \t\t\"custom_model\": {\n\t\t\t\"id\": \"\"\n \t}\n }\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"TRAINING\",\n \"job_id\": \"\",\n \"user_id\": \"\",\n \"request\": {\n \"custom_model\": {\n \"name\": \"Negative, Neutral, & Positive Facial Expressions\",\n \"description\": \"Is Facial Expression Negative, Neutral or Positive\",\n },\n \"dataset\": {\n \"id\": \"ef7955ce-1755-4942-8615-bc16e654e7e5\"\n },\n \"target_feature\": \"interaction\",\n \"task\": {\n \"type\": \"classification\"\n }\n },\n \"state\": {\n \"status\": \"COMPLETED\",\n \"created_timestamp_ms\": 42,\n \"started_timestamp_ms\": 32,\n \"ended_timestamp_ms\": 23,\n \t\t\"custom_model\": {\n\t\t\t\"id\": \"\"\n \t}\n }\n}" + }, + { + "lang": "bash", + "code": "curl --location https://api.hume.ai/v0/batch/jobs/tl/inference \\\n --request POST \\\n --header X-Hume-Api-Key: $API_KEY \\\n --header 'Content-Type: application/json' \\\n --data '{\n \"custom_model\": {\n \"id\": \"\"\n },\n \"urls\": [\"\"]\n }'" + }, + { + "lang": "bash", + "code": "curl --location https://api.hume.ai/v0/batch/jobs/tl/inference \\\n --request POST \\\n --header X-Hume-Api-Key: $API_KEY \\\n --header 'Content-Type: application/json' \\\n --data '{\n \"custom_model\": {\n \"id\": \"\"\n },\n \"urls\": [\"\"]\n }'" + }, + { + "lang": "json", + "code": "{\n 
\"job_id\": \"\"\n}" + }, + { + "lang": "json", + "code": "{\n \"job_id\": \"\"\n}" + }, + { + "lang": "bash", + "code": "curl --location --globoff https://api.hume.ai/v0/batch/jobs/$JOB_ID \\\n --header \"X-Hume-Api-Key: $API_KEY\"" + }, + { + "lang": "bash", + "code": "curl --location --globoff https://api.hume.ai/v0/batch/jobs/$JOB_ID \\\n --header \"X-Hume-Api-Key: $API_KEY\"" + }, + { + "lang": "json", + "code": "{\n \"type\": \"INFERENCE\",\n \"job_id\": \"\",\n \"user_id\": \"\",\n \"request\": {},\n \"state\": {\n \"status\": \"COMPLETED\",\n \"created_timestamp_ms\": 42\n }\n}" + }, + { + "lang": "json", + "code": "{\n \"type\": \"INFERENCE\",\n \"job_id\": \"\",\n \"user_id\": \"\",\n \"request\": {},\n \"state\": {\n \"status\": \"COMPLETED\",\n \"created_timestamp_ms\": 42\n }\n}" + }, + { + "lang": "bash", + "code": "curl --request GET \\\n --url https://api.hume.ai/v0/batch/jobs/$JOB_ID/predictions \\\n --header \"X-Hume-Api-Key: $API_KEY\" \\\n --header \"accept: application/json; charset=utf-8\"" + }, + { + "lang": "bash", + "code": "curl --request GET \\\n --url https://api.hume.ai/v0/batch/jobs/$JOB_ID/predictions \\\n --header \"X-Hume-Api-Key: $API_KEY\" \\\n --header \"accept: application/json; charset=utf-8\"" + } + ] + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.custom-models-api.evaluating-a-custom-model", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/custom-models-api/evaluating-a-custom-model", + "page_title": "Evaluating your model", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/docs/custom-models-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "content": "Each custom model you train has a corresponding details page, viewable from the Hume website. The model details page displays metrics and visualizations to evaluate your model’s performance. This document serves to help you interpret those metrics and provide guidance on ways to improve your custom model.\n\n\nCustom model details\n\n\nLimitations of model validation metrics\nModel validation metrics are estimates based on a split of your dataset into training and evaluation parts. The larger the training set, the more reliable the metrics. However, it’s important to remember that these metrics are indicative and do not guarantee performance on unseen data." + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.custom-models-api.evaluating-a-custom-model-assessing-good-performance", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/custom-models-api/evaluating-a-custom-model", + "page_title": "Evaluating your custom model", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/docs/custom-models-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#assessing-good-performance", + "content": "Task-specific variances and performance metrics: with expression analysis, the complexity of your task determines the range of model performance, which in the case of classification models can technically vary from zero to perfect accuracy. 
Depending on the complexity of your task, less than perfect performance may still be very useful to serve as an indication of likelihood for your given target.\n\nInfluence of number of classes: prediction gets more difficult as the number of classes in your dataset increases, particularly when distinction between classes is more subtle. Inherently the level of chance will be higher with a lower number of classes. For example, for 3-classes your low-end performance is 33% accuracy vs 50% for a binary problem.\n\nApplication-specific requirements: when establishing acceptable accuracy for a model, it’s important to consider the sensitivity and impact of its application. An appropriate accuracy threshold varies with the specific demands and potential consequences of the model’s use, requiring a nuanced understanding of how accuracy levels intersect with the objectives and risks of each unique application.\n\n\n\n\nHow is it possible that my model achieved 100% accuracy?\nAchieving 100% accuracy is possible, however it is important to consider, especially in small datasets, that this might indicate model overfitting, caused by feature leakage or other data anomalies. Feature leakage occurs when your model inadvertently learns from data that explicitly includes label information (e.g., sentences of ‘I feel happy’ for a target label ‘happy’) leading to skewed results. To ensure more reliable performance, it’s advisable to use larger datasets and check that your data does not unintentionally contain explicit information about the labels.", + "hierarchy": { + "h3": { + "id": "assessing-good-performance", + "title": "Assessing 'good' performance" + } + }, + "level": "h3", + "level_title": "Assessing 'good' performance" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.custom-models-api.evaluating-a-custom-model-advanced-evaluation-metrics", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/custom-models-api/evaluating-a-custom-model", + "page_title": "Evaluating your custom model", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/docs/custom-models-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#advanced-evaluation-metrics", + "content": "In addition to accuracy, advanced metrics for a deeper evaluation of your custom model’s performance are also provided.\n\n\nAdvanced evaluation metrics\nTerm Definition \nAccuracy A fundamental metric in model performance evaluation which measures the proportion of correct predictions (true positives and true negatives) against the total number made. It’s straightforward and particularly useful for balanced datasets. However, accuracy can be misleading in imbalanced datasets where one class predominates, as a model might seem accurate by mainly predicting the majority class, neglecting the minority. This limitation underscores the importance of using additional metrics like precision, recall, and F1 score for a more nuanced assessment of model performance across different classes. \nPrecision Score which measures how often the model detects positives correctly. (e.g., When your model identifies a customer’s expression as 'satisfied', how often is the customer actually satisfied? Low precision would mean the model often misinterprets other expressions as satisfaction, leading to incorrect categorization.) \nRecall Score which measures how often the model correctly identifies actual positives. 
(e.g., Of all the genuine expressions of satisfaction, how many does your model accurately identify as 'satisfied'?\" Low recall implies the model is missing out on correctly identifying many true instances of customer satisfaction, failing to recognize them accurately.) \nF1 A metric that combines precision and recall, providing a balanced measure of a model’s accuracy, particularly useful in scenarios with class imbalance or when specific decision thresholds are vital. \nAverage Precision A metric that calculates the weighted average of precision at each threshold, providing a comprehensive measure of a model’s performance across different levels of recall. \nRoc Auc (Area under the ROC curve) a comprehensive measure of a model’s ability to distinguish between classes across all possible thresholds, making it ideal for overall performance evaluation and comparative analysis of different models.", + "hierarchy": { + "h3": { + "id": "advanced-evaluation-metrics", + "title": "Advanced evaluation metrics" + } + }, + "level": "h3", + "level_title": "Advanced evaluation metrics" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.custom-models-api.evaluating-a-custom-model-improving-model-performance", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/custom-models-api/evaluating-a-custom-model", + "page_title": "Evaluating your custom model", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/docs/custom-models-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#improving-model-performance", + "content": "Increase data quantity: adding more data will often help a model to learn a broader range of the given target’s representation, increasing the likelihood of capturing outliers from diverse patterns and scenarios.\n\nImprove label quality: ensure that each data point in your dataset is well-labeled with clear, accurate, and consistent annotations. Properly defined labels are essential for reducing misinterpretations and confusion, allowing the model to accurately represent and learn from the dataset’s true characteristics. Ensuring balance in the distribution of labels is important to ensure that the model is not biased towards a specific label.\n\nEnhance data quality: refine your dataset to ensure it is free from noise and irrelevant information. High-quality data (in terms of your target) enhances the model’s ability to make precise predictions and learn effectively from relevant features, critical in complex datasets.\n\nIncorporate clear audio data: when working with models analyzing vocal expressions, ensure audio files include clear, audible spoken language. This enhances the model’s ability to accurately interpret and learn from vocal nuances. 
Explore various segmentation strategies which evaluate the effect that environmental sound may have on your model’s performance.", + "hierarchy": { + "h3": { + "id": "improving-model-performance", + "title": "Improving model performance" + } + }, + "level": "h3", + "level_title": "Improving model performance" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.custom-models-api.faq", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/custom-models-api/faq", + "page_title": "Custom Models API FAQ", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/docs/custom-models-api" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "content": "Custom Models become essential when raw embeddings from Hume’s expression measurement models require further tailoring for specific applications. Here are scenarios where Custom Models offer significant advantages:\nSpecialized contexts: In environments with unique characteristics or requirements, Custom Models enable the creation of context-specific labels, ensuring more relevant and accurate insights. If your project demands a particular set of labels that are not covered by Hume’s emotional expression labels, Custom Models enable you to create and apply these labels, ensuring that the analysis aligns with your specific objectives.\n\nIterative model improvement: In evolving fields or scenarios where data and requirements change over time, Custom Models offer the flexibility to iteratively improve and adapt the model with new data and labels.\n\n\n\n\nIn labeling, regression involves assigning continuous numerical values, while classification involves categorizing data into discrete labels. During training, regression models learn to predict numerical values, whereas classification models learn to categorize data points into predefined classes.\nClassification use cases\nEmotion Categorization: Classification excels in distinguishing distinct emotional states, like identifying happiness, sadness, or surprise based on linguistic or physical expression cues.\n\nBinary Emotional Analysis: Useful in binary scenarios such as detecting presence or absence of specific emotional reactions, like engagement or disengagement in a learning environment.\n\nMulti-Emotional Identification: Perfect for classifying a range of emotions in complex scenarios, like understanding varied customer reactions from satisfied to dissatisfied based on their verbal and non-verbal feedback.\n\n\nRegression use cases\nIntensity Measurement: Regression is apt for quantifying the intensity or degree of emotional responses, such as assessing the level of stress or joy from vocal or facial cues.\n\nEmotional Progression Tracking: Ideal for monitoring the fluctuation of emotional states over time, like tracking the development of engagement or anxiety in therapy sessions.\n\n\nIn essence, regression models in emotional expression analysis assign continuous values representing intensities or degrees, while classification models categorize expressions into distinct states or reactions.\n\n\nOur custom model pipeline is designed to accommodate a wide range of data types, including audio, videos, and text, automatically integrating multimodal patterns of expression and language. However, not all datasets are created equal. 
For best results, we recommend using a dataset that meets certain standards:\nDataset size\nIdeally, use a dataset consisting of a minimum of 20 files, but more data is always better for model performance.\nMedia type consistency\nAll files within a dataset should be of the same media type (video, audio, image, text...etc.)\nIt's generally wise to maintain a consistent naming convention and file format for your dataset. At minimum, ensure files have appropriate extensions, such as .wav, .mp3, .aif, .mov, or .mp4.\nClassification vs regression tasks\nDepending on your model's objective (classification or regression), you can use different labeling approaches.\nClassification labels: use either strings or integers as labels (e.g., \"confused,\" \"focused\"). We limit the number of categorical labels to 50, and you must have at least two (binary).\n\nRegression targets: use either integers or decimals as targets. A model trained on a regression task will predict a continuous numerical value.\n\n\nLabel consistency\nWe recommend that your labels follow a consistent format; e.g., do not mix integers and strings. Furthermore, be sure to check for any typos in your labels, as these will be considered as separate classes, e.g., “happy” vs. “hapy.”\nClass imbalance\nIf possible, it helps to have a balanced distribution of labels in your dataset. For example, if you have 50 files and two classes, the best case is to have 25 samples per class. Generally, you need at least 10 samples per class to train a useful model, but more data per class is always better.\n\n\nOur custom models support the same range of languages as our expression\nmeasurement models. You can find a complete list of supported languages\nhere."
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.billing",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/docs/resources/billing",
+    "page_title": "Billing",
+    "breadcrumb": [
+      {
+        "title": "Resources",
+        "pathname": "/docs/resources"
+      }
+    ],
+    "tab": {
+      "title": "Documentation"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.billing-postpaid-billing",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/docs/resources/billing",
+    "page_title": "Billing",
+    "breadcrumb": [
+      {
+        "title": "Resources",
+        "pathname": "/docs/resources"
+      }
+    ],
+    "tab": {
+      "title": "Documentation"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown",
+    "hash": "#postpaid-billing",
+    "content": "We're in the process of transitioning to postpaid billing, a flexible pay-as-you-go system. You pay only for the services used, without needing to purchase credits upfront. This option is not available for all Hume users quite yet. 
You can check Usage & Billing to see if postpaid billing is available for your account and reach out to our support team if you're interested in getting early access.",
+    "hierarchy": {
+      "h2": {
+        "id": "postpaid-billing",
+        "title": "Postpaid billing"
+      }
+    },
+    "level": "h2",
+    "level_title": "Postpaid billing"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.billing-how-it-works",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/docs/resources/billing",
+    "page_title": "Billing",
+    "breadcrumb": [
+      {
+        "title": "Resources",
+        "pathname": "/docs/resources"
+      }
+    ],
+    "tab": {
+      "title": "Documentation"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown",
+    "hash": "#how-it-works",
+    "content": "Joining the platform: When you sign up and start using our API, you'll initially be using the free credits given to every new account.\n\nCredit card requirement: Once you've exhausted your credit balance, you'll need to add your credit card information to continue.\n\n\n\n\nSubscribe to postpaid billing before depleting your credit balance to ensure uninterrupted service.\nMonthly limit and notifications:\nYou'll have a default monthly limit of $100.\n\nIf you hit the $100 limit, API calls will return an error, and you'll be prompted to apply for a limit increase.\n\n\n\nBilling notifications:\nOn the first of each month, you'll receive an invoice for the previous month’s usage.\n\nIf your credit card is successfully added, it will be charged automatically.\n\nYou'll get a confirmation email for successful transactions or an alert if a transaction fails.\n\n\n\nFailure to pay: If payment isn't received within 7 days of the invoice date, API access will be suspended until the outstanding balance is settled.",
+    "hierarchy": {
+      "h2": {
+        "id": "how-it-works",
+        "title": "How it works"
+      }
+    },
+    "level": "h2",
+    "level_title": "How it works"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.billing-managing-your-account",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/docs/resources/billing",
+    "page_title": "Billing",
+    "breadcrumb": [
+      {
+        "title": "Resources",
+        "pathname": "/docs/resources"
+      }
+    ],
+    "tab": {
+      "title": "Documentation"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown",
+    "hash": "#managing-your-account",
+    "content": "Usage information: To view your monthly usage details, visit Usage & Billing. There you can track your API usage and see how much of your monthly limit has been utilized.\nNote: After your prepaid credits are used, further usage accrues to your monthly cost. You'll be charged this amount on the first of the following month. Your monthly cost is updated daily at 08:00 UTC.\n\n\n\nBilling portal: To manage your billing details, navigate to Usage & Billing and select Manage payments and view invoices. 
There you can update your payment method, view past invoices, and keep track of upcoming charges.", + "hierarchy": { + "h2": { + "id": "managing-your-account", + "title": "Managing your account" + } + }, + "level": "h2", + "level_title": "Managing your account" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.billing-pricing", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/billing", + "page_title": "Billing", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#pricing", + "content": "Find up-to-date pricing information at hume.ai/pricing.", + "hierarchy": { + "h2": { + "id": "pricing", + "title": "Pricing" + }, + "h3": { + "id": "pricing", + "title": "Pricing" + } + }, + "level": "h3", + "level_title": "Pricing" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.billing-billing-methodology", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/billing", + "page_title": "Billing", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#billing-methodology", + "content": "Audio and video:\nOur listed prices are presented per minute for ease of understanding.\n\nHowever, we bill these services on a corresponding per-second basis to ensure precise and fair charges. This means you are only billed for the exact amount of time your audio or video content is processed.\n\n\n\nImage and text:\nImage processing charges are incurred per image.\n\nText processing is billed based on the number of words processed.", + "hierarchy": { + "h2": { + "id": "billing-methodology", + "title": "Billing methodology" + }, + "h3": { + "id": "billing-methodology", + "title": "Billing methodology" + } + }, + "level": "h3", + "level_title": "Billing methodology" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.billing-faqs", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/billing", + "page_title": "Billing", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#faqs", + "content": "After you use all your credits, there might be a delay before we switch you to a subscription or stop access, which can result in a small negative credit balance. 
This is normal and won't affect your subscription.\nIf you have questions about your bill or need assistance understanding the charges, please contact billing@hume.ai.",
+    "hierarchy": {
+      "h2": {
+        "id": "faqs",
+        "title": "FAQs"
+      }
+    },
+    "level": "h2",
+    "level_title": "FAQs"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.errors",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/docs/resources/errors",
+    "page_title": "Errors",
+    "breadcrumb": [
+      {
+        "title": "Resources",
+        "pathname": "/docs/resources"
+      }
+    ],
+    "tab": {
+      "title": "Documentation"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.errors-configuration-errors",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/docs/resources/errors",
+    "page_title": "Errors",
+    "breadcrumb": [
+      {
+        "title": "Resources",
+        "pathname": "/docs/resources"
+      }
+    ],
+    "tab": {
+      "title": "Documentation"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown",
+    "hash": "#configuration-errors",
+    "content": "Configuration errors indicate that something about the API call was not configured correctly. The error message you get from the Hume APIs will often contain more information than we're able to provide on this page. For example if an audio file is too long, the error message from the API will specify the limit as well as the length of the audio received.\nError Code Description \nE0100 The WebSocket request could not be parsed as valid JSON. The Hume API requires JSON serializable payloads. \nE0101 You may be missing or improperly formatting a required field. This generic error indicates that the structure of your WebSocket request was invalid. Please see the error message you received in the API response for more details. \nE0102 The requested model was incompatible with the file format received. Some models are not compatible with every file type. For example, no facial expressions will be detected in a text file. Audio can be extracted out of some video files, but if the video has no audio, then models like Speech Prosody and Vocal Burst will not be available. \nE0200 Media provided could not be parsed into a known file format. Hume APIs support a wide range of file formats and media types including audio, video, image, text, but not all formats are supported. If you receive this error and believe your file type should be supported please reach out to our support team. \nE0201 Media could not be decoded as a Base64 encoded string. The data field in the request payload should be Base64 encoded bytes. If you want to pass raw text without encoding it you can do so with the raw_text parameter. \nE0202 No audio signal could be inferred from the media provided. This error indicates that audio models were configured, but the media provided could not be parsed into a valid audio file. \nE0203 Your audio file was too long. The limit is 5000 milliseconds. The WebSocket endpoints are intended for near real-time processing of data streams. For larger files, consider using the Hume Measurement API REST endpoints. \nE0204 Your video file was too long. For best performance we recommend passing individual frames of video as images rather than full video files. \nE0205 Your image file was too large. The limit is 3,000 x 3,000 pixels. The WebSocket endpoints are intended for near real-time processing of data streams. For larger files, consider using the Hume Measurement API REST endpoints. \nE0206 Your text file was too long. The limit is 10,000 characters. The WebSocket endpoints are intended for near real-time processing of data streams. For larger files, consider using the Hume Measurement API REST endpoints. \nE0207 The URL you've provided appears to be incorrect. Please verify that you've entered the correct URL and try submitting it again. If you're copying and pasting, ensure that the entire URL has been copied without any missing characters. \nE0300 You've run out of credits. Go to beta.hume.ai to purchase more. \nE0301 Your monthly credit limit has been reached. With post-paid pricing, users can accrue charges up to a predetermined monthly cap. This limit ensures that users do not accumulate excessive debt without assurance of payment. If you require a higher limit, you may manually apply for a credit limit increase. Alternatively, the limit will reset at the beginning of the next month. For more information, please see our docs on billing. \nE0400 You've referenced a resource that doesn't exist in our system. Please check if the name or identifier you used is correct and try again. \nE0401 Your upload failed. Please ensure your file meets our format and size requirements, and attempt to upload it again. \nE0402 The CSV file you used to create or update a dataset is missing a header row. The header specifies what each column represents. Update your CSV file and retry your request. For more information about how to format your dataset CSV please see our tutorial on dataset creation. \nE0500 Your dataset doesn't meet the minimum sample size requirement. Please add more files to your dataset and resubmit your training job. For more information, please see our docs on dataset requirements. \nE0501 Your dataset contains a target column with empty values. Please clean your dataset so that all labels are valid categorical or numeric values and then resubmit your training job. For more information on target columns please see our docs on dataset requirements. \nE0502 Your dataset contains a target column with infinite values. Please clean your dataset so that all labels are valid categorical or numeric values and then resubmit your training job. For more information on target columns please see our tutorial on dataset creation. \nE0503 For classification tasks, your dataset must include at least two distinct classes. Please check your dataset has two unique labels in the target column. \nE0504 Some classes in your dataset don't have enough samples. To ensure that the model we produce is of the highest quality we require your dataset to be relatively balanced across classes. Please check the error message for which class should have more samples (or remove that class entirely). Please see our docs on dataset requirements for more details. \nE0505 The target column you've selected doesn't exist in the dataset. Please review the columns that exist in your dataset and select a valid column name. \nE0506 Your chosen target column is not a valid target column. Please ensure that you select a column with labels rather than the file_id column or another reserved column name. 
\n\n\n\nThe connection will be closed automatically after ten identical configuration\nerrors to avoid unintended looping.", + "hierarchy": { + "h2": { + "id": "configuration-errors", + "title": "Configuration errors" + } + }, + "level": "h2", + "level_title": "Configuration errors" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.errors-service-errors", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/errors", + "page_title": "Errors", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#service-errors", + "content": "If you encounter an error code starting with I (for example, error code I0100), it indicates an outage or a bug in a Hume service. Our team will already have been alerted of the internal error, but if you need immediate assistance please reach out to our support team.", + "hierarchy": { + "h2": { + "id": "service-errors", + "title": "Service errors" + } + }, + "level": "h2", + "level_title": "Service errors" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.errors-warnings", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/errors", + "page_title": "Errors", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#warnings", + "content": "Warnings indicate that the payload was configured correctly, but no results could be returned.\nError Code Description \nW0101 No vocal bursts could be detected in the media. \nW0102 No face meshes could be detected in the media. \nW0103 No faces could be detected in the media. \nW0104 No emotional language could be detected in the media. \nW0105 No speech could be detected in the media.", + "hierarchy": { + "h2": { + "id": "warnings", + "title": "Warnings" + } + }, + "level": "h2", + "level_title": "Warnings" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.errors-common-errors", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/errors", + "page_title": "Errors", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#common-errors", + "content": "Some errors will not have an associated error code, but are documented here.", + "hierarchy": { + "h2": { + "id": "common-errors", + "title": "Common errors" + } + }, + "level": "h2", + "level_title": "Common errors" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.errors-transcript-confidence-below-threshold-value", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/errors", + "page_title": "Errors", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#transcript-confidence-below-threshold-value", + "content": "This error indicates that our transcription service had difficulty identifying the language spoken in your audio file or the quality was too low. 
We prioritize quality and accuracy, so if it cannot transcribe with confidence, our models won't be able to process it further.\nBy default, we use an automated language detection method for our Speech Prosody, Language, and NER models. However, if you know what language is being spoken in your media samples, you can specify it via its BCP-47 tag and potentially obtain more accurate results.\nIf you see the message above, there are a few steps you can take to resolve the issue:\nVerify we support the language\n\nEnsure you are providing clear, high-quality audio files.\n\nSpecify the language within your request if you know the language in the audio.\n\n\n\n\n\n\n\n\n\n\nYou can specify any of the following: zh, da, nl, en, en-AU, en-IN, en-NZ,\nen-GB, fr, fr-CA, de, hi, hi-Latn, id, it, ja, ko, no, pl, pt, pt-BR, pt-PT,\nru, es, es-419, sv, ta, tr, or uk",
+    "code_snippets": [
+      {
+        "lang": "python",
+        "code": "from hume import HumeBatchClient, TranscriptionConfig\nfrom hume.models.config import ProsodyConfig\n\nclient = HumeBatchClient(\"\")\nurls = [\"https://hume-tutorials.s3.amazonaws.com/faces.zip\"]\nmodel_configs = [ProsodyConfig()]\ntranscription_config = TranscriptionConfig(language=\"en\")\njob = client.submit_job(urls, model_configs, transcription_config=transcription_config)\n\nprint(job)\nprint(\"Running...\")\n\njob.await_complete()\npredictions = job.get_predictions()\nprint(predictions)"
+      },
+      {
+        "lang": "python",
+        "code": "from hume import HumeBatchClient, TranscriptionConfig\nfrom hume.models.config import ProsodyConfig\n\nclient = HumeBatchClient(\"\")\nurls = [\"https://hume-tutorials.s3.amazonaws.com/faces.zip\"]\nmodel_configs = [ProsodyConfig()]\ntranscription_config = TranscriptionConfig(language=\"en\")\njob = client.submit_job(urls, model_configs, transcription_config=transcription_config)\n\nprint(job)\nprint(\"Running...\")\n\njob.await_complete()\npredictions = job.get_predictions()\nprint(predictions)"
+      },
+      {
+        "lang": "json",
+        "code": "\"transcription\": {\n  \"language\": \"en\"\n}"
+      },
+      {
+        "lang": "json",
+        "code": "\"transcription\": {\n  \"language\": \"en\"\n}"
+      }
+    ],
+    "hierarchy": {
+      "h2": {
+        "id": "transcript-confidence-below-threshold-value",
+        "title": "Transcript confidence below threshold value"
+      },
+      "h3": {
+        "id": "transcript-confidence-below-threshold-value",
+        "title": "Transcript confidence below threshold value"
+      }
+    },
+    "level": "h3",
+    "level_title": "Transcript confidence below threshold value"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.science",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/docs/resources/science",
+    "page_title": "About the Science",
+    "breadcrumb": [
+      {
+        "title": "Resources",
+        "pathname": "/docs/resources"
+      }
+    ],
+    "tab": {
+      "title": "Documentation"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown",
+    "content": "What is it about speaking in person that allows us to understand each other so much more accurately than text alone? It isn’t what we say—it’s the way we say it. Science consistently demonstrates that expressions convey important information that is vital for social interaction and forms the building blocks of empathy.\nThat being said, expressions aren’t direct windows into the human mind. Measuring and interpreting expressive behavior is a complex and nuanced task that is the subject of ongoing scientific research.\nThe scientists at Hume AI have run some of the largest-ever psychology studies to better understand how humans express themselves. 
By investigating expressions around the world and what they mean to the people making them, we’ve mapped out the nuances of expression in the voice, language, and face in unprecedented detail. We’ve published this research in the world’s leading scientific journals and, for the first time, translated it into cutting-edge machine learning models.\nThese models, shaped by a new understanding of human expression, include:\nFacial Expression\n\nSpeech Prosody\n\nVocal Bursts\n\nEmotional Language"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.science-facial-expression",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/docs/resources/science",
+    "page_title": "About the science",
+    "breadcrumb": [
+      {
+        "title": "Resources",
+        "pathname": "/docs/resources"
+      }
+    ],
+    "tab": {
+      "title": "Documentation"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown",
+    "hash": "#facial-expression",
+    "content": "Facial expression is the most well-studied modality of expressive behavior, but the overwhelming focus has been on six discrete categories of facial movement or time-consuming manual annotations of facial movements (the scientifically useful, but outdated, Facial Action Coding System). Our research shows that these approaches capture less than 30% of what typical facial expressions convey.\nHume’s Facial Emotional Expression model generates 48 outputs encompassing the dimensions of emotional meaning people reliably attribute to facial expressions. As with every model, the labels for each dimension are proxies for how people tend to label the underlying patterns of behavior. They should not be treated as direct inferences of emotional experience.\nHume’s FACS 2.0 model is a new generation automated facial action coding system (FACS). With 55 outputs encompassing 26 traditional action units (AUs) and 29 other descriptive features (e.g., smile, scowl), FACS 2.0 is even more comprehensive than manual FACS annotations.\nOur facial expression models are packaged with face detection and work on both images and videos.\nIn addition to our image-based facial expression models, we also offer an Anonymized Facemesh model for applications in which it is essential to keep personally identifiable data on-device (e.g., for compliance with local laws). Instead of face images, our facemesh model processes facial landmarks detected using Google's MediaPipe library. It achieves about 80% accuracy relative to our image-based model.\nTo read more about the team’s research on facial expressions, check out our publications in American Psychologist (2018), Nature (2021), and iScience (2024).",
+    "hierarchy": {
+      "h2": {
+        "id": "facial-expression",
+        "title": "Facial Expression"
+      },
+      "h3": {
+        "id": "facial-expression",
+        "title": "Facial Expression"
+      }
+    },
+    "level": "h3",
+    "level_title": "Facial Expression"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.science-speech-prosody",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/docs/resources/science",
+    "page_title": "About the science",
+    "breadcrumb": [
+      {
+        "title": "Resources",
+        "pathname": "/docs/resources"
+      }
+    ],
+    "tab": {
+      "title": "Documentation"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown",
+    "hash": "#speech-prosody",
+    "content": "Speech prosody is not about the words you say, but the way you say them. 
It is distinct from language (words) and from non-linguistic vocal utterances.\nOur Speech Prosody model generates 48 outputs encompassing the 48 dimensions of emotional meaning that people reliably distinguish from variations in speech prosody. As with every model, the labels for each dimension are proxies for how people tend to label the underlying patterns of behavior. They should not be treated as direct inferences of emotional experience.\nOur Speech Prosody model is packaged with speech detection and works on both audio files and videos.\nTo read more about the team’s research on speech prosody, check out our publications in Nature Human Behaviour (2019) and Proceedings of the 31st ACM International Conference on Multimedia (2023).", + "hierarchy": { + "h2": { + "id": "speech-prosody", + "title": "Speech Prosody" + }, + "h3": { + "id": "speech-prosody", + "title": "Speech Prosody" + } + }, + "level": "h3", + "level_title": "Speech Prosody" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.science-vocal-bursts", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/science", + "page_title": "About the science", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#vocal-bursts", + "content": "Non-linguistic vocal utterances, including sighs, laughs, oohs, ahhs, umms, and shrieks (to name but a few), are a particularly powerful and understudied modality of expressive behavior. Recent studies reveal that they reliably convey distinct emotional meanings that are extremely well-preserved across most cultures.\nNon-linguistic vocal utterances have different acoustic characteristics than speech emotional intonation (prosody) and need to be modeled separately.\nOur Vocal Burst Expression model generates 48 outputs encompassing the distinct dimensions of emotional meaning that people distinguish in vocal bursts. As with every model, the labels for each dimension are proxies for how people tend to label the underlying patterns of behavior. They should not be treated as direct inferences of emotional experience.\nOur Vocal Burst Description model provides a more descriptive and categorical view of nonverbal vocal expressions (“gasp,” “mhm,” etc.) intended for use cases such as audio captioning. It generates 67 descriptors, including 30 call types (“sigh,” “laugh,” “shriek,” etc.) 
and 37 common onomatopoeia transliterations of vocal bursts (“hmm,” “ha,” “mhm,” etc.).\nOur vocal burst models are packaged with non-linguistic vocal utterance detection and work on both audio files and videos.\nTo read more about the team’s research on vocal bursts, check out our publications in American Psychologist (2019), Interspeech 2022, ICASSP 2023, and Nature Human Behaviour (2023).",
+    "hierarchy": {
+      "h2": {
+        "id": "vocal-bursts",
+        "title": "Vocal Bursts"
+      },
+      "h3": {
+        "id": "vocal-bursts",
+        "title": "Vocal Bursts"
+      }
+    },
+    "level": "h3",
+    "level_title": "Vocal Bursts"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.science-emotional-language",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/docs/resources/science",
+    "page_title": "About the science",
+    "breadcrumb": [
+      {
+        "title": "Resources",
+        "pathname": "/docs/resources"
+      }
+    ],
+    "tab": {
+      "title": "Documentation"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown",
+    "hash": "#emotional-language",
+    "content": "The words we say include explicit disclosures of emotion and implicit emotional connotations. These meanings are complex and high-dimensional.\nFrom written or spoken words, our Emotional Language model generates 53 outputs encompassing different dimensions of emotion that people often perceive from language. As with every model, the labels for each dimension are proxies for how people tend to label the underlying patterns of behavior. They should not be treated as direct inferences of emotional experience.\nOur Emotional Language model is packaged with speech transcription and works on audio files, videos, and text.\nOur Named Entity Recognition (NER) model can also identify topics or entities (people, places, organizations, etc.) 
mentioned in speech or text and the tone of language they are associated with, as identified by our emotional language model.", + "hierarchy": { + "h2": { + "id": "emotional-language", + "title": "Emotional Language" + }, + "h3": { + "id": "emotional-language", + "title": "Emotional Language" + } + }, + "level": "h3", + "level_title": "Emotional Language" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.science-published-research", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/science", + "page_title": "About the science", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#published-research", + "content": "You can access a comprehensive list of our published research papers along with PDFs for download here.", + "hierarchy": { + "h2": { + "id": "published-research", + "title": "Published Research" + } + }, + "level": "h2", + "level_title": "Published Research" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.use-case-guidelines", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/use-case-guidelines", + "page_title": "Use case guidelines", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.use-case-guidelines-ethical-guidelines", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/use-case-guidelines", + "page_title": "Use case guidelines", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#ethical-guidelines", + "content": "Understanding expressive communication is essential to building technologies that address our needs and improve our well-being. But technologies that recognize language and nonverbal behavior can also pose risks. That’s why we require that all commercial applications incorporating our APIs adhere to the ethical guidelines of The Hume Initiative.", + "hierarchy": { + "h2": { + "id": "ethical-guidelines", + "title": "Ethical guidelines" + } + }, + "level": "h2", + "level_title": "Ethical guidelines" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.use-case-guidelines-scientific-best-practices", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/use-case-guidelines", + "page_title": "Use case guidelines", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#scientific-best-practices", + "content": "Use inductive methods to identify the expressive signals that matter for your application. Even if you are interested in a specific emotion like “anger,” how that emotion is expressed depends on setting: anger on a football field sounds different than anger on a customer service call. 
Our models succinctly compress the representation of emotional expression so that, even with limited data, you can examine how their outputs can be used in your specific research or application setting. You can do this by using statistical methods like regression or classification, or by examining the distribution of expressions in your data using our Playground.\n\nNever assume a one-to-one mapping between emotional experience and expression. The outputs of our models should be treated as measurements of complex expressive behavior. We provide labels to our outputs indicating what these dimensions of expression are often reported to mean, but these labels should not be interpreted as direct inferences of how someone is feeling at any given time. Rather, “a full understanding of emotional expression and experience requires an appreciation of a wide degree of variability in display behavior, subjective experience, patterns of appraisal, and physiological response, both within and across emotion categories” (Cowen et al., 2019).\n\nNever overlook the nuances in emotional expression. For instance, avoid the temptation to focus on just the top label. We provide interactive visualizations in our Playground to help you map out complex patterns in real-life emotional behavior. These visualizations are informed by recent advances in emotion science, departing from reductive models that long “anchored the science of emotion to a predominant focus on prototypical facial expressions of the “basic six”: anger, disgust, fear, sadness, surprise, and happiness,” and embracing how “new discoveries reveal that the two most commonly studied models of emotion—the basic six and the affective circumplex (comprising valence and arousal)—each capture at most 30% of the variance in the emotional experiences people reliably report and in the distinct expressions people reliably recognize.” (Cowen et al., 2019)\n\nAccount for culture-specific meanings and display tendencies. Studies have routinely observed subtle cultural differences in the meaning of expressions as well as broader “variations in the frequency and intensity with which different expressions were displayed” (Cowen et al., 2022). 
Given these differences, empathic AI applications should be tested in each population in which they are deployed and fine-tuned when necessary.\nRead about the science behind our models if you’d like to delve deeper into how they work.", + "hierarchy": { + "h2": { + "id": "scientific-best-practices", + "title": "Scientific best practices" + } + }, + "level": "h2", + "level_title": "Scientific best practices" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.privacy", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/privacy", + "page_title": "Privacy", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.privacy-privacy-policy", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/privacy", + "page_title": "Privacy", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#privacy-policy", + "content": "Our Privacy Policy governs how we collect and use personal information submitted to our products.", + "hierarchy": { + "h2": { + "id": "privacy-policy", + "title": "Privacy Policy" + } + }, + "level": "h2", + "level_title": "Privacy Policy" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.privacy-api-data-usage-policy", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/privacy", + "page_title": "Privacy", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#api-data-usage-policy", + "content": "Our API Data Usage Policy details how and when we store API data.", + "hierarchy": { + "h2": { + "id": "api-data-usage-policy", + "title": "API Data Usage Policy" + } + }, + "level": "h2", + "level_title": "API Data Usage Policy" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.docs.docs.resources.privacy-consumer-services-faq", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/docs/resources/privacy", + "page_title": "Privacy", + "breadcrumb": [ + { + "title": "Resources", + "pathname": "/docs/resources" + } + ], + "tab": { + "title": "Documentation" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#consumer-services-faq", + "content": "Our Consumer Services FAQ explains how and when we store data processed by our frontend applications like our Playground.\n\n\n\n\nFor non-API consumer products like our Playground and Demo, we may use content such as images, video files, audio files, and text files to improve our services. You can request to opt out of having your content used to improve our services at any time by contacting us on Discord with your request. This opt-out will apply on a going-forward basis only.\nPlease note that for our API product, Hume AI will not use data submitted by customers via our API to train or improve our models.\n\n\nYou can delete your account by submitting a user account deletion request in your Profile page on the Hume playground. Once you submit your deletion request, we will delete your account within 30 days. 
Please note that for security reasons, once you delete your account, you may not re-sign up for an account with the same email address.\n\n\nWe share content with a select group of trusted service providers that help us provide our services. We share the minimum amount of content we need in order to accomplish this purpose and our service providers are subject to strict confidentiality and security obligations. Please see our Privacy Policy for more information on who we may share your content with.\n\n\nContent is stored on Hume AI systems and our trusted service providers' systems in the US and around the world.\n\n\nA limited number of authorized Hume AI personnel may view and access user content only as needed for these reasons: (1) investigating abuse or a security incident; (2) to provide support to you if you reach out to us with questions about your account; (3) or to comply with legal obligations. Access to content is subject to technical access controls and limited only to authorized personnel on a need-to-know basis. Additionally, we monitor and log all access to user content and authorized personnel must undergo security and privacy training prior to accessing any user content.\n\n\nNo. We do not sell your data or share your content with third parties for marketing purposes.\n\n\nPlease message the moderators on our Discord Server.",
+    "hierarchy": {
+      "h2": {
+        "id": "consumer-services-faq",
+        "title": "Consumer Services FAQ"
+      }
+    },
+    "level": "h2",
+    "level_title": "Consumer Services FAQ"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/changelog",
+    "page_title": "Changelog",
+    "breadcrumb": [],
+    "tab": {
+      "title": "Changelog"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown"
+  },
+  {
+    "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-evi-api-additions",
+    "org_id": "hume",
+    "domain": "dev.hume.ai",
+    "pathname": "/changelog",
+    "page_title": "Changelog",
+    "breadcrumb": [],
+    "tab": {
+      "title": "Changelog"
+    },
+    "visible_by": [
+      "role/everyone"
+    ],
+    "authed": false,
+    "type": "markdown",
+    "hash": "#evi-api-additions",
+    "content": "Added support for streaming custom language model responses in parts. Developers can send text chunks to start generating audio responses much faster.\nThe Custom Language Model endpoint now expects text to be formatted in the following payload:\n\nAdded support for pausing and resuming EVI responses with pause_assistant_message and resume_assistant_message. Sending a pause message stops EVI from generating and speaking Assistant messages. 
Sending a resume message allows EVI to continue responding to the User messages.", + "code_snippets": [ + { + "code": "# send this to add text\n{\"type\": \"assistant_input\", \"text\": \"\"}\n\n# send this message when you're done speaking\n{\"type\": \"assistant_end\"}" + } + ], + "hierarchy": { + "h2": { + "id": "evi-api-additions", + "title": "EVI API additions" + }, + "h3": { + "id": "evi-api-additions", + "title": "EVI API additions" + } + }, + "level": "h3", + "level_title": "EVI API additions" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-evi-api-modifications", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#evi-api-modifications", + "content": "Increased the limit for tool descriptions from 100 chars to 512 chars\n\nSet the maximum length for tool_name to 64 chars", + "hierarchy": { + "h2": { + "id": "evi-api-modifications", + "title": "EVI API modifications" + }, + "h3": { + "id": "evi-api-modifications", + "title": "EVI API modifications" + } + }, + "level": "h3", + "level_title": "EVI API modifications" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-evi-api-additions-1", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#evi-api-additions-1", + "content": "Added support for built-in tools, starting with web search: Using built-in tools\n\nAdded support for phone calling through a Twilio integration: Phone calling\n\nAdded DACHER voice to the voice configuration options\n\nAdded support for the gpt-4o language model", + "hierarchy": { + "h2": { + "id": "evi-api-additions-1", + "title": "EVI API additions" + }, + "h3": { + "id": "evi-api-additions-1", + "title": "EVI API additions" + } + }, + "level": "h3", + "level_title": "EVI API additions" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-evi-api-modifications-1", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#evi-api-modifications-1", + "content": "Increased the limit for tool descriptions from 100 chars to 512 chars", + "hierarchy": { + "h2": { + "id": "evi-api-modifications-1", + "title": "EVI API modifications" + }, + "h3": { + "id": "evi-api-modifications-1", + "title": "EVI API modifications" + } + }, + "level": "h3", + "level_title": "EVI API modifications" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-evi-api-additions-2", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#evi-api-additions-2", + "content": "Added support for three open-source models through the Groq language model provider: Gemma 7B (gemma-7b-it), Llama 3 8B (llama3-8b-8192), and Llama 3 70B (llama3-70b-8192)\n\nAdded support for Llama 30 70B language model through the Fireworks language model provider 
(accounts/fireworks/models/llama-v3-70b-instruct)\n\nAdded a custom_session_id field in the SessionSettings message, and documentation for using it: Custom Session ID", + "hierarchy": { + "h2": { + "id": "evi-api-additions-2", + "title": "EVI API additions" + }, + "h3": { + "id": "evi-api-additions-2", + "title": "EVI API additions" + } + }, + "level": "h3", + "level_title": "EVI API additions" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-evi-api-modifications-2", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#evi-api-modifications-2", + "content": "Disabled short response generation for custom language models\n\nAdded error codes for when Hume credits run out while using EVI. Users will receive either the E0300 error code if they are out of credits or E0301 if they are blocked via subscription. The WebSocket connection will also be closed with code 1008", + "hierarchy": { + "h2": { + "id": "evi-api-modifications-2", + "title": "EVI API modifications" + }, + "h3": { + "id": "evi-api-modifications-2", + "title": "EVI API modifications" + } + }, + "level": "h3", + "level_title": "EVI API modifications" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-bugs-bashed", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#bugs-bashed", + "content": "Fixed an issue with the from_text field in UserMessage. It is now set to True if any part of the UserMessage is from a developer-provided UserInput message", + "hierarchy": { + "h2": { + "id": "bugs-bashed", + "title": "Bugs bashed" + }, + "h3": { + "id": "bugs-bashed", + "title": "Bugs bashed" + } + }, + "level": "h3", + "level_title": "Bugs bashed" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-evi-api-additions-3", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#evi-api-additions-3", + "content": "Added support for Tools through our tool use feature\n\nAdded ToolErrorMessage as a supported input type", + "hierarchy": { + "h2": { + "id": "evi-api-additions-3", + "title": "EVI API additions" + }, + "h3": { + "id": "evi-api-additions-3", + "title": "EVI API additions" + } + }, + "level": "h3", + "level_title": "EVI API additions" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-bugs-bashed-1", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#bugs-bashed-1", + "content": "Added an error that returns status 400 if a Config, Tool, or Prompt is created with a name or versionDescription that's too long or non-ASCII. 
Names must be under 75 chars, versionDescription must be under 256 chars, description for Tools must be under 100 chars, fallback_content for Tools must be under 2048 chars, and model_resource for LanguageModels must be under 1024 chars\n\nFixed several edge cases and bugs involving Tool calls, including supporting only single tool calls with EVI (no parallel tool calling)", + "hierarchy": { + "h2": { + "id": "bugs-bashed-1", + "title": "Bugs bashed" + }, + "h3": { + "id": "bugs-bashed-1", + "title": "Bugs bashed" + } + }, + "level": "h3", + "level_title": "Bugs bashed" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-evi-api-additions-4", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#evi-api-additions-4", + "content": "Added support for reading language model type from EVI configs\n\nAdded support for reading language model temperature from EVI configs\n\nAdded system prompt to SessionSettings message to allow dynamic prompt updating", + "hierarchy": { + "h2": { + "id": "evi-api-additions-4", + "title": "EVI API additions" + }, + "h3": { + "id": "evi-api-additions-4", + "title": "EVI API additions" + } + }, + "level": "h3", + "level_title": "EVI API additions" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-evi-api-changes", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#evi-api-changes", + "content": "Renamed TextInput message to UserInput to indicate this is text to be added to the chat history as a User message and used as context by the LLM\n\nRenamed TtsInput message to AssistantInput to make it clear that this is input text to be spoken by EVI and added to the chat history as an Assistant message\n\nMoved audio configuration options to SessionSettings message", + "hierarchy": { + "h2": { + "id": "evi-api-changes", + "title": "EVI API changes" + }, + "h3": { + "id": "evi-api-changes", + "title": "EVI API changes" + } + }, + "level": "h3", + "level_title": "EVI API changes" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.changelog.changelog.changelog-bugs-bashed-2", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/changelog", + "page_title": "Changelog", + "breadcrumb": [], + "tab": { + "title": "Changelog" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "type": "markdown", + "hash": "#bugs-bashed-2", + "content": "Fixed chats staying open after errors, chats will now end upon exceptions\n\nAdded an error thrown if config uses both custom_model and prompt, because custom language models do not use prompts\n\nFixed issue where erroring when sending errors would cause the API to get stuck\n\nAdded clearer errors for custom language models\n\nAdded unable to configure audio service error\n\nAdded an error to invalidate outdated language model responses", + "hierarchy": { + "h2": { + "id": "bugs-bashed-2", + "title": "Bugs bashed" + }, + "h3": { + "id": "bugs-bashed-2", + "title": "Bugs bashed" + } + }, + "level": "h3", + "level_title": "Bugs bashed" + }, + { + "objectID": 
"hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_tools.endpoint_tools.list-tools", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/tools/list-tools", + "page_title": "List tools", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Tools", + "pathname": "/reference/empathic-voice-interface-evi/tools" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_tools.list-tools", + "method": "GET", + "endpoint_path": "/v0/evi/tools", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_tools.endpoint_tools.create-tool", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/tools/create-tool", + "page_title": "Create tool", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Tools", + "pathname": "/reference/empathic-voice-interface-evi/tools" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_tools.create-tool", + "method": "POST", + "endpoint_path": "/v0/evi/tools", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_tools.endpoint_tools.list-tool-versions", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/tools/list-tool-versions", + "page_title": "List tool versions", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Tools", + "pathname": "/reference/empathic-voice-interface-evi/tools" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_tools.list-tool-versions", + "method": "GET", + "endpoint_path": "/v0/evi/tools/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_tools.endpoint_tools.create-tool-version", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/tools/create-tool-version", + "page_title": "Create tool version", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Tools", + "pathname": "/reference/empathic-voice-interface-evi/tools" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": 
"http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_tools.create-tool-version", + "method": "POST", + "endpoint_path": "/v0/evi/tools/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_tools.endpoint_tools.delete-tool", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/tools/delete-tool", + "page_title": "Delete tool", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Tools", + "pathname": "/reference/empathic-voice-interface-evi/tools" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_tools.delete-tool", + "method": "DELETE", + "endpoint_path": "/v0/evi/tools/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_tools.endpoint_tools.update-tool-name", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/tools/update-tool-name", + "page_title": "Update tool name", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Tools", + "pathname": "/reference/empathic-voice-interface-evi/tools" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_tools.update-tool-name", + "method": "PATCH", + "endpoint_path": "/v0/evi/tools/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_tools.endpoint_tools.get-tool-version", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/tools/get-tool-version", + "page_title": "Get tool version", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Tools", + "pathname": "/reference/empathic-voice-interface-evi/tools" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_tools.get-tool-version", + "method": "GET", + "endpoint_path": "/v0/evi/tools/:id/version/:version", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_tools.endpoint_tools.delete-tool-version", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": 
"/reference/empathic-voice-interface-evi/tools/delete-tool-version", + "page_title": "Delete tool version", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Tools", + "pathname": "/reference/empathic-voice-interface-evi/tools" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_tools.delete-tool-version", + "method": "DELETE", + "endpoint_path": "/v0/evi/tools/:id/version/:version", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_tools.endpoint_tools.update-tool-description", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/tools/update-tool-description", + "page_title": "Update tool description", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Tools", + "pathname": "/reference/empathic-voice-interface-evi/tools" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_tools.update-tool-description", + "method": "PATCH", + "endpoint_path": "/v0/evi/tools/:id/version/:version", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_prompts.endpoint_prompts.list-prompts", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/prompts/list-prompts", + "page_title": "List prompts", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Prompts", + "pathname": "/reference/empathic-voice-interface-evi/prompts" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_prompts.list-prompts", + "method": "GET", + "endpoint_path": "/v0/evi/prompts", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_prompts.endpoint_prompts.create-prompt", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/prompts/create-prompt", + "page_title": "Create prompt", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Prompts", + "pathname": "/reference/empathic-voice-interface-evi/prompts" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + 
"api_endpoint_id": "endpoint_prompts.create-prompt", + "method": "POST", + "endpoint_path": "/v0/evi/prompts", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_prompts.endpoint_prompts.list-prompt-versions", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/prompts/list-prompt-versions", + "page_title": "List prompt versions", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Prompts", + "pathname": "/reference/empathic-voice-interface-evi/prompts" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_prompts.list-prompt-versions", + "method": "GET", + "endpoint_path": "/v0/evi/prompts/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_prompts.endpoint_prompts.create-prompt-verison", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/prompts/create-prompt-verison", + "page_title": "Create prompt version", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Prompts", + "pathname": "/reference/empathic-voice-interface-evi/prompts" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_prompts.create-prompt-verison", + "method": "POST", + "endpoint_path": "/v0/evi/prompts/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_prompts.endpoint_prompts.delete-prompt", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/prompts/delete-prompt", + "page_title": "Delete prompt", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Prompts", + "pathname": "/reference/empathic-voice-interface-evi/prompts" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_prompts.delete-prompt", + "method": "DELETE", + "endpoint_path": "/v0/evi/prompts/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_prompts.endpoint_prompts.update-prompt-name", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": 
"/reference/empathic-voice-interface-evi/prompts/update-prompt-name", + "page_title": "Update prompt name", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Prompts", + "pathname": "/reference/empathic-voice-interface-evi/prompts" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_prompts.update-prompt-name", + "method": "PATCH", + "endpoint_path": "/v0/evi/prompts/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_prompts.endpoint_prompts.get-prompt-version", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/prompts/get-prompt-version", + "page_title": "Get prompt version", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Prompts", + "pathname": "/reference/empathic-voice-interface-evi/prompts" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_prompts.get-prompt-version", + "method": "GET", + "endpoint_path": "/v0/evi/prompts/:id/version/:version", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_prompts.endpoint_prompts.delete-prompt-version", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/prompts/delete-prompt-version", + "page_title": "Delete prompt version", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Prompts", + "pathname": "/reference/empathic-voice-interface-evi/prompts" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_prompts.delete-prompt-version", + "method": "DELETE", + "endpoint_path": "/v0/evi/prompts/:id/version/:version", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_prompts.endpoint_prompts.update-prompt-description", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/prompts/update-prompt-description", + "page_title": "Update prompt description", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Prompts", + "pathname": "/reference/empathic-voice-interface-evi/prompts" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": 
"http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_prompts.update-prompt-description", + "method": "PATCH", + "endpoint_path": "/v0/evi/prompts/:id/version/:version", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_configs.endpoint_configs.list-configs", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/configs/list-configs", + "page_title": "List configs", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Configs", + "pathname": "/reference/empathic-voice-interface-evi/configs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_configs.list-configs", + "method": "GET", + "endpoint_path": "/v0/evi/configs", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_configs.endpoint_configs.create-config", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/configs/create-config", + "page_title": "Create config", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Configs", + "pathname": "/reference/empathic-voice-interface-evi/configs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_configs.create-config", + "method": "POST", + "endpoint_path": "/v0/evi/configs", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_configs.endpoint_configs.list-config-versions", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/configs/list-config-versions", + "page_title": "List config versions", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Configs", + "pathname": "/reference/empathic-voice-interface-evi/configs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_configs.list-config-versions", + "method": "GET", + "endpoint_path": "/v0/evi/configs/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_configs.endpoint_configs.create-config-version", + "org_id": "hume", + "domain": "dev.hume.ai", + 
"pathname": "/reference/empathic-voice-interface-evi/configs/create-config-version", + "page_title": "Create config version", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Configs", + "pathname": "/reference/empathic-voice-interface-evi/configs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_configs.create-config-version", + "method": "POST", + "endpoint_path": "/v0/evi/configs/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_configs.endpoint_configs.delete-config", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/configs/delete-config", + "page_title": "Delete config", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Configs", + "pathname": "/reference/empathic-voice-interface-evi/configs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_configs.delete-config", + "method": "DELETE", + "endpoint_path": "/v0/evi/configs/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_configs.endpoint_configs.update-config-name", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/configs/update-config-name", + "page_title": "Update config name", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Configs", + "pathname": "/reference/empathic-voice-interface-evi/configs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_configs.update-config-name", + "method": "PATCH", + "endpoint_path": "/v0/evi/configs/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_configs.endpoint_configs.get-config-version", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/configs/get-config-version", + "page_title": "Get config version", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Configs", + "pathname": "/reference/empathic-voice-interface-evi/configs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", 
+ "api_endpoint_id": "endpoint_configs.get-config-version", + "method": "GET", + "endpoint_path": "/v0/evi/configs/:id/version/:version", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_configs.endpoint_configs.delete-config-version", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/configs/delete-config-version", + "page_title": "Delete config version", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Configs", + "pathname": "/reference/empathic-voice-interface-evi/configs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_configs.delete-config-version", + "method": "DELETE", + "endpoint_path": "/v0/evi/configs/:id/version/:version", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_configs.endpoint_configs.update-config-description", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/configs/update-config-description", + "page_title": "Update config description", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Configs", + "pathname": "/reference/empathic-voice-interface-evi/configs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_configs.update-config-description", + "method": "PATCH", + "endpoint_path": "/v0/evi/configs/:id/version/:version", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_chats.endpoint_chats.list-chats", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/chats/list-chats", + "page_title": "List chats", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Chats", + "pathname": "/reference/empathic-voice-interface-evi/chats" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_chats.list-chats", + "method": "GET", + "endpoint_path": "/v0/evi/chats", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_chats.endpoint_chats.list-chat-events", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": 
"/reference/empathic-voice-interface-evi/chats/list-chat-events", + "page_title": "List chat events", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Chats", + "pathname": "/reference/empathic-voice-interface-evi/chats" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_chats.list-chat-events", + "method": "GET", + "endpoint_path": "/v0/evi/chats/:id", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_chatGroups.endpoint_chatGroups.list-chat-groups", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups", + "page_title": "List chat_groups", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Chat Groups", + "pathname": "/reference/empathic-voice-interface-evi/chat-groups" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_chatGroups.list-chat-groups", + "method": "GET", + "endpoint_path": "/v0/evi/chat_groups", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_chatGroups.endpoint_chatGroups.list-chat-group-events", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/chat-groups/list-chat-group-events", + "page_title": "List chat events from a specific chat_group", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Chat Groups", + "pathname": "/reference/empathic-voice-interface-evi/chat-groups" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "endpoint_chatGroups.list-chat-group-events", + "method": "GET", + "endpoint_path": "/v0/evi/chat_groups/:id/events", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.empathic-voice-interface-evi.subpackage_chat.subpackage_chat.chat", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/empathic-voice-interface-evi/chat/chat", + "page_title": "Chat", + "breadcrumb": [ + { + "title": "Empathic Voice Interface (EVI)", + "pathname": "/reference/empathic-voice-interface-evi" + }, + { + "title": "Chat", + "pathname": "/reference/empathic-voice-interface-evi/chat" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "websocket", + "api_definition_id": 
"8b03e44c-d4e2-4613-a138-e25b8c65c3cb", + "api_endpoint_id": "subpackage_chat.chat", + "method": "GET", + "endpoint_path": "[object Object]", + "environments": [ + { + "id": "Default", + "url": "wss://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.expression-measurement-api.subpackage_batch.endpoint_batch.list-jobs", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/expression-measurement-api/batch/list-jobs", + "page_title": "List jobs", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/reference/expression-measurement-api" + }, + { + "title": "Batch", + "pathname": "/reference/expression-measurement-api/batch" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "e810f56e-df97-4af2-bdaa-659077e1948e", + "api_endpoint_id": "endpoint_batch.list-jobs", + "method": "GET", + "endpoint_path": "/v0/batch/jobs", + "description": "Sort and filter jobs.", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.expression-measurement-api.subpackage_batch.endpoint_batch.start-inference-job", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/expression-measurement-api/batch/start-inference-job", + "page_title": "Start inference job", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/reference/expression-measurement-api" + }, + { + "title": "Batch", + "pathname": "/reference/expression-measurement-api/batch" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "e810f56e-df97-4af2-bdaa-659077e1948e", + "api_endpoint_id": "endpoint_batch.start-inference-job", + "method": "POST", + "endpoint_path": "/v0/batch/jobs", + "description": "Start a new measurement inference job.", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.expression-measurement-api.subpackage_batch.endpoint_batch.get-job-details", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/expression-measurement-api/batch/get-job-details", + "page_title": "Get job details", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/reference/expression-measurement-api" + }, + { + "title": "Batch", + "pathname": "/reference/expression-measurement-api/batch" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "e810f56e-df97-4af2-bdaa-659077e1948e", + "api_endpoint_id": "endpoint_batch.get-job-details", + "method": "GET", + "endpoint_path": "/v0/batch/jobs/:id", + "description": "Get the request details and state of a given job.", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.expression-measurement-api.subpackage_batch.endpoint_batch.get-job-predictions", + "org_id": "hume", + "domain": "dev.hume.ai", 
+ "pathname": "/reference/expression-measurement-api/batch/get-job-predictions", + "page_title": "Get job predictions", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/reference/expression-measurement-api" + }, + { + "title": "Batch", + "pathname": "/reference/expression-measurement-api/batch" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "e810f56e-df97-4af2-bdaa-659077e1948e", + "api_endpoint_id": "endpoint_batch.get-job-predictions", + "method": "GET", + "endpoint_path": "/v0/batch/jobs/:id/predictions", + "description": "Get the JSON predictions of a completed measurement or custom models inference job.", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.expression-measurement-api.subpackage_batch.endpoint_batch.get-job-artifacts", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/expression-measurement-api/batch/get-job-artifacts", + "page_title": "Get job artifacts", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/reference/expression-measurement-api" + }, + { + "title": "Batch", + "pathname": "/reference/expression-measurement-api/batch" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "e810f56e-df97-4af2-bdaa-659077e1948e", + "api_endpoint_id": "endpoint_batch.get-job-artifacts", + "method": "GET", + "endpoint_path": "/v0/batch/jobs/:id/artifacts", + "description": "Get the artifacts ZIP of a completed measurement or custom models inference job.", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.expression-measurement-api.subpackage_batch.endpoint_batch.start-inference-job-from-local-file", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/expression-measurement-api/batch/start-inference-job-from-local-file", + "page_title": "Start inference job from local file", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/reference/expression-measurement-api" + }, + { + "title": "Batch", + "pathname": "/reference/expression-measurement-api/batch" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "e810f56e-df97-4af2-bdaa-659077e1948e", + "api_endpoint_id": "endpoint_batch.start-inference-job-from-local-file", + "method": "POST", + "endpoint_path": "/v0/batch/jobs", + "description": "Start a new batch inference job.", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.expression-measurement-api.subpackage_stream.subpackage_stream.stream", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/expression-measurement-api/stream/stream", + "page_title": "Stream", + "breadcrumb": [ + { + "title": "Expression Measurement API", + "pathname": "/reference/expression-measurement-api" + }, + { + "title": "Stream", + "pathname": 
"/reference/expression-measurement-api/stream" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "websocket", + "api_definition_id": "e810f56e-df97-4af2-bdaa-659077e1948e", + "api_endpoint_id": "subpackage_stream.stream", + "method": "GET", + "endpoint_path": "[object Object]", + "environments": [ + { + "id": "Default", + "url": "wss://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_files.endpoint_files.list-files", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/files/list-files", + "page_title": "List files", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Files", + "pathname": "/reference/custom-models-api/files" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_files.list-files", + "method": "GET", + "endpoint_path": "/v0/registry/files", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_files.endpoint_files.create-files", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/files/create-files", + "page_title": "Create files", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Files", + "pathname": "/reference/custom-models-api/files" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_files.create-files", + "method": "POST", + "endpoint_path": "/v0/registry/files", + "description": "Returns 201 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_files.endpoint_files.upload-file", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/files/upload-file", + "page_title": "Upload file", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Files", + "pathname": "/reference/custom-models-api/files" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_files.upload-file", + "method": "POST", + "endpoint_path": "/v0/registry/files/upload", + "description": "Upload a file synchronously. Returns 201 if successful. Files must have a name. Files must specify Content-Type. 
Request bodies, and therefore files, are limited to 100MB", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_files.endpoint_files.get-file", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/files/get-file", + "page_title": "Get file", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Files", + "pathname": "/reference/custom-models-api/files" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_files.get-file", + "method": "GET", + "endpoint_path": "/v0/registry/files/:id", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_files.endpoint_files.delete-file", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/files/delete-file", + "page_title": "Delete file", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Files", + "pathname": "/reference/custom-models-api/files" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_files.delete-file", + "method": "DELETE", + "endpoint_path": "/v0/registry/files/:id", + "description": "Returns 204 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_files.endpoint_files.update-file-name", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/files/update-file-name", + "page_title": "Update file name", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Files", + "pathname": "/reference/custom-models-api/files" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_files.update-file-name", + "method": "PATCH", + "endpoint_path": "/v0/registry/files/:id", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_files.endpoint_files.get-file-predictions", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/files/get-file-predictions", + "page_title": "Get file predictions", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Files", + "pathname": 
"/reference/custom-models-api/files" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_files.get-file-predictions", + "method": "GET", + "endpoint_path": "/v0/registry/files/:id/predictions", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_datasets.endpoint_datasets.list-datasets", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/datasets/list-datasets", + "page_title": "List datasets", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Datasets", + "pathname": "/reference/custom-models-api/datasets" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_datasets.list-datasets", + "method": "GET", + "endpoint_path": "/v0/registry/datasets", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_datasets.endpoint_datasets.create-dataset", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/datasets/create-dataset", + "page_title": "Create dataset", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Datasets", + "pathname": "/reference/custom-models-api/datasets" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_datasets.create-dataset", + "method": "POST", + "endpoint_path": "/v0/registry/datasets", + "description": "Returns 201 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_datasets.endpoint_datasets.get-dataset", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/datasets/get-dataset", + "page_title": "Get dataset", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Datasets", + "pathname": "/reference/custom-models-api/datasets" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_datasets.get-dataset", + "method": "GET", + "endpoint_path": "/v0/registry/datasets/:id", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": 
"hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_datasets.endpoint_datasets.create-dataset-version", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/datasets/create-dataset-version", + "page_title": "Create dataset version", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Datasets", + "pathname": "/reference/custom-models-api/datasets" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_datasets.create-dataset-version", + "method": "POST", + "endpoint_path": "/v0/registry/datasets/:id", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_datasets.endpoint_datasets.delete-dataset", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/datasets/delete-dataset", + "page_title": "Delete dataset", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Datasets", + "pathname": "/reference/custom-models-api/datasets" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_datasets.delete-dataset", + "method": "DELETE", + "endpoint_path": "/v0/registry/datasets/:id", + "description": "Returns 204 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_datasets.endpoint_datasets.list-dataset-versions", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/datasets/list-dataset-versions", + "page_title": "List dataset versions", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Datasets", + "pathname": "/reference/custom-models-api/datasets" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_datasets.list-dataset-versions", + "method": "GET", + "endpoint_path": "/v0/registry/datasets/:id/versions", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_datasets.endpoint_datasets.list-dataset-files", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/datasets/list-dataset-files", + "page_title": "List dataset files", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Datasets", + "pathname": "/reference/custom-models-api/datasets" + } + ], + "tab": { + "title": "API 
Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_datasets.list-dataset-files", + "method": "GET", + "endpoint_path": "/v0/registry/datasets/:id/files", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_datasets.endpoint_datasets.get-dataset-version", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/datasets/get-dataset-version", + "page_title": "Get dataset version", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Datasets", + "pathname": "/reference/custom-models-api/datasets" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_datasets.get-dataset-version", + "method": "GET", + "endpoint_path": "/v0/registry/datasets/version/:id", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_datasets.endpoint_datasets.list-dataset-version-files", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/datasets/list-dataset-version-files", + "page_title": "List dataset version files", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Datasets", + "pathname": "/reference/custom-models-api/datasets" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_datasets.list-dataset-version-files", + "method": "GET", + "endpoint_path": "/v0/registry/datasets/version/:id/files", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_models.endpoint_models.list-models", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/models/list-models", + "page_title": "List models", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Models", + "pathname": "/reference/custom-models-api/models" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_models.list-models", + "method": "GET", + "endpoint_path": "/v0/registry/models", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": 
"hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_models.endpoint_models.get-model-details", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/models/get-model-details", + "page_title": "Get model details", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Models", + "pathname": "/reference/custom-models-api/models" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_models.get-model-details", + "method": "GET", + "endpoint_path": "/v0/registry/models/:id", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_models.endpoint_models.update-model-name", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/models/update-model-name", + "page_title": "Update model name", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Models", + "pathname": "/reference/custom-models-api/models" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_models.update-model-name", + "method": "PATCH", + "endpoint_path": "/v0/registry/models/:id", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_models.endpoint_models.list-model-versions", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/models/list-model-versions", + "page_title": "List model versions", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Models", + "pathname": "/reference/custom-models-api/models" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_models.list-model-versions", + "method": "GET", + "endpoint_path": "/v0/registry/models/version", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_models.endpoint_models.get-model-version", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/models/get-model-version", + "page_title": "Get model version", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Models", + "pathname": "/reference/custom-models-api/models" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + 
"api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_models.get-model-version", + "method": "GET", + "endpoint_path": "/v0/registry/models/version/:id", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_models.endpoint_models.update-model-description", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/models/update-model-description", + "page_title": "Update model description", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Models", + "pathname": "/reference/custom-models-api/models" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_models.update-model-description", + "method": "PATCH", + "endpoint_path": "/v0/registry/models/version/:id", + "description": "Returns 200 if successful", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_jobs.endpoint_jobs.start-training-job", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/jobs/start-training-job", + "page_title": "Start training job", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Jobs", + "pathname": "/reference/custom-models-api/jobs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_jobs.start-training-job", + "method": "POST", + "endpoint_path": "/v0/registry/v0/batch/jobs/tl/train", + "description": "Start a new custom models training job.", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + }, + { + "objectID": "hume:dev.hume.ai:root.uv.reference.reference.custom-models-api.subpackage_jobs.endpoint_jobs.start-custom-models-inference-job", + "org_id": "hume", + "domain": "dev.hume.ai", + "pathname": "/reference/custom-models-api/jobs/start-custom-models-inference-job", + "page_title": "Start custom models inference job", + "breadcrumb": [ + { + "title": "Custom Models API", + "pathname": "/reference/custom-models-api" + }, + { + "title": "Jobs", + "pathname": "/reference/custom-models-api/jobs" + } + ], + "tab": { + "title": "API Reference" + }, + "visible_by": [ + "role/everyone" + ], + "authed": false, + "api_type": "http", + "api_definition_id": "bcb7826e-ffd6-4440-b3de-6bf37440c3e2", + "api_endpoint_id": "endpoint_jobs.start-custom-models-inference-job", + "method": "POST", + "endpoint_path": "/v0/registry/v0/batch/jobs/tl/inference", + "description": "Start a new custom models inference job.", + "environments": [ + { + "id": "Default", + "url": "https://api.hume.ai" + } + ], + "default_environment_id": "Default", + "type": "api-reference" + } +] \ No newline at end of 
file diff --git a/packages/ui/fern-docs-search-server/src/algolia/__test__/cohere.test.ts b/packages/ui/fern-docs-search-server/src/algolia/__test__/cohere.test.ts new file mode 100644 index 0000000000..1688e1f35a --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/algolia/__test__/cohere.test.ts @@ -0,0 +1,27 @@ +import fs from "fs"; +import path from "path"; +import { createMarkdownRecords } from "../records/create-markdown-records.js"; +import { BaseRecord } from "../types.js"; + +const base: BaseRecord = { + objectID: "test", + org_id: "test", + domain: "test", + pathname: "/test", + page_title: "test", + breadcrumb: [], + visible_by: [], + authed: false, +}; + +describe("cohere", () => { + it("should work", () => { + const fixture = fs.readFileSync(path.join(__dirname, "fixtures/cohere.mdx"), "utf8"); + const result = createMarkdownRecords({ + base, + markdown: fixture, + }); + + expect(JSON.stringify(result, null, 2)).toMatchFileSnapshot(path.join(__dirname, "__snapshots__/cohere.json")); + }); +}); diff --git a/packages/ui/fern-docs-search-server/src/algolia/__test__/fixtures/cohere.mdx b/packages/ui/fern-docs-search-server/src/algolia/__test__/fixtures/cohere.mdx new file mode 100644 index 0000000000..9c2cbfed47 --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/algolia/__test__/fixtures/cohere.mdx @@ -0,0 +1,405 @@ +--- +title: Elasticsearch and Cohere +slug: docs/elasticsearch-and-cohere +hidden: false +description: >- + Learn how to create a semantic search pipeline with Elasticsearch and Cohere's + generative AI capabilities. +image: ../../../assets/images/f1cc130-cohere_meta_image.jpg +keywords: 'Cohere integrations, Elasticsearch' +createdAt: 'Sun Apr 07 2024 20:15:08 GMT+0000 (Coordinated Universal Time)' +updatedAt: 'Thu May 30 2024 15:56:35 GMT+0000 (Coordinated Universal Time)' +--- + + + +[Elasticsearch](https://www.elastic.co/search-labs/blog/elasticsearch-cohere-embeddings-support) has all the tools developers need to build next generation search experiences with generative AI, and it supports native integration with [Cohere](https://www.elastic.co/search-labs/blog/elasticsearch-cohere-embeddings-support) through their [inference API](https://www.elastic.co/guide/en/elasticsearch/reference/master/semantic-search-inference.html). + +Use Elastic if you’d like to build with: + +- A vector database +- Deploy multiple ML models +- Perform text, vector and hybrid search +- Search with filters, facet, aggregations +- Apply document and field level security +- Run on-prem, cloud, or serverless (preview) + +This guide uses a dataset of Wikipedia articles to set up a pipeline for semantic search. It will cover: + +- Creating an Elastic inference processor using Cohere embeddings +- Creating an Elasticsearch index with embeddings +- Performing hybrid search on the Elasticsearch index and reranking results +- Performing basic RAG + +To see the full code sample, refer to this [notebook](https://github.com/cohere-ai/notebooks/blob/main/notebooks/Cohere_Elastic_Guide.ipynb). You can also find an integration guide [here](https://www.elastic.co/search-labs/integrations/cohere). + +## Prerequisites + +This tutorial assumes you have the following: + +- An Elastic Cloud account through [Elastic Cloud](https://www.elastic.co/guide/en/cloud/current/ec-getting-started.html), available with a [free trial](https://cloud.elastic.co/registration?utm_source=github&utm_content=elasticsearch-labs-notebook) +- A Cohere production API Key. 
Get your API Key at this [link](https://dashboard.cohere.com/welcome/login?redirect_uri=%2Fapi-keys) if you don't have one +- Python 3.7 or higher + +Note: While this tutorial integrates Cohere with an Elastic Cloud [serverless](https://docs.elastic.co/serverless/elasticsearch/get-started) project, you can also integrate with your self-managed Elasticsearch deployment or Elastic Cloud deployment by simply switching from the [serverless](https://docs.elastic.co/serverless/elasticsearch/clients) to the general [language client](https://www.elastic.co/guide/en/elasticsearch/client/index.html). + +## Create an Elastic Serverless deployment + +If you don't have an Elastic Cloud deployment, sign up [here](https://www.google.com/url?q=https%3A%2F%2Fcloud.elastic.co%2Fregistration%3Futm_source%3Dgithub%26utm_content%3Delasticsearch-labs-notebook) for a free trial and request access to Elastic Serverless + +## Install the required packages + +Install and import the required Python Packages: + +- `elasticsearch_serverless` +- `cohere`: ensure you are on version 5.2.5 or later + +To install the packages, use the following code + +```python PYTHON +!pip install elasticsearch_serverless==0.2.0.20231031 +!pip install cohere==5.2.5 +``` + +After the instalation has finished, find your endpoint URL and create your API key in the Serverless dashboard. + +## Import the required packages + +Next, we need to import the modules we need. 🔐 NOTE: getpass enables us to securely prompt the user for credentials without echoing them to the terminal, or storing it in memory. + +```python PYTHON +from elasticsearch_serverless import Elasticsearch, helpers +from getpass import getpass +import cohere +import json +import requests +``` + +## Create an Elasticsearch client + +Now we can instantiate the Python Elasticsearch client. + +First we prompt the user for their endpoint and encoded API key. Then we create a client object that instantiates an instance of the Elasticsearch class. + +When creating your Elastic Serverless API key make sure to turn on Control security privileges, and edit cluster privileges to specify `"cluster": ["all"]`. + +```python PYTHON +ELASTICSEARCH_ENDPOINT = getpass("Elastic Endpoint: ") +ELASTIC_API_KEY = getpass("Elastic encoded API key: ") # Use the encoded API key + +client = Elasticsearch( + ELASTICSEARCH_ENDPOINT, + api_key=ELASTIC_API_KEY +) + +# Confirm the client has connected +print(client.info()) +``` + +# Build a Hybrid Search Index with Cohere and Elasticsearch + +## Create an inference endpoint + +One of the biggest pain points of building a vector search index is computing embeddings for a large corpus of data. Fortunately Elastic offers inference endpoints that can be used in ingest pipelines to automatically compute embeddings when bulk indexing operations are performed. + +To set up an inference pipeline for ingestion we first must create an inference endpoint that uses Cohere embeddings. You'll need a Cohere API key for this that you can find in your Cohere account under the [API keys section](https://dashboard.cohere.com/api-keys). + +We will create an inference endpoint that uses `embed-english-v3.0` and `int8` or `byte` compression to save on storage. 
+ +```python PYTHON +COHERE_API_KEY = getpass("Enter Cohere API key: ") +# Delete the inference model if it already exists +client.options(ignore_status=[404]).inference.delete(inference_id="cohere_embeddings") + +client.inference.put( + task_type="text_embedding", + inference_id="cohere_embeddings", + body={ + "service": "cohere", + "service_settings": { + "api_key": COHERE_API_KEY, + "model_id": "embed-english-v3.0", + "embedding_type": "int8", + "similarity": "cosine" + }, + "task_settings": {}, + }, +) +``` + +Here's what you might see: + +``` +Enter Cohere API key: ·········· +ObjectApiResponse({'model_id': 'cohere_embeddings', 'inference_id': 'cohere_embeddings', 'task_type': 'text_embedding', 'service': 'cohere', 'service_settings': {'similarity': 'cosine', 'dimensions': 1024, 'model_id': 'embed-english-v3.0', 'rate_limit': {'requests_per_minute': 10000}, 'embedding_type': 'byte'}, 'task_settings': {}}) +``` + +## Create the Index + +The mapping of the destination index – the index that contains the embeddings that the model will generate based on your input text – must be created. The destination index must have a field with the [`semantic_text`](https://www.google.com/url?q=https%3A%2F%2Fwww.elastic.co%2Fguide%2Fen%2Felasticsearch%2Freference%2Fcurrent%2Fsemantic-text.html) field type to index the output of the Cohere model. + +Let's create an index named cohere-wiki-embeddings with the mappings we need + +```python PYTHON +client.indices.delete(index="cohere-wiki-embeddings", ignore_unavailable=True) +client.indices.create( + index="cohere-wiki-embeddings", + mappings={ + "properties": { + "text_semantic": { + "type": "semantic_text", + "inference_id": "cohere_embeddings" + }, + "text": {"type": "text", "copy_to": "text_semantic"}, + "wiki_id": {"type": "integer"}, + "url": {"type": "text"}, + "views": {"type": "float"}, + "langs": {"type": "integer"}, + "title": {"type": "text"}, + "paragraph_id": {"type": "integer"}, + "id": {"type": "integer"} + } + }, +) +``` + +You might see something like this: + +``` +ObjectApiResponse({'acknowledged': True, 'shards_acknowledged': True, 'index': 'cohere-wiki-embeddings'}) +``` + +Let's note a few important parameters from that API call: + +- `semantic_text`: A field type automatically generates embeddings for text content using an inference endpoint. +- `inference_id`: Specifies the ID of the inference endpoint to be used. In this example, the model ID is set to cohere_embeddings. +- `copy_to`: Specifies the output field which contains inference results + +## Insert Documents + +Let's insert our example wiki dataset. You need a production Cohere account to complete this step, otherwise the documentation ingest will time out due to the API request rate limits. + +```python PYTHON +url = "https://raw.githubusercontent.com/cohere-ai/notebooks/main/notebooks/data/embed_jobs_sample_data.jsonl" +response = requests.get(url) + +# Load the response data into a JSON object +jsonl_data = response.content.decode('utf-8').splitlines() + +# Prepare the documents to be indexed +documents = [] +for line in jsonl_data: + data_dict = json.loads(line) + documents.append({ + "_index": "cohere-wiki-embeddings", + "_source": data_dict, + } + ) + +# Use the bulk endpoint to index +helpers.bulk(client, documents) + +print("Done indexing documents into `cohere-wiki-embeddings` index!") +``` + +You should see this: + +``` +Done indexing documents into `cohere-wiki-embeddings` index! 
+``` + +## Semantic Search +After the dataset has been enriched with the embeddings, you can query the data using the semantic query provided by Elasticsearch. `semantic_text` in Elasticsearch simplifies the semantic search significantly. Learn more about how [semantic text](https://www.google.com/url?q=https%3A%2F%2Fwww.elastic.co%2Fsearch-labs%2Fblog%2Fsemantic-search-simplified-semantic-text) in Elasticsearch allows you to focus on your model and results instead of on the technical details. + +```python PYTHON +query = "When were the semi-finals of the 2022 FIFA world cup played?" + +response = client.search( + index="cohere-wiki-embeddings", + size=100, + query = { + "semantic": { + "query": "When were the semi-finals of the 2022 FIFA world cup played?", + "field": "text_semantic" + } + } +) + +raw_documents = response["hits"]["hits"] + +# Display the first 10 results +for document in raw_documents[0:10]: + print(f'Title: {document["_source"]["title"]}\nText: {document["_source"]["text"]}\n') + +# Format the documents for ranking +documents = [] +for hit in response["hits"]["hits"]: + documents.append(hit["_source"]["text"]) +``` + +Here's what that might look like: +``` +Title: 2022 FIFA World Cup +Text: The 2022 FIFA World Cup was an international football tournament contested by the men's national teams of FIFA's member associations and 22nd edition of the FIFA World Cup. It took place in Qatar from 20 November to 18 December 2022, making it the first World Cup held in the Arab world and Muslim world, and the second held entirely in Asia after the 2002 tournament in South Korea and Japan. France were the defending champions, having defeated Croatia 4–2 in the 2018 final. At an estimated cost of over $220 billion, it is the most expensive World Cup ever held to date; this figure is disputed by Qatari officials, including organising CEO Nasser Al Khater, who said the true cost was $8 billion, and other figures related to overall infrastructure development since the World Cup was awarded to Qatar in 2010. + +Title: 2022 FIFA World Cup +Text: The semi-finals were played on 13 and 14 December. Messi scored a penalty kick before Julián Álvarez scored twice to give Argentina a 3–0 victory over Croatia. Théo Hernandez scored after five minutes as France led Morocco for most of the game and later Randal Kolo Muani scored on 78 minutes to complete a 2–0 victory for France over Morocco as they reached a second consecutive final. + +Title: 2022 FIFA World Cup +Text: The quarter-finals were played on 9 and 10 December. Croatia and Brazil ended 0–0 after 90 minutes and went to extra time. Neymar scored for Brazil in the 15th minute of extra time. Croatia, however, equalised through Bruno Petković in the second period of extra time. With the match tied, a penalty shootout decided the contest, with Croatia winning the shoot-out 4–2. In the second quarter-final match, Nahuel Molina and Messi scored for Argentina before Wout Weghorst equalised with two goals shortly before the end of the game. The match went to extra time and then penalties, where Argentina would go on to win 4–3. Morocco defeated Portugal 1–0, with Youssef En-Nesyri scoring at the end of the first half. Morocco became the first African and the first Arab nation to advance as far as the semi-finals of the competition. 
Despite Harry Kane scoring a penalty for England, it was not enough to beat France, who won 2–1 by virtue of goals from Aurélien Tchouaméni and Olivier Giroud, sending them to their second consecutive World Cup semi-final and becoming the first defending champions to reach this stage since Brazil in 1998. + +Title: 2022 FIFA World Cup +Text: Unlike previous FIFA World Cups, which are typically played in June and July, because of Qatar's intense summer heat and often fairly high humidity, the 2022 World Cup was played in November and December. As a result, the World Cup was unusually staged in the middle of the seasons of domestic association football leagues, which started in late July or August, including all of the major European leagues, which had been obliged to incorporate extended breaks into their domestic schedules to accommodate the World Cup. Major European competitions had scheduled their respective competitions group matches to be played before the World Cup, to avoid playing group matches the following year. + +Title: 2022 FIFA World Cup +Text: The match schedule was confirmed by FIFA in July 2020. The group stage was set to begin on 21 November, with four matches every day. Later, the schedule was tweaked by moving the Qatar vs Ecuador game to 20 November, after Qatar lobbied FIFA to allow their team to open the tournament. The final was played on 18 December 2022, National Day, at Lusail Stadium. + +Title: 2022 FIFA World Cup +Text: Owing to the climate in Qatar, concerns were expressed over holding the World Cup in its traditional time frame of June and July. In October 2013, a task force was commissioned to consider alternative dates and report after the 2014 FIFA World Cup in Brazil. On 24 February 2015, the FIFA Task Force proposed that the tournament be played from late November to late December 2022, to avoid the summer heat between May and September and also avoid clashing with the 2022 Winter Olympics in February, the 2022 Winter Paralympics in March and Ramadan in April. + +Title: 2022 FIFA World Cup +Text: Of the 32 nations qualified to play at the 2022 FIFA World Cup, 24 countries competed at the previous tournament in 2018. Qatar were the only team making their debut in the FIFA World Cup, becoming the first hosts to make their tournament debut since Italy in 1934. As a result, the 2022 tournament was the first World Cup in which none of the teams that earned a spot through qualification were making their debut. The Netherlands, Ecuador, Ghana, Cameroon, and the United States returned to the tournament after missing the 2018 tournament. Canada returned after 36 years, their only prior appearance being in 1986. Wales made their first appearance in 64 years – the longest ever gap for any team, their only previous participation having been in 1958. + +Title: 2022 FIFA World Cup +Text: After UEFA were guaranteed to host the 2018 event, members of UEFA were no longer in contention to host in 2022. There were five bids remaining for the 2022 FIFA World Cup: Australia, Japan, Qatar, South Korea, and the United States. + +Title: Cristiano Ronaldo +Text: Ronaldo was named in Portugal's squad for the 2022 FIFA World Cup in Qatar, making it his fifth World Cup. On 24 November, in Portugal's opening match against Ghana, Ronaldo scored a penalty kick and became the first male player to score in five different World Cups. In the last group game against South Korea, Ronaldo received criticism from his own coach for his reaction at being substituted. 
He was dropped from the starting line-up for Portugal's last 16 match against Switzerland, marking the first time since Euro 2008 that he had not started a game for Portugal in a major international tournament, and the first time Portugal had started a knockout game without Ronaldo in the starting line-up at an international tournament since Euro 2000. He came off the bench late on as Portugal won 6–1, their highest tally in a World Cup knockout game since the 1966 World Cup, with Ronaldo's replacement Gonçalo Ramos scoring a hat-trick. Portugal employed the same strategy in the quarter-finals against Morocco, with Ronaldo once again coming off the bench; in the process, he equalled Bader Al-Mutawa's international appearance record, becoming the joint–most capped male footballer of all time, with 196 caps. Portugal lost 1–0, however, with Morocco becoming the first CAF nation ever to reach the World Cup semi-finals. + +Title: 2022 FIFA World Cup +Text: The final draw was held at the Doha Exhibition and Convention Center in Doha, Qatar, on 1 April 2022, 19:00 AST, prior to the completion of qualification. The two winners of the inter-confederation play-offs and the winner of the Path A of the UEFA play-offs were not known at the time of the draw. The draw was attended by 2,000 guests and was led by Carli Lloyd, Jermaine Jenas and sports broadcaster Samantha Johnson, assisted by the likes of Cafu (Brazil), Lothar Matthäus (Germany), Adel Ahmed Malalla (Qatar), Ali Daei (Iran), Bora Milutinović (Serbia/Mexico), Jay-Jay Okocha (Nigeria), Rabah Madjer (Algeria), and Tim Cahill (Australia). +``` + +## Hybrid Search +After the dataset has been enriched with the embeddings, you can query the data using hybrid search. + +Pass a semantic query, and provide the query text and the model you have used to create the embeddings. + +```python PYTHON +query = "When were the semi-finals of the 2022 FIFA world cup played?" + +response = client.search( + index="cohere-wiki-embeddings", + size=100, + query={ + "bool": { + "must": { + "multi_match": { + "query": "When were the semi-finals of the 2022 FIFA world cup played?", + "fields": ["text", "title"] + } + }, + "should": { + "semantic": { + "query": "When were the semi-finals of the 2022 FIFA world cup played?", + "field": "text_semantic" + } + }, + } + } + +) + +raw_documents = response["hits"]["hits"] + +# Display the first 10 results +for document in raw_documents[0:10]: + print(f'Title: {document["_source"]["title"]}\nText: {document["_source"]["text"]}\n') + +# Format the documents for ranking +documents = [] +for hit in response["hits"]["hits"]: + documents.append(hit["_source"]["text"]) +``` + +## Ranking + +In order to effectively combine the results from our vector and BM25 retrieval, we can use Cohere's Rerank 3 model through the inference API to provide a final, more precise, semantic reranking of our results. + +First, create an inference endpoint with your Cohere API key. Make sure to specify a name for your endpoint, and the model_id of one of the rerank models. In this example we will use Rerank 3. 
+ +```python PYTHON +# Delete the inference model if it already exists +client.options(ignore_status=[404]).inference.delete(inference_id="cohere_rerank") + +client.inference.put( + task_type="rerank", + inference_id="cohere_rerank", + body={ + "service": "cohere", + "service_settings":{ + "api_key": COHERE_API_KEY, + "model_id": "rerank-english-v3.0" + }, + "task_settings": { + "top_n": 10, + }, + } +) +``` + +You can now rerank your results using that inference endpoint. Here we will pass in the query we used for retrieval, along with the documents we just retrieved using hybrid search. + +The inference service will respond with a list of documents in descending order of relevance. Each document has a corresponding index (reflecting to the order the documents were in when sent to the inference endpoint), and if the “return_documents” task setting is True, then the document texts will be included as well. + +In this case we will set the response to False and will reconstruct the input documents based on the index returned in the response. + +```python PYTHON +response = client.inference.inference( + inference_id="cohere_rerank", + body={ + "query": query, + "input": documents, + "task_settings": { + "return_documents": False + } + } +) + +# Reconstruct the input documents based on the index provided in the rereank response +ranked_documents = [] +for document in response.body["rerank"]: + ranked_documents.append({ + "title": raw_documents[int(document["index"])]["_source"]["title"], + "text": raw_documents[int(document["index"])]["_source"]["text"] + }) + +# Print the top 10 results +for document in ranked_documents[0:10]: + print(f"Title: {document['title']}\nText: {document['text']}\n") +``` + +## Retrieval augemented generation + +Now that we have ranked our results, we can easily turn this into a RAG system with Cohere's Chat API. Pass in the retrieved documents, along with the query and see the grounded response using Cohere's newest generative model Command R+. + +First, we will create the Cohere client. + +```python PYTHON +co = cohere.Client(COHERE_API_KEY) +``` + +Next, we can easily get a grounded generation with citations from the Cohere Chat API. We simply pass in the user query and documents retrieved from Elastic to the API, and print out our grounded response. + +```python PYTHON +response = co.chat( + message=query, + documents=ranked_documents, + model='command-r-plus-08-2024' +) + +source_documents = [] +for citation in response.citations: + for document_id in citation.document_ids: + if document_id not in source_documents: + source_documents.append(document_id) + +print(f"Query: {query}") +print(f"Response: {response.text}") +print("Sources:") +for document in response.documents: + if document['id'] in source_documents: + print(f"{document['title']}: {document['text']}") +``` + +And there you have it! A quick and easy implementation of hybrid search and RAG with Cohere and Elastic. 
\ No newline at end of file diff --git a/packages/ui/fern-docs-search-server/src/algolia/__test__/humanloop.test.ts b/packages/ui/fern-docs-search-server/src/algolia/__test__/humanloop.test.ts index 120f6e7a2e..9b5e20a42f 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/__test__/humanloop.test.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/__test__/humanloop.test.ts @@ -1,35 +1,11 @@ -import { ApiDefinition, DocsV2Read, FernNavigation } from "@fern-api/fdr-sdk"; import { uniq } from "es-toolkit"; -import { mapValues } from "es-toolkit/object"; -import fs from "fs"; -import path from "path"; import { createAlgoliaRecords } from "../records/create-algolia-records.js"; - -const fixturesDir = path.join(__dirname, "../../../../../fdr-sdk/src/__test__/fixtures"); - -function readFixture(fixture: string) { - const fixturePath = path.join(fixturesDir, `${fixture}.json`); - const content = fs.readFileSync(fixturePath, "utf-8"); - return JSON.parse(content) as DocsV2Read.LoadDocsForUrlResponse; -} +import { readFixture, readFixtureToRootNode } from "./test-utils.js"; describe("humanloop", () => { it("should work", () => { - const fixture = readFixture("humanloop"); - const root = FernNavigation.utils.toRootNode(fixture); - const apis = Object.fromEntries( - Object.values(fixture.definition.apis).map((api) => { - return [ - api.id, - ApiDefinition.ApiDefinitionV1ToLatest.from(api, { - useJavaScriptAsTypeScript: false, - alwaysEnableJavaScriptFetch: false, - usesApplicationJsonInFormDataValue: false, - }).migrate(), - ]; - }), - ); - const pages = mapValues(fixture.definition.pages, (page) => page.markdown); + const [fixture, snapshotFilepath] = readFixture("humanloop"); + const { root, apis, pages } = readFixtureToRootNode(fixture); const records = createAlgoliaRecords({ root, @@ -42,7 +18,7 @@ describe("humanloop", () => { const objectIDs = records.map((record) => record.objectID); - expect(JSON.stringify(records, null, 2)).toMatchFileSnapshot("__snapshots__/humanloop.json"); + expect(JSON.stringify(records, null, 2)).toMatchFileSnapshot(snapshotFilepath); expect(uniq(objectIDs).length).toBe(objectIDs.length); }); diff --git a/packages/ui/fern-docs-search-server/src/algolia/__test__/hume.test.ts b/packages/ui/fern-docs-search-server/src/algolia/__test__/hume.test.ts new file mode 100644 index 0000000000..1a129208ff --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/algolia/__test__/hume.test.ts @@ -0,0 +1,25 @@ +import { uniq } from "es-toolkit"; +import { createAlgoliaRecords } from "../records/create-algolia-records.js"; +import { readFixture, readFixtureToRootNode } from "./test-utils.js"; + +describe("hume", () => { + it("should work", () => { + const [fixture, snapshotFilepath] = readFixture("hume"); + const { root, apis, pages } = readFixtureToRootNode(fixture); + + const records = createAlgoliaRecords({ + root, + domain: "dev.hume.ai", + org_id: "hume", + pages, + apis, + authed: false, + }); + + const objectIDs = records.map((record) => record.objectID); + + expect(JSON.stringify(records, null, 2)).toMatchFileSnapshot(snapshotFilepath); + + expect(uniq(objectIDs).length).toBe(objectIDs.length); + }); +}); diff --git a/packages/ui/fern-docs-search-server/src/algolia/__test__/test-utils.ts b/packages/ui/fern-docs-search-server/src/algolia/__test__/test-utils.ts new file mode 100644 index 0000000000..78b9d18f96 --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/algolia/__test__/test-utils.ts @@ -0,0 +1,39 @@ +import { DocsV2Read } from 
"@fern-api/fdr-sdk"; +import * as ApiDefinition from "@fern-api/fdr-sdk/api-definition"; +import * as FernNavigation from "@fern-api/fdr-sdk/navigation"; +import { mapValues } from "es-toolkit/object"; +import fs from "fs"; +import path from "path"; + +const fixturesDir = path.join(__dirname, "../../../../../fdr-sdk/src/__test__/fixtures"); + +export function readFixture(fixture: string): [DocsV2Read.LoadDocsForUrlResponse, snapshotFilepath: string] { + const fixturePath = path.join(fixturesDir, `${fixture}.json`); + const content = fs.readFileSync(fixturePath, "utf-8"); + return [ + JSON.parse(content) as DocsV2Read.LoadDocsForUrlResponse, + path.join(__dirname, `__snapshots__/${fixture}.json`), + ]; +} + +export function readFixtureToRootNode(fixture: DocsV2Read.LoadDocsForUrlResponse): { + root: FernNavigation.RootNode; + apis: Record; + pages: Record; +} { + const root = FernNavigation.utils.toRootNode(fixture); + const apis = Object.fromEntries( + Object.values(fixture.definition.apis).map((api) => { + return [ + api.id, + ApiDefinition.ApiDefinitionV1ToLatest.from(api, { + useJavaScriptAsTypeScript: false, + alwaysEnableJavaScriptFetch: false, + usesApplicationJsonInFormDataValue: false, + }).migrate(), + ]; + }), + ); + const pages = mapValues(fixture.definition.pages, (page) => page.markdown); + return { root, apis, pages }; +} diff --git a/packages/ui/fern-docs-search-server/src/algolia/browse-all-objects-for-domain.ts b/packages/ui/fern-docs-search-server/src/algolia/browse-all-objects-for-domain.ts index 365df1c43f..7da5fa248d 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/browse-all-objects-for-domain.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/browse-all-objects-for-domain.ts @@ -4,6 +4,7 @@ export async function browseAllObjectsForDomain( algolia: Algoliasearch, domain: string, indexName: string, + attributesToRetrieve?: string[], ): Promise[]> { let response: BrowseResponse; let cursor: string | undefined; @@ -14,6 +15,7 @@ export async function browseAllObjectsForDomain( filters: `domain:${domain}`, hitsPerPage: 1000, cursor, + attributesToRetrieve, }, indexName, }); diff --git a/packages/ui/fern-docs-search-server/src/algolia/index.ts b/packages/ui/fern-docs-search-server/src/algolia/index.ts index 58b54f40e0..ddd154bc38 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/index.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/index.ts @@ -1,2 +1 @@ export * from "./get-search-api-key.js"; -export * from "./records/index.js"; diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/__test__/prepare-mdx-content.test.ts b/packages/ui/fern-docs-search-server/src/algolia/records/__test__/prepare-mdx-content.test.ts index e959281bcc..265d3a462b 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/__test__/prepare-mdx-content.test.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/__test__/prepare-mdx-content.test.ts @@ -4,7 +4,7 @@ describe("prepareMdxContent", () => { it("should remove mdxjsEsm nodes", () => { const content = "export default function MyComponent() { return
Hello, world!
; }"; const result = prepareMdxContent(content); - expect(result.content).toBe(""); + expect(result.content).toBeUndefined(); }); it("should squeeze mdxJsxElement nodes", () => { @@ -20,7 +20,7 @@ describe("prepareMdxContent", () => { it("should remove mdxExpression nodes", () => { const content = "\n{props.testing}\n"; const result = prepareMdxContent(content); - expect(result.content).toBe(""); + expect(result.content).toBeUndefined(); }); it("should stringify text in a newline delimited way nodes", () => { @@ -78,6 +78,17 @@ Be sure to save the generated token - it won't be displayed after you leave the `); }); + it("should extract code snippets", () => { + const content = ` + \`\`\`python + print("Hello, world!") + \`\`\` + `; + const result = prepareMdxContent(content); + expect(result.content).toBe(undefined); + expect(result.code_snippets).toEqual([{ lang: "python", meta: undefined, code: 'print("Hello, world!")' }]); + }); + it("should strip math nodes but keep the content", () => { const content = "$x^2$"; const result = prepareMdxContent(content); diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/archive/convert-name-to-anchor-part.ts b/packages/ui/fern-docs-search-server/src/algolia/records/archive/convert-name-to-anchor-part.ts new file mode 100644 index 0000000000..23c0be0597 --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/algolia/records/archive/convert-name-to-anchor-part.ts @@ -0,0 +1,8 @@ +import { camelCase, upperFirst } from "es-toolkit/string"; + +export function convertNameToAnchorPart(name: string | null | undefined): string | undefined { + if (name == null) { + return undefined; + } + return upperFirst(camelCase(name)); +} diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/create-parameter-records-http.ts b/packages/ui/fern-docs-search-server/src/algolia/records/archive/create-parameter-records-http.ts similarity index 100% rename from packages/ui/fern-docs-search-server/src/algolia/records/create-parameter-records-http.ts rename to packages/ui/fern-docs-search-server/src/algolia/records/archive/create-parameter-records-http.ts diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/generateAlgoliaRecords.ts b/packages/ui/fern-docs-search-server/src/algolia/records/archive/generateAlgoliaRecords.ts similarity index 100% rename from packages/ui/fern-docs-search-server/src/algolia/records/generateAlgoliaRecords.ts rename to packages/ui/fern-docs-search-server/src/algolia/records/archive/generateAlgoliaRecords.ts diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/generateEndpointRecords.ts b/packages/ui/fern-docs-search-server/src/algolia/records/archive/generateEndpointRecords.ts similarity index 100% rename from packages/ui/fern-docs-search-server/src/algolia/records/generateEndpointRecords.ts rename to packages/ui/fern-docs-search-server/src/algolia/records/archive/generateEndpointRecords.ts diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/generateWebSocketRecords.ts b/packages/ui/fern-docs-search-server/src/algolia/records/archive/generateWebSocketRecords.ts similarity index 100% rename from packages/ui/fern-docs-search-server/src/algolia/records/generateWebSocketRecords.ts rename to packages/ui/fern-docs-search-server/src/algolia/records/archive/generateWebSocketRecords.ts diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/generateWebhookRecords.ts b/packages/ui/fern-docs-search-server/src/algolia/records/archive/generateWebhookRecords.ts similarity 
index 100% rename from packages/ui/fern-docs-search-server/src/algolia/records/generateWebhookRecords.ts rename to packages/ui/fern-docs-search-server/src/algolia/records/archive/generateWebhookRecords.ts diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/utils.ts b/packages/ui/fern-docs-search-server/src/algolia/records/archive/to-parameter-breadcrumb.ts similarity index 57% rename from packages/ui/fern-docs-search-server/src/algolia/records/utils.ts rename to packages/ui/fern-docs-search-server/src/algolia/records/archive/to-parameter-breadcrumb.ts index 2ca5fd71c6..586f763ea2 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/utils.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/archive/to-parameter-breadcrumb.ts @@ -1,62 +1,3 @@ -import { FernDocs } from "@fern-api/fdr-sdk"; -import { isNonNullish } from "@fern-api/ui-core-utils"; -import { camelCase, upperFirst } from "es-toolkit/string"; - -export function convertNameToAnchorPart(name: string | null | undefined): string | undefined { - if (name == null) { - return undefined; - } - return upperFirst(camelCase(name)); -} - -// TODO: improve the title -// function toTitle(last: ApiDefinition.KeyPathItem): string { -// switch (last.type) { -// case "discriminatedUnionVariant": -// return last.discriminantDisplayName ?? titleCase(last.discriminantValue); -// case "enumValue": -// return last.value; -// case "extra": -// return "Extra Properties"; -// case "list": -// return "List"; -// case "mapValue": -// return "Map Value"; -// // case "meta": -// // return last.displayName ?? titleCase(last.value); -// case "objectProperty": -// return last.key; -// case "set": -// return "Set"; -// case "undiscriminatedUnionVariant": -// return last.displayName ?? `Variant ${last.idx}`; -// } -// } - -export function toDescription( - descriptions: FernDocs.MarkdownText | (FernDocs.MarkdownText | undefined)[] | undefined, -): string | undefined { - if (descriptions == null) { - return undefined; - } - if (!Array.isArray(descriptions)) { - descriptions = [descriptions]; - } - const stringDescriptions = descriptions.filter(isNonNullish); - - if (stringDescriptions.length !== descriptions.length) { - throw new Error( - "Compiled markdown detected. When generating Algolia records, you must use the unresolved (uncompiled) version of the descriptions", - ); - } - - if (stringDescriptions.length === 0) { - return undefined; - } - - return stringDescriptions.join("\n\n"); -} - // export function toParameterBreadcrumb(path: ApiDefinition.KeyPathItem[]): { // key: string; // display_name: string | undefined; diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/archive/to-title.ts b/packages/ui/fern-docs-search-server/src/algolia/records/archive/to-title.ts new file mode 100644 index 0000000000..3d715df3a9 --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/algolia/records/archive/to-title.ts @@ -0,0 +1,23 @@ +// TODO: improve the title +// function toTitle(last: ApiDefinition.KeyPathItem): string { +// switch (last.type) { +// case "discriminatedUnionVariant": +// return last.discriminantDisplayName ?? titleCase(last.discriminantValue); +// case "enumValue": +// return last.value; +// case "extra": +// return "Extra Properties"; +// case "list": +// return "List"; +// case "mapValue": +// return "Map Value"; +// // case "meta": +// // return last.displayName ?? 
titleCase(last.value); +// case "objectProperty": +// return last.key; +// case "set": +// return "Set"; +// case "undiscriminatedUnionVariant": +// return last.displayName ?? `Variant ${last.idx}`; +// } +// } diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/create-algolia-records.ts b/packages/ui/fern-docs-search-server/src/algolia/records/create-algolia-records.ts index 7ad6fe5ff0..4d3a6f137f 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/create-algolia-records.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/create-algolia-records.ts @@ -125,5 +125,7 @@ export function createAlgoliaRecords({ } }); - return records; + // remove all undefined values + // TODO: trim or filter out any record that is > 100kb + return JSON.parse(JSON.stringify(records)); } diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/create-api-reference-record-http.ts b/packages/ui/fern-docs-search-server/src/algolia/records/create-api-reference-record-http.ts index e3c51e1344..857a977a29 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/create-api-reference-record-http.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/create-api-reference-record-http.ts @@ -2,7 +2,7 @@ import { ApiDefinition } from "@fern-api/fdr-sdk"; import { compact, flatten } from "es-toolkit"; import { ApiReferenceRecord, EndpointBaseRecord } from "../types.js"; import { maybePrepareMdxContent } from "./prepare-mdx-content.js"; -import { toDescription } from "./utils.js"; +import { toDescription } from "./to-description.js"; interface CreateApiReferenceRecordHttpOptions { endpointBase: EndpointBaseRecord; @@ -27,6 +27,12 @@ export function createApiReferenceRecordHttp({ type: "api-reference", request_description, response_description, + /** + * This collects code snippets found inside the request and response descriptions in markdown, but not code snippets from endpoint examples. + * We probably do want to add those example snippets eventually, but they tend to contain redundant information, + * which could reduce the quality of the search results or make it harder to control the size of the records, so we avoid over-polluting the records with them for now. + * In the future, we can create separate records for those snippets, so that examples in the API reference can be deep-linked. + */ code_snippets: code_snippets.length > 0 ?
code_snippets : undefined, + }; } diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/create-api-reference-record-webhook.ts b/packages/ui/fern-docs-search-server/src/algolia/records/create-api-reference-record-webhook.ts index 648009dc27..97dea66fd3 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/create-api-reference-record-webhook.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/create-api-reference-record-webhook.ts @@ -2,7 +2,7 @@ import { ApiDefinition } from "@fern-api/fdr-sdk"; import { compact, flatten } from "es-toolkit"; import { ApiReferenceRecord, EndpointBaseRecord } from "../types.js"; import { maybePrepareMdxContent } from "./prepare-mdx-content.js"; -import { toDescription } from "./utils.js"; +import { toDescription } from "./to-description.js"; interface CreateApiReferenceRecordWebhookOptions { endpointBase: EndpointBaseRecord; diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/create-base-record.ts b/packages/ui/fern-docs-search-server/src/algolia/records/create-base-record.ts index dcef8e0dfc..c033283f30 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/create-base-record.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/create-base-record.ts @@ -31,6 +31,8 @@ export function createBaseRecord({ .filter((n): n is Extract => FernNavigation.hasMetadata(n), ) + // Changelog months and years are not meaningful breadcrumb items, so filter them out + .filter((n) => n.type !== "changelogMonth" && n.type !== "changelogYear") .map((metadata) => ({ title: metadata.title, pathname: addLeadingSlash(metadata.canonicalSlug ?? metadata.slug), diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-http.ts b/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-http.ts index e906b80f63..f2cc6368ec 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-http.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-http.ts @@ -2,7 +2,7 @@ import { ApiDefinition, FernNavigation } from "@fern-api/fdr-sdk"; import { compact, flatten } from "es-toolkit"; import { BaseRecord, EndpointBaseRecord } from "../types.js"; import { maybePrepareMdxContent } from "./prepare-mdx-content.js"; -import { toDescription } from "./utils.js"; +import { toDescription } from "./to-description.js"; interface CreateEndpointBaseRecordOptions { node: FernNavigation.EndpointNode; diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-web-socket.ts b/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-web-socket.ts index 9aa3f174bf..e512a9eda2 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-web-socket.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-web-socket.ts @@ -2,7 +2,7 @@ import { ApiDefinition, FernNavigation } from "@fern-api/fdr-sdk"; import { compact, flatten } from "es-toolkit"; import { BaseRecord, EndpointBaseRecord } from "../types.js"; import { maybePrepareMdxContent } from "./prepare-mdx-content.js"; -import { toDescription } from "./utils.js"; +import { toDescription } from "./to-description.js"; interface CreateWebSocketEndpointBaseRecordOptions { node: FernNavigation.WebSocketNode; diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-webhook.ts b/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-webhook.ts
index e155f24e8d..fd967cc03a 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-webhook.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/create-endpoint-record-webhook.ts @@ -2,7 +2,7 @@ import { ApiDefinition, FernNavigation } from "@fern-api/fdr-sdk"; import { compact, flatten } from "es-toolkit"; import { BaseRecord, EndpointBaseRecord } from "../types.js"; import { maybePrepareMdxContent } from "./prepare-mdx-content.js"; -import { toDescription } from "./utils.js"; +import { toDescription } from "./to-description.js"; interface CreateWebhookEndpointBaseRecordOptions { node: FernNavigation.WebhookNode; diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/create-markdown-records.ts b/packages/ui/fern-docs-search-server/src/algolia/records/create-markdown-records.ts index 47f5ebef77..e31b347a55 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/create-markdown-records.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/create-markdown-records.ts @@ -57,7 +57,7 @@ export function createMarkdownRecords({ base, markdown }: CreateMarkdownRecordsO hash: undefined, description: description_content, content: root_content, - code_snippets, + code_snippets: code_snippets.length > 0 ? code_snippets : undefined, page_title, }); @@ -69,13 +69,20 @@ export function createMarkdownRecords({ base, markdown }: CreateMarkdownRecordsO const { heading, content: markdownContent, parents } = section; + const h1 = parents.find((p) => p.depth === 1); + const h2 = parents.find((p) => p.depth === 2); + const h3 = parents.find((p) => p.depth === 3); + const h4 = parents.find((p) => p.depth === 4); + const h5 = parents.find((p) => p.depth === 5); + const h6 = parents.find((p) => p.depth === 6); + const hierarchy: Record<`h${1 | 2 | 3 | 4 | 5 | 6}`, { id: string; title: string } | undefined> = { - h1: parents[0]?.depth === 1 ? { id: heading.id, title: heading.title } : undefined, - h2: parents[0]?.depth === 2 ? { id: heading.id, title: heading.title } : undefined, - h3: parents[0]?.depth === 3 ? { id: heading.id, title: heading.title } : undefined, - h4: parents[0]?.depth === 4 ? { id: heading.id, title: heading.title } : undefined, - h5: parents[0]?.depth === 5 ? { id: heading.id, title: heading.title } : undefined, - h6: parents[0]?.depth === 6 ? { id: heading.id, title: heading.title } : undefined, + h1: h1 ? { id: h1.id, title: h1.title } : undefined, + h2: h2 ? { id: h2.id, title: h2.title } : undefined, + h3: h3 ? { id: h3.id, title: h3.title } : undefined, + h4: h4 ? { id: h4.id, title: h4.title } : undefined, + h5: h5 ? { id: h5.id, title: h5.title } : undefined, + h6: h6 ? 
{ id: h6.id, title: h6.title } : undefined, }; hierarchy[`h${heading.depth}`] = { id: heading.id, title: heading.title }; diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/index.ts b/packages/ui/fern-docs-search-server/src/algolia/records/index.ts deleted file mode 100644 index 587b3ae87f..0000000000 --- a/packages/ui/fern-docs-search-server/src/algolia/records/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from "./generateAlgoliaRecords.js"; diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/prepare-mdx-content.ts b/packages/ui/fern-docs-search-server/src/algolia/records/prepare-mdx-content.ts index c3045f2ee9..68c9d0b6ec 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/records/prepare-mdx-content.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/records/prepare-mdx-content.ts @@ -1,11 +1,11 @@ import { isMdxExpression, isMdxJsxElement, mdastToString, toTree, visit } from "@fern-ui/fern-docs-mdx"; interface PreparedMdxContent { - content: string; - code_snippets?: { lang: string | undefined; meta: string | undefined; code: string }[]; + content: string | undefined; + code_snippets: { lang: string | undefined; meta: string | undefined; code: string }[] | undefined; } -export function maybePrepareMdxContent(content: string | undefined): Partial { +export function maybePrepareMdxContent(content: string | undefined): PreparedMdxContent { if (content == null) { return { content: undefined, code_snippets: undefined }; } @@ -46,8 +46,13 @@ export function prepareMdxContent(content: string): PreparedMdxContent { return true; }); + const stringifiedContent = mdastToString(tree, { + includeHtml: false, + includeImageAlt: true, + preserveNewlines: true, + }).trim(); return { - content: mdastToString(tree, { includeHtml: false, includeImageAlt: true, preserveNewlines: true }).trim(), + content: stringifiedContent.length > 0 ? stringifiedContent : undefined, code_snippets: code_snippets.length > 0 ? code_snippets : undefined, }; } diff --git a/packages/ui/fern-docs-search-server/src/algolia/records/to-description.ts b/packages/ui/fern-docs-search-server/src/algolia/records/to-description.ts new file mode 100644 index 0000000000..afab65c9bd --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/algolia/records/to-description.ts @@ -0,0 +1,26 @@ +import { FernDocs } from "@fern-api/fdr-sdk"; +import { isNonNullish } from "@fern-api/ui-core-utils"; + +export function toDescription( + descriptions: FernDocs.MarkdownText | (FernDocs.MarkdownText | undefined)[] | undefined, +): string | undefined { + if (descriptions == null) { + return undefined; + } + if (!Array.isArray(descriptions)) { + descriptions = [descriptions]; + } + const stringDescriptions = descriptions.filter(isNonNullish); + + if (stringDescriptions.length !== descriptions.length) { + throw new Error( + "Compiled markdown detected. 
When generating Algolia records, you must use the unresolved (uncompiled) version of the descriptions", + ); + } + + if (stringDescriptions.length === 0) { + return undefined; + } + + return stringDescriptions.join("\n\n"); +} diff --git a/packages/ui/fern-docs-search-server/src/algolia/set-index-settings.ts b/packages/ui/fern-docs-search-server/src/algolia/set-index-settings.ts new file mode 100644 index 0000000000..1114e32145 --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/algolia/set-index-settings.ts @@ -0,0 +1,20 @@ +import { Algoliasearch } from "algoliasearch"; +import { DISTINCT_FACET_ATTRIBUTES, SEARCHABLE_ATTRIBUTES } from "./types.js"; + +export async function setIndexSettings( + client: Algoliasearch, + indexName: string, +): Promise<{ taskID: number; updatedAt: string }> { + return client.setSettings({ + indexName, + indexSettings: { + searchableAttributes: [...SEARCHABLE_ATTRIBUTES], + attributesForFaceting: DISTINCT_FACET_ATTRIBUTES.map( + (attribute) => `afterDistinct(filterOnly(${attribute}))`, + ), + unretrievableAttributes: ["org_id", "visible_by", "authed"], + attributeForDistinct: "pathname", + enableRules: true, + }, + }); +} diff --git a/packages/ui/fern-docs-search-server/src/algolia/types.ts b/packages/ui/fern-docs-search-server/src/algolia/types.ts index 7ab95db0a3..b7051c6ee3 100644 --- a/packages/ui/fern-docs-search-server/src/algolia/types.ts +++ b/packages/ui/fern-docs-search-server/src/algolia/types.ts @@ -2,13 +2,10 @@ import { z } from "zod"; // in order of priority: export const SEARCHABLE_ATTRIBUTES = [ - "level_title", "page_title", - "description", - "payload_description", - "request_description", - "response_description", - "content", + "level_title", + "description,payload_description,request_description,response_description", + "unordered(content)", "endpoint_path", "parameter_name", @@ -22,17 +19,15 @@ export const SEARCHABLE_ATTRIBUTES = [ "status_code", "parameter_type", + // make code snippets searchable + "unordered(code_snippets.code)", + // hierarchy (in descending order of priority) - "hierarchy.h6.title", - "hierarchy.h5.title", - "hierarchy.h4.title", - "hierarchy.h3.title", - "hierarchy.h2.title", - "hierarchy.h1.title", + "hierarchy.h6.title,hierarchy.h5.title,hierarchy.h4.title,hierarchy.h3.title,hierarchy.h2.title,hierarchy.h1.title", ] as const; // these are metadata fields that we do not want to include in the search hits: -export const UNRETRIEVABLE_ATTRIBUTES = ["org_id", "domain", "visible_by", "authed"] as const; +export const DISTINCT_FACET_ATTRIBUTES = ["org_id", "domain", "visible_by", "authed"] as const; export const BaseRecordSchema = z.object({ objectID: z.string().describe("The unique identifier of this record"), @@ -49,21 +44,10 @@ export const BaseRecordSchema = z.object({ .optional() .describe("The description of the page. 
This should be rendered unless a highlighted snippet is returned"), code_snippets: z - .array( - z.object({ - lang: z.string().optional(), - meta: z.string().optional(), - code: z.string(), - }), - ) + .array(z.object({ lang: z.string().optional(), meta: z.string().optional(), code: z.string() })) .optional(), breadcrumb: z - .array( - z.object({ - title: z.string(), - pathname: z.string().optional(), - }), - ) + .array(z.object({ title: z.string(), pathname: z.string().optional() })) .describe("The breadcrumb of this record"), product: z.object({ id: z.string(), title: z.string() }).optional(), version: z.object({ id: z.string(), title: z.string() }).optional(), @@ -165,4 +149,3 @@ export type ChangelogRecord = z.infer; export type ApiReferenceRecord = z.infer; export type ParameterRecord = z.infer; export type AlgoliaRecord = z.infer; -export type VisibleAlgoliaRecord = Omit; diff --git a/packages/ui/fern-docs-search-server/src/fdr/load-docs-with-url.ts b/packages/ui/fern-docs-search-server/src/fdr/load-docs-with-url.ts index 4bab2475a2..aa0b93f1f9 100644 --- a/packages/ui/fern-docs-search-server/src/fdr/load-docs-with-url.ts +++ b/packages/ui/fern-docs-search-server/src/fdr/load-docs-with-url.ts @@ -3,7 +3,22 @@ import { withDefaultProtocol } from "@fern-api/ui-core-utils"; import { mapValues } from "es-toolkit/object"; interface LoadDocsWithUrlPayload { + /** + * FDR environment to use. (either `https://registry-dev2.buildwithfern.com` or `https://registry.buildwithfern.com`) + */ + environment: string; + + /** + * The shared secret token used to authenticate with FDR. + */ + fernToken: string; + + /** + * The domain to load docs for. + */ domain: string; + + // feature flags isBatchStreamToggleDisabled?: boolean; isApiScrollingDisabled?: boolean; useJavaScriptAsTypeScript?: boolean; @@ -21,8 +36,8 @@ interface LoadDocsWithUrlResponse { export async function loadDocsWithUrl(payload: LoadDocsWithUrlPayload): Promise<LoadDocsWithUrlResponse> { const client = new FdrClient({ - environment: "https://registry-dev2.buildwithfern.com", - token: process.env.FERN_TOKEN, + environment: payload.environment, + token: payload.fernToken, }); const docs = await client.docs.v2.read.getDocsForUrl({ url: ApiDefinition.Url(payload.domain) }); diff --git a/packages/ui/fern-docs-search-server/src/index.ts b/packages/ui/fern-docs-search-server/src/index.ts deleted file mode 100644 index 01e223aea4..0000000000 --- a/packages/ui/fern-docs-search-server/src/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from "./algolia/index.js"; diff --git a/packages/ui/fern-docs-search-server/src/tasks/algolia-index-settings-task.ts b/packages/ui/fern-docs-search-server/src/tasks/algolia-index-settings-task.ts new file mode 100644 index 0000000000..d8232cff90 --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/tasks/algolia-index-settings-task.ts @@ -0,0 +1,28 @@ +import { setIndexSettings } from "@/algolia/set-index-settings.js"; +import { algoliasearch } from "algoliasearch"; +import { assert } from "ts-essentials"; + +interface AlgoliaIndexSettingsTaskOptions { + indexName: string; + appId: string; + writeApiKey: string; +} + +interface AlgoliaIndexSettingsTaskResult { + taskID: number; + updatedAt: string; +} + +export async function algoliaIndexSettingsTask({ + indexName, + appId, + writeApiKey, +}: AlgoliaIndexSettingsTaskOptions): Promise<AlgoliaIndexSettingsTaskResult> { + assert(!!appId, "appId is required"); + assert(!!writeApiKey, "writeApiKey is required"); + + const algolia = algoliasearch(appId, writeApiKey); + + // TODO: add retry loop + return
setIndexSettings(algolia, indexName); +} diff --git a/packages/ui/fern-docs-search-server/src/tasks/algolia-indexer-task.ts b/packages/ui/fern-docs-search-server/src/tasks/algolia-indexer-task.ts new file mode 100644 index 0000000000..e0ee7f3686 --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/tasks/algolia-indexer-task.ts @@ -0,0 +1,110 @@ +import { browseAllObjectsForDomain } from "@/algolia/browse-all-objects-for-domain.js"; +import { createAlgoliaRecords } from "@/algolia/records/create-algolia-records.js"; +import { loadDocsWithUrl } from "@/fdr/load-docs-with-url.js"; +import { algoliasearch } from "algoliasearch"; +import { assert } from "ts-essentials"; + +interface AlgoliaIndexerPayload { + /** + * The Algolia app ID. + */ + appId: string; + + /** + * The Algolia admin API key. + */ + writeApiKey: string; + + /** + * The FDR environment to use. (either `https://registry-dev2.buildwithfern.com` or `https://registry.buildwithfern.com`) + */ + environment: string; + + /** + * The shared secret token used to authenticate with FDR. + */ + fernToken: string; + + /** + * The domain to load docs for. + */ + domain: string; + + /** + * The Algolia index name to use. + */ + indexName: string; + + /** + * Whether the docs are authed or not. + */ + authed?: boolean; + + // feature flags for v1 -> v2 migration + isBatchStreamToggleDisabled?: boolean; + isApiScrollingDisabled?: boolean; + useJavaScriptAsTypeScript?: boolean; + alwaysEnableJavaScriptFetch?: boolean; + usesApplicationJsonInFormDataValue?: boolean; +} + +interface AlgoliaIndexerTaskResponse { + taskID: number; + deletedObjectIDs: string[]; + addedObjectIDs: string[]; + updatedObjectIDs: string[]; +} + +export async function algoliaIndexerTask(payload: AlgoliaIndexerPayload): Promise<AlgoliaIndexerTaskResponse> { + assert(!!payload.appId, "appId is required"); + assert(!!payload.writeApiKey, "writeApiKey is required"); + + const algolia = algoliasearch(payload.appId, payload.writeApiKey); + + // load the docs + const { org_id, root, pages, apis, domain } = await loadDocsWithUrl(payload); + + // create new records (this is the target state of the index) + const targetRecords = createAlgoliaRecords({ root, domain, org_id, pages, apis, authed: payload.authed ?? false }); + + // browse the existing records (what is currently in the index) + const existingObjectIDs = (await browseAllObjectsForDomain(algolia, domain, payload.indexName, ["objectID"])) + .map((object) => object.objectID) + .filter((objectID): objectID is string => typeof objectID === "string"); + + // generate a map of the created, updated, and deleted records by their objectIDs + // the idea is that we want to delete old records that no longer exist, update records that still exist, and add records that are new + const targetRecordsByID = new Map(targetRecords.map((record) => [record.objectID, record])); + const updatedObjectIDs = existingObjectIDs.filter((objectID) => targetRecordsByID.has(objectID)); + const deletedObjectIDs = existingObjectIDs.filter((objectID) => !targetRecordsByID.has(objectID)); + + const updatedObjectIDsSet = new Set(updatedObjectIDs); + const addedObjectIDs = Array.from(targetRecordsByID.keys()).filter( + (objectID) => !updatedObjectIDsSet.has(objectID), + ); + + // TODO: add retry loop + const response = await algolia.batch({ + indexName: payload.indexName, + batchWriteParams: { + requests: [ + ...deletedObjectIDs.map((objectID) => ({ action: "deleteObject" as const, body: { objectID } })), + ...updatedObjectIDs.map((objectID) => ({ + action: "updateObject" as const, + body: targetRecordsByID.get(objectID) ??
{ objectID }, + })), + ...addedObjectIDs.map((objectID) => ({ + action: "addObject" as const, + body: targetRecordsByID.get(objectID) ?? { objectID }, + })), + ], + }, + }); + + return { + taskID: response.taskID, + deletedObjectIDs, + addedObjectIDs, + updatedObjectIDs, + }; +} diff --git a/packages/ui/fern-docs-search-server/src/tasks/index.ts b/packages/ui/fern-docs-search-server/src/tasks/index.ts new file mode 100644 index 0000000000..2ef45b6ac2 --- /dev/null +++ b/packages/ui/fern-docs-search-server/src/tasks/index.ts @@ -0,0 +1,2 @@ +export * from "./algolia-index-settings-task.js"; +export * from "./algolia-indexer-task.js"; diff --git a/packages/ui/fern-docs-search-server/src/trigger/algolia-indexer-task.ts b/packages/ui/fern-docs-search-server/src/trigger/algolia-indexer-task.ts deleted file mode 100644 index 33c5052224..0000000000 --- a/packages/ui/fern-docs-search-server/src/trigger/algolia-indexer-task.ts +++ /dev/null @@ -1,69 +0,0 @@ -import { browseAllObjectsForDomain } from "@/algolia/browse-all-objects-for-domain.js"; -import { createAlgoliaRecords } from "@/algolia/records/create-algolia-records.js"; -import { SEARCHABLE_ATTRIBUTES, UNRETRIEVABLE_ATTRIBUTES } from "@/algolia/types.js"; -import { loadDocsWithUrl } from "@/fdr/load-docs-with-url.js"; -import { logger, task } from "@trigger.dev/sdk/v3"; -import { algoliasearch } from "algoliasearch"; -import { z } from "zod"; - -const algoliaIndexerPayloadSchema = z.object({ - domain: z.string(), - - // whether the docs are authed or not - authed: z.boolean().optional(), - - // feature flags for v1 -> v2 migration - isBatchStreamToggleDisabled: z.boolean().optional(), - isApiScrollingDisabled: z.boolean().optional(), - useJavaScriptAsTypeScript: z.boolean().optional(), - alwaysEnableJavaScriptFetch: z.boolean().optional(), - usesApplicationJsonInFormDataValue: z.boolean().optional(), -}); - -export const algoliaIndexerTask = task({ - id: "algolia-indexer", - maxDuration: 300, // 5 minutes - run: async (unparsedPayload: any, { ctx }) => { - const payload = algoliaIndexerPayloadSchema.parse(unparsedPayload); - logger.log("Indexing algolia", { payload, ctx }); - - const { org_id, root, pages, apis, domain } = await loadDocsWithUrl(payload); - - const records = createAlgoliaRecords({ root, domain, org_id, pages, apis, authed: payload.authed ?? 
false }); - - if (process.env.ALGOLIA_APP_ID == null || process.env.ALGOLIA_ADMIN_API_KEY == null) { - throw new Error("ALGOLIA_APP_ID and ALGOLIA_ADMIN_API_KEY must be set"); - } - - const algolia = algoliasearch(process.env.ALGOLIA_APP_ID, process.env.ALGOLIA_ADMIN_API_KEY); - - const existingRecords = (await browseAllObjectsForDomain(algolia, domain, "fern-docs-search")) - .map((object) => object.objectID) - .filter((objectID): objectID is string => typeof objectID === "string"); - - await algolia.setSettings({ - indexName: "fern-docs-search", - indexSettings: { - searchableAttributes: [...SEARCHABLE_ATTRIBUTES], - attributesForFaceting: UNRETRIEVABLE_ATTRIBUTES.map( - (attribute) => `afterDistinct(filterOnly(${attribute}))`, - ), - unretrievableAttributes: [...UNRETRIEVABLE_ATTRIBUTES], - attributeForDistinct: "pathname", - enableRules: true, - }, - }); - - const response = await algolia.batch({ - indexName: "fern-docs-search", - batchWriteParams: { - requests: [ - ...existingRecords.map((objectID) => ({ action: "deleteObject" as const, body: { objectID } })), - ...records.map((record) => ({ action: "addObject" as const, body: record })), - ], - }, - }); - - return response.taskID; - }, -}); diff --git a/packages/ui/fern-docs-search-server/trigger.config.ts b/packages/ui/fern-docs-search-server/trigger.config.ts deleted file mode 100644 index 7f976defbf..0000000000 --- a/packages/ui/fern-docs-search-server/trigger.config.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { defineConfig } from "@trigger.dev/sdk/v3"; - -export default defineConfig({ - project: "proj_entuizwlvwskmwxdodva", - runtime: "node", - logLevel: "log", - // Set the maxDuration to 300 seconds for all tasks. See https://trigger.dev/docs/runs/max-duration - // maxDuration: 300, - retries: { - enabledInDev: true, - default: { - maxAttempts: 3, - minTimeoutInMs: 1000, - maxTimeoutInMs: 10000, - factor: 2, - randomize: true, - }, - }, - dirs: ["./src/trigger"], -}); diff --git a/packages/ui/fern-docs-search-server/tsconfig.json b/packages/ui/fern-docs-search-server/tsconfig.json index d82e1eee4e..61ada3d785 100644 --- a/packages/ui/fern-docs-search-server/tsconfig.json +++ b/packages/ui/fern-docs-search-server/tsconfig.json @@ -8,7 +8,8 @@ "rootDir": "./src", "baseUrl": "./src", "paths": { - "@/*": ["./*"] + "@/algolia/*": ["./algolia/*"], + "@/fdr/*": ["./fdr/*"] } }, "include": ["./src/**/*"], diff --git a/packages/ui/fern-docs-search-server/tsup.config.ts b/packages/ui/fern-docs-search-server/tsup.config.ts new file mode 100644 index 0000000000..563d71009f --- /dev/null +++ b/packages/ui/fern-docs-search-server/tsup.config.ts @@ -0,0 +1,10 @@ +import { defineConfig } from "tsup"; + +export default defineConfig({ + entry: ["src/algolia/index.ts", "src/algolia/types.ts", "src/tasks/index.ts"], + format: ["esm"], + dts: false, + splitting: false, + sourcemap: true, + clean: true, +}); diff --git a/packages/ui/fern-docs-search-ui/package.json b/packages/ui/fern-docs-search-ui/package.json index 7b61d2c8ea..089bb125ce 100644 --- a/packages/ui/fern-docs-search-ui/package.json +++ b/packages/ui/fern-docs-search-ui/package.json @@ -18,10 +18,12 @@ "@types/node": "^18.7.18", "@types/react": "^18.0.20", "@types/react-dom": "^18.2.18", + "autoprefixer": "^10.4.16", "eslint": "^8.56.0", "eslint-config-next": "14.2.15", "next": "^14", "postcss": "^8.4.33", + "postcss-import": "^16.0.1", "react": "^18.2.0", "react-dom": "^18.2.0", "tailwindcss": "^3.4.3", @@ -30,6 +32,8 @@ "dependencies": { "@algolia/autocomplete-core": "^1.17.6", 
"@fern-ui/fern-docs-search-server": "workspace:*", + "@fern-ui/fern-http-method-tag": "workspace:*", + "@fern-ui/react-commons": "workspace:*", "@radix-ui/react-radio-group": "^1.1.3", "@radix-ui/react-visually-hidden": "^1.1.0", "algoliasearch": "^5.10.2", diff --git a/packages/ui/fern-docs-search-ui/postcss.config.mjs b/packages/ui/fern-docs-search-ui/postcss.config.mjs index 1a69fd2a45..2fab0b7661 100644 --- a/packages/ui/fern-docs-search-ui/postcss.config.mjs +++ b/packages/ui/fern-docs-search-ui/postcss.config.mjs @@ -1,8 +1,10 @@ /** @type {import('postcss-load-config').Config} */ const config = { - plugins: { - tailwindcss: {}, - }, + plugins: { + "postcss-import": {}, + tailwindcss: {}, + autoprefixer: {}, + }, }; export default config; diff --git a/packages/ui/fern-docs-search-ui/src/app/actions/reindex.ts b/packages/ui/fern-docs-search-ui/src/app/actions/reindex.ts new file mode 100644 index 0000000000..b16e9ab645 --- /dev/null +++ b/packages/ui/fern-docs-search-ui/src/app/actions/reindex.ts @@ -0,0 +1,28 @@ +"use server"; + +import { algoliaAppId, algoliaWriteApiKey, fdrEnvironment, fernToken } from "@/server/env-variables"; +import { algoliaIndexSettingsTask, algoliaIndexerTask } from "@fern-ui/fern-docs-search-server/tasks"; + +const INDEX_NAME = "fern-docs-search"; + +export const handleReindex = async () => { + console.log("Reindexing"); + + await algoliaIndexSettingsTask({ + appId: algoliaAppId(), + writeApiKey: algoliaWriteApiKey(), + indexName: INDEX_NAME, + }); + + const response = await algoliaIndexerTask({ + appId: algoliaAppId(), + writeApiKey: algoliaWriteApiKey(), + indexName: INDEX_NAME, + environment: fdrEnvironment(), + fernToken: fernToken(), + domain: "docs.cohere.com", + authed: false, + }); + + console.debug(response); +}; diff --git a/packages/ui/fern-docs-search-ui/src/app/component.tsx b/packages/ui/fern-docs-search-ui/src/app/component.tsx new file mode 100644 index 0000000000..4572085900 --- /dev/null +++ b/packages/ui/fern-docs-search-ui/src/app/component.tsx @@ -0,0 +1,28 @@ +"use client"; + +import { DesktopInstantSearch } from "@/components/desktop/DesktopInstantSearch"; +import { createDefaultLinkComponent } from "@/components/shared/LinkComponent"; +import type { ReactElement } from "react"; + +export function DesktopInstantSearchWrapper({ + appId, + apiKey, + domain, +}: { + appId: string; + apiKey: string; + domain: string; +}): ReactElement { + const handleSubmit = ({ pathname, hash }: { pathname: string; hash: string }) => { + window.open(`https://${domain}${pathname}${hash}`, "_blank", "noopener,noreferrer"); + }; + + return ( + + ); +} diff --git a/packages/ui/fern-docs-search-ui/src/app/indexer/page.tsx b/packages/ui/fern-docs-search-ui/src/app/indexer/page.tsx new file mode 100644 index 0000000000..d76b9ac430 --- /dev/null +++ b/packages/ui/fern-docs-search-ui/src/app/indexer/page.tsx @@ -0,0 +1,12 @@ +"use client"; + +import { ReactElement } from "react"; +import { handleReindex } from "../actions/reindex"; + +export default async function IndexerPage(): Promise { + return ( +
+            <button onClick={() => handleReindex()}>Reindex</button>
+        </div>
+ ); +} diff --git a/packages/ui/fern-docs-search-ui/src/app/layout.tsx b/packages/ui/fern-docs-search-ui/src/app/layout.tsx index 3f0b48fe33..3f5458ba85 100644 --- a/packages/ui/fern-docs-search-ui/src/app/layout.tsx +++ b/packages/ui/fern-docs-search-ui/src/app/layout.tsx @@ -1,3 +1,4 @@ +import "@fern-ui/fern-http-method-tag/index.css"; import type { Metadata } from "next"; import "./globals.css"; diff --git a/packages/ui/fern-docs-search-ui/src/app/page.tsx b/packages/ui/fern-docs-search-ui/src/app/page.tsx index 629463384f..14cb71cc76 100644 --- a/packages/ui/fern-docs-search-ui/src/app/page.tsx +++ b/packages/ui/fern-docs-search-ui/src/app/page.tsx @@ -1,16 +1,18 @@ "use server"; -import { DesktopInstantSearch } from "@/components/desktop/DesktopInstantSearch"; -import { algoliaAdminApiKey, algoliaAppId, algoliaSearchApikey } from "@/server/env-variables"; +import { algoliaAppId, algoliaSearchApikey, algoliaWriteApiKey } from "@/server/env-variables"; import { withSearchApiKey } from "@/server/with-search-api-key"; import { ReactElement } from "react"; +import { DesktopInstantSearchWrapper } from "./component"; + +const domain = "docs.cohere.com"; export default async function Home(): Promise { const apiKey = withSearchApiKey({ appId: algoliaAppId(), - adminApiKey: algoliaAdminApiKey(), - parentApiKey: algoliaSearchApikey(), - domain: "fern.docs.dev.buildwithfern.com", + writeApiKey: algoliaWriteApiKey(), + searchApiKey: algoliaSearchApikey(), + domain, roles: [], authed: false, }); @@ -18,7 +20,7 @@ export default async function Home(): Promise { return (
-            <DesktopInstantSearch appId={algoliaAppId()} apiKey={apiKey} />
+            <DesktopInstantSearchWrapper appId={algoliaAppId()} apiKey={apiKey} domain={domain} />
); diff --git a/packages/ui/fern-docs-search-ui/src/components/desktop/DesktopInstantSearch.tsx b/packages/ui/fern-docs-search-ui/src/components/desktop/DesktopInstantSearch.tsx index 19b7aaf2b8..82615140c5 100644 --- a/packages/ui/fern-docs-search-ui/src/components/desktop/DesktopInstantSearch.tsx +++ b/packages/ui/fern-docs-search-ui/src/components/desktop/DesktopInstantSearch.tsx @@ -1,10 +1,18 @@ -"use client"; - +import { AlgoliaRecord } from "@fern-ui/fern-docs-search-server/types"; import { liteClient as algoliasearch } from "algoliasearch/lite"; import "instantsearch.css/themes/reset.css"; -import { useEffect, useRef, type ReactElement } from "react"; -import { Configure } from "react-instantsearch"; +import { + FormEvent, + FormHTMLAttributes, + PropsWithChildren, + forwardRef, + useEffect, + useRef, + type ReactElement, +} from "react"; +import { Configure, useHits } from "react-instantsearch"; import { InstantSearchNext } from "react-instantsearch-nextjs"; +import { LinkComponentType } from "../shared/LinkComponent"; import { SegmentedHits } from "../shared/SegmentedHits"; import { useTrapFocus } from "../shared/useTrapFocus"; import { DesktopSearchBox } from "./DesktopSearchBox"; @@ -12,9 +20,16 @@ import { DesktopSearchBox } from "./DesktopSearchBox"; interface DesktopInstantSearchProps { appId: string; apiKey: string; + LinkComponent: LinkComponentType; + onSubmit: (hit: { pathname: string; hash: string }) => void; } -export function DesktopInstantSearch({ appId, apiKey }: DesktopInstantSearchProps): ReactElement { +export function DesktopInstantSearch({ + appId, + apiKey, + LinkComponent, + onSubmit, +}: DesktopInstantSearchProps): ReactElement { const ref = useRef(algoliasearch(appId, apiKey)); const formRef = useRef(null); const inputRef = useRef(null); @@ -35,23 +50,16 @@ export function DesktopInstantSearch({ appId, apiKey }: DesktopInstantSearchProp -
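`DesktopInstantSearch` no longer hard-codes its link renderer or submit behavior; both are injected by the host. A minimal sketch of how a host mounts it, mirroring what `DesktopInstantSearchWrapper` in `src/app/component.tsx` does (the `SearchDialog` name and the navigation target are assumptions for illustration):

```tsx
import { DesktopInstantSearch } from "@/components/desktop/DesktopInstantSearch";
import { createDefaultLinkComponent } from "@/components/shared/LinkComponent";

// Hypothetical host-side usage: appId/apiKey come from the server, and domain is
// whichever docs site the index was built for.
export function SearchDialog({ appId, apiKey, domain }: { appId: string; apiKey: string; domain: string }) {
    return (
        <DesktopInstantSearch
            appId={appId}
            apiKey={apiKey}
            LinkComponent={createDefaultLinkComponent(domain)}
            // navigate to the selected hit when the user presses enter
            onSubmit={({ pathname, hash }) => {
                window.location.href = `https://${domain}${pathname}${hash}`;
            }}
        />
    );
}
```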
{ - event.preventDefault(); - }} + onSubmit={onSubmit} > -
{ - inputRef.current?.focus(); - }} - > +
inputRef.current?.focus()}>
- - + +
    );
}
+
+interface DesktopSearchFormProps extends Omit<FormHTMLAttributes<HTMLFormElement>, "onSubmit"> {
+    onSubmit: (hit: { pathname: string; hash: string }) => void;
+}
+
+const DesktopSearchForm = forwardRef<HTMLFormElement, PropsWithChildren<DesktopSearchFormProps>>(
+    ({ children, onSubmit, ...props }, ref): ReactElement => {
+        const { items } = useHits<AlgoliaRecord>();
+        const handleSubmit = (event: FormEvent<HTMLFormElement>) => {
+            event.preventDefault();
+            const radioGroup = event.currentTarget.elements.namedItem("fern-docs-search-selected-hit");
+            if (radioGroup instanceof RadioNodeList) {
+                const objectID = radioGroup.value;
+                const hit = items.find((hit) => hit.objectID === objectID);
+                if (hit) {
+                    onSubmit({
+                        pathname: hit.pathname ?? "",
+                        hash: hit.hash ?? "",
+                    });
+                }
+            }
+        };
+        return (
+ {children} +
+ ); + }, +); diff --git a/packages/ui/fern-docs-search-ui/src/components/shared/ArrowTurnDownLeftIcon.tsx b/packages/ui/fern-docs-search-ui/src/components/shared/ArrowTurnDownLeftIcon.tsx new file mode 100644 index 0000000000..9e061d17e2 --- /dev/null +++ b/packages/ui/fern-docs-search-ui/src/components/shared/ArrowTurnDownLeftIcon.tsx @@ -0,0 +1,23 @@ +import { ReactElement, SVGProps, forwardRef } from "react"; + +export const ArrowTurnDownLeftIcon = forwardRef>( + (props, ref): ReactElement> => ( + + ), +); diff --git a/packages/ui/fern-docs-search-ui/src/components/shared/HitContent.tsx b/packages/ui/fern-docs-search-ui/src/components/shared/HitContent.tsx index 321a1d4ee2..82be1abfaf 100644 --- a/packages/ui/fern-docs-search-ui/src/components/shared/HitContent.tsx +++ b/packages/ui/fern-docs-search-ui/src/components/shared/HitContent.tsx @@ -1,13 +1,10 @@ -import { - ApiReferenceRecord, - ChangelogRecord, - ParameterRecord, -} from "@fern-ui/fern-docs-search-server/src/algolia/types"; +import { ParameterRecord } from "@fern-ui/fern-docs-search-server/types"; +import { HttpMethodTag } from "@fern-ui/fern-http-method-tag"; import { Hit } from "algoliasearch/lite"; import { ReactElement } from "react"; import { Highlight, Snippet } from "react-instantsearch"; import { MarkRequired, UnreachableCaseError } from "ts-essentials"; -import { AlgoliaRecordHit, MarkdownRecordHit } from "../types"; +import { AlgoliaRecordHit, ApiReferenceRecordHit, ChangelogRecordHit, MarkdownRecordHit } from "../types"; const headingLevels = ["h1", "h2", "h3", "h4", "h5", "h6"] as const; @@ -23,13 +20,19 @@ function HierarchyBreadcrumb({ if (!level) { return null; } + const breadcrumb: string[] = []; if (pageTitle) { breadcrumb.push(pageTitle); } - headingLevels.slice(0, headingLevels.indexOf(level)); + headingLevels.slice(0, headingLevels.indexOf(level)).forEach((headingLevel) => { + const title = hierarchy?.[headingLevel]?.title; + if (title) { + breadcrumb.push(title); + } + }); return (
@@ -47,7 +50,8 @@ function MarkdownHitContent({ hit }: { hit: MarkdownRecordHit }): ReactElement { return (
}): ReactElement { +function ChangelogHitContent({ hit }: { hit: ChangelogRecordHit }): ReactElement { return
{hit.page_title ?? hit.objectID}
; } -function ApiReferenceHitContent({ hit }: { hit: MarkRequired }): ReactElement { - return
{hit.page_title ?? hit.objectID}
; +function ApiReferenceHitContent({ hit }: { hit: ApiReferenceRecordHit }): ReactElement { + const attribute = hit._highlightResult?.request_description + ? "request_description" + : hit._highlightResult?.response_description + ? "response_description" + : hit._highlightResult?.payload_description + ? "payload_description" + : hit._highlightResult?.description + ? "description" + : undefined; + return ( +
+ +
+ + {hit.endpoint_path} +
+ {attribute && ( + + )} +
+ ); } function ParameterHitContent({ hit }: { hit: MarkRequired }): ReactElement { @@ -83,12 +121,12 @@ export function HitContent({ hit }: { hit: MarkRequired; case "changelog": - return , "type">} />; + return ; case "api-reference": - return , "type">} />; + return ; case "parameter": return , "type">} />; default: - throw new UnreachableCaseError(hit.type); + throw new UnreachableCaseError(hit); } } diff --git a/packages/ui/fern-docs-search-ui/src/components/shared/LinkComponent.tsx b/packages/ui/fern-docs-search-ui/src/components/shared/LinkComponent.tsx new file mode 100644 index 0000000000..bf1cc3080b --- /dev/null +++ b/packages/ui/fern-docs-search-ui/src/components/shared/LinkComponent.tsx @@ -0,0 +1,21 @@ +import { ComponentType, PropsWithChildren } from "react"; + +export type LinkComponentType = ComponentType< + PropsWithChildren<{ hit: { pathname: string; hash: string }; className?: string }> +>; + +export const createDefaultLinkComponent = + (domain: string): LinkComponentType => + ({ hit, children, className }) => { + return ( + + {children} + + ); + }; diff --git a/packages/ui/fern-docs-search-ui/src/components/shared/SegmentedHits.tsx b/packages/ui/fern-docs-search-ui/src/components/shared/SegmentedHits.tsx index 6663a095b4..0b91cfddf4 100644 --- a/packages/ui/fern-docs-search-ui/src/components/shared/SegmentedHits.tsx +++ b/packages/ui/fern-docs-search-ui/src/components/shared/SegmentedHits.tsx @@ -1,14 +1,21 @@ -import { VisibleAlgoliaRecord } from "@fern-ui/fern-docs-search-server/src/algolia/types"; +import { AlgoliaRecord } from "@fern-ui/fern-docs-search-server/types"; import * as RadioGroup from "@radix-ui/react-radio-group"; import { last, uniq } from "es-toolkit/array"; -import Link from "next/link"; -import { ReactElement } from "react"; +import { ReactElement, RefObject, useDeferredValue } from "react"; import { useHits } from "react-instantsearch"; import { MarkRequired } from "ts-essentials"; import { AlgoliaRecordHit } from "../types"; +import { ArrowTurnDownLeftIcon } from "./ArrowTurnDownLeftIcon"; import { HitContent } from "./HitContent"; +import { LinkComponentType } from "./LinkComponent"; +import { SegmentedHitsRadioGroup } from "./SegmentedHitsRadioGroup"; -function Hit({ hit }: { hit: AlgoliaRecordHit }): ReactElement | null { +interface HitProps { + hit: AlgoliaRecordHit; + LinkComponent: LinkComponentType; +} + +function Hit({ hit, LinkComponent }: HitProps): ReactElement | null { if (hit.type == null) { return null; } @@ -17,45 +24,89 @@ function Hit({ hit }: { hit: AlgoliaRecordHit }): ReactElement | null { value={hit.objectID} className="mx-2 p-2 rounded-md hover:bg-[#CCC]/30 data-[state=checked]:bg-[#CCC]/30 text-left block" > - - } /> - + +
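Since `SegmentedHits` no longer imports `next/link` directly, a host that wants client-side routing can supply its own `LinkComponentType` instead of the anchor-based default above. A hypothetical Next.js-flavored implementation, not part of this diff:

```tsx
import Link from "next/link";
import type { LinkComponentType } from "./LinkComponent";

// Hypothetical: route hits through next/link instead of a plain anchor tag,
// useful when the search UI is embedded directly in the docs app.
export const NextLinkComponent: LinkComponentType = ({ hit, children, className }) => (
    <Link href={`${hit.pathname}${hit.hash}`} className={className}>
        {children}
    </Link>
);
```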
+ } /> +
+ + + +
); } -export function SegmentedHits(): ReactElement { - const { items } = useHits(); +const DEFAULT_SEGMENT = "__internal_segment_default__"; - const segments: string[] = []; +export function SegmentedHits({ + inputRef, + LinkComponent, +}: { + inputRef: RefObject; + LinkComponent: LinkComponentType; +}): ReactElement { + const { items: rawHits } = useHits(); - const segmentedHits: Record = {}; + // avoid unnecessary re-renders while the user is typing + const items = useDeferredValue(rawHits); - items.forEach((item) => { - const segment = last(item.breadcrumb)?.title ?? item.tab?.title ?? item.type; + // NOTE: the items from `useHits` gets re-ordered whenever the query changes, so we should NOT memoize any of the following logic: - if (!segment) { - return; - } + // Search hits are ordered, but we want to group the search results thematically. + const segments: string[] = [DEFAULT_SEGMENT]; - segments.push(segment); + const segmentedHits: Record = { + [DEFAULT_SEGMENT]: [], + }; - if (segmentedHits[segment] == null) { - segmentedHits[segment] = []; - } + items.forEach((item) => { + // the last item in the breadcrumb is the most specific, so we use that as the segment + // if no breadcrumb exists, we use the tab title. We don't include the version or product here because they should be filtered out up-stream. + const segment = last(item.breadcrumb)?.title ?? item.tab?.title ?? DEFAULT_SEGMENT; + segments.push(segment); + segmentedHits[segment] ??= []; segmentedHits[segment].push(item); }); + const uniqueSegments = uniq(segments).filter( + (segment) => segmentedHits[segment] != null && segmentedHits[segment].length > 0, + ); + + // this will be used to determine the order of hits wrt keyboard navigation + const orderedObjectIDs = uniqueSegments.flatMap((segment) => segmentedHits[segment]?.map((hit) => hit.objectID)); + + // this will be used to "skip" over hits in the same segment when navigating with the keyboard + const segmentsIndices: { segment: string; index: number }[] = []; + uniqueSegments.forEach((segment) => { + const lastSegment = last(segmentsIndices); + if (lastSegment == null) { + segmentsIndices.push({ segment, index: 0 }); + } else { + segmentsIndices.push({ + segment, + index: lastSegment.index + (segmentedHits[lastSegment.segment]?.length ?? 0), + }); + } + }); + return ( - - {uniq(segments).map((segment) => ( -
-
{segment}
+ + {uniqueSegments.map((segment) => ( +
+ {segment !== DEFAULT_SEGMENT && ( +
{segment}
+ )} - {segmentedHits[segment]?.map((hit) => )} + {segmentedHits[segment]?.map((hit) => ( + + ))}
))} - +
); } diff --git a/packages/ui/fern-docs-search-ui/src/components/shared/SegmentedHitsRadioGroup.tsx b/packages/ui/fern-docs-search-ui/src/components/shared/SegmentedHitsRadioGroup.tsx new file mode 100644 index 0000000000..6db37af9e1 --- /dev/null +++ b/packages/ui/fern-docs-search-ui/src/components/shared/SegmentedHitsRadioGroup.tsx @@ -0,0 +1,95 @@ +import { useDeepCompareEffect, useEventCallback } from "@fern-ui/react-commons"; +import * as RadioGroup from "@radix-ui/react-radio-group"; +import { last } from "es-toolkit/array"; +import { ReactElement, ReactNode, RefObject, useEffect, useState } from "react"; + +export function SegmentedHitsRadioGroup({ + orderedObjectIDs, + segmentsIndices, + children, + inputRef, +}: { + orderedObjectIDs: string[]; + segmentsIndices: { segment: string; index: number }[]; + children: ReactNode; + inputRef: RefObject; +}): ReactElement { + const [selectedObjectID, setSelectedObjectID] = useState((): string | undefined => orderedObjectIDs[0]); + + // fall back to the first objectID if the selectedObjectID is not in the orderedObjectIDs + const value = + selectedObjectID != null && orderedObjectIDs.includes(selectedObjectID) + ? selectedObjectID + : orderedObjectIDs[0]; + + // reset the selectedObjectID to the first objectID when the orderedObjectIDs change + useDeepCompareEffect(() => { + setSelectedObjectID(orderedObjectIDs[0]); + }, [orderedObjectIDs]); + + // handle keyboard navigation + // arrow down/up: navigate to the next/previous hit in the current segment + // alt + arrow down/up: skip over 5 hits + // meta + arrow down/up: skip to the start of the next section (or if previous, the start of the current section or previous section) + const handleKeyDown = useEventCallback((event: KeyboardEvent) => { + if (event.target !== inputRef.current) { + // don't prevent default if the target is not the search input + return; + } + + try { + const index = orderedObjectIDs.indexOf(value); + const currentSegmentIndex = segmentsIndices.findLastIndex((segment) => index >= segment.index); + if (event.key === "ArrowDown") { + if (!event.altKey && !event.metaKey && !event.shiftKey && !event.ctrlKey) { + setSelectedObjectID(orderedObjectIDs[index + 1] ?? last(orderedObjectIDs)); + } else if (event.altKey) { + setSelectedObjectID(orderedObjectIDs[index + 5] ?? last(orderedObjectIDs)); + } else if (event.metaKey) { + const nextSegment = segmentsIndices[currentSegmentIndex + 1]; + setSelectedObjectID(orderedObjectIDs[nextSegment?.index ?? orderedObjectIDs.length - 1]); + } else { + // don't prevent default + return; + } + } else if (event.key === "ArrowUp") { + if (!event.altKey && !event.metaKey && !event.shiftKey && !event.ctrlKey) { + setSelectedObjectID(orderedObjectIDs[index - 1] ?? orderedObjectIDs[0]); + } else if (event.altKey) { + setSelectedObjectID(orderedObjectIDs[index - 5] ?? orderedObjectIDs[0]); + } else if (event.metaKey) { + // this is a special UX case where if you're not at the start of the current segment, meta + up will jump to the start of the current segment + // and if you're at the start of the current segment, it will jump to the start of the previous segment + const currentSegmentStartIndex = segmentsIndices[currentSegmentIndex].index; + const previousSegment = segmentsIndices[currentSegmentIndex - 1]; + const jumpedToIndex = + currentSegmentStartIndex === index ? previousSegment?.index : currentSegmentStartIndex; + setSelectedObjectID(orderedObjectIDs[jumpedToIndex ?? 
0]); + } else { + // don't prevent default + return; + } + } else { + // don't prevent default for any other keys + return; + } + + event.preventDefault(); + event.stopImmediatePropagation(); + } catch (error) { + // ignore + } + }); + + // add the event listener when this component mounts + useEffect(() => { + window.addEventListener("keydown", handleKeyDown); + return () => window.removeEventListener("keydown", handleKeyDown); + }, [orderedObjectIDs]); + + return ( + + {children} + + ); +} diff --git a/packages/ui/fern-docs-search-ui/src/components/types.ts b/packages/ui/fern-docs-search-ui/src/components/types.ts index 2ab4477387..d84cefc18b 100644 --- a/packages/ui/fern-docs-search-ui/src/components/types.ts +++ b/packages/ui/fern-docs-search-ui/src/components/types.ts @@ -1,6 +1,13 @@ -import type { MarkdownRecord, VisibleAlgoliaRecord } from "@fern-ui/fern-docs-search-server/src/algolia/types"; +import type { + AlgoliaRecord, + ApiReferenceRecord, + ChangelogRecord, + MarkdownRecord, +} from "@fern-ui/fern-docs-search-server/types"; import type { BaseHit, Hit } from "instantsearch.js"; import { MarkRequired } from "ts-essentials"; -export type AlgoliaRecordHit = Hit; +export type AlgoliaRecordHit = Hit; export type MarkdownRecordHit = MarkRequired, "type">; +export type ChangelogRecordHit = MarkRequired, "type">; +export type ApiReferenceRecordHit = MarkRequired, "type">; diff --git a/packages/ui/fern-docs-search-ui/src/server/env-variables.ts b/packages/ui/fern-docs-search-ui/src/server/env-variables.ts index 6d37b12ea6..ef53fce5c8 100644 --- a/packages/ui/fern-docs-search-ui/src/server/env-variables.ts +++ b/packages/ui/fern-docs-search-ui/src/server/env-variables.ts @@ -2,14 +2,22 @@ export function algoliaAppId() { return getEnvVariable("ALGOLIA_APP_ID"); } -export function algoliaAdminApiKey() { - return getEnvVariable("ALGOLIA_ADMIN_API_KEY"); +export function algoliaWriteApiKey() { + return getEnvVariable("ALGOLIA_WRITE_API_KEY"); } export function algoliaSearchApikey() { return getEnvVariable("ALGOLIA_SEARCH_API_KEY"); } +export function fernToken() { + return getEnvVariable("FERN_TOKEN"); +} + +export function fdrEnvironment() { + return getEnvVariable("FDR_ENVIRONMENT"); +} + function assertNonNullable(value: T, key: string): asserts value is NonNullable { if (value == null) { throw new Error(`${key} is not defined`); diff --git a/packages/ui/fern-docs-search-ui/src/server/with-search-api-key.ts b/packages/ui/fern-docs-search-ui/src/server/with-search-api-key.ts index 40b94db0ce..0bd1cfb590 100644 --- a/packages/ui/fern-docs-search-ui/src/server/with-search-api-key.ts +++ b/packages/ui/fern-docs-search-ui/src/server/with-search-api-key.ts @@ -1,19 +1,19 @@ -import { getSearchApiKey } from "@fern-ui/fern-docs-search-server"; +import { getSearchApiKey } from "@fern-ui/fern-docs-search-server/algolia"; import { algoliasearch } from "algoliasearch"; interface WithSearchApiKeyOptions { appId: string; - adminApiKey: string; - parentApiKey: string; + writeApiKey: string; + searchApiKey: string; domain: string; roles: string[]; authed: boolean; } -export function withSearchApiKey({ appId, adminApiKey, parentApiKey, domain, roles, authed }: WithSearchApiKeyOptions) { +export function withSearchApiKey({ appId, writeApiKey, searchApiKey, domain, roles, authed }: WithSearchApiKeyOptions) { return getSearchApiKey({ - client: algoliasearch(appId, adminApiKey), - parentApiKey, + client: algoliasearch(appId, writeApiKey), + parentApiKey: searchApiKey, domain, roles, authed, diff --git 
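The keyboard handling above reduces to choosing a new position in the flat `orderedObjectIDs` list based on the held modifier: plain arrows step by one, alt steps by five, and meta jumps between segment boundaries. A standalone sketch of that selection rule, with the function name and types being illustrative:

```ts
interface SegmentIndex {
    segment: string;
    index: number;
}

// Illustrative restatement of the arrow-key rules implemented above.
function nextSelectedIndex(
    current: number,
    direction: "up" | "down",
    modifiers: { alt?: boolean; meta?: boolean },
    total: number,
    segments: SegmentIndex[],
): number {
    const clamp = (index: number) => Math.min(Math.max(index, 0), total - 1);
    const step = direction === "down" ? 1 : -1;

    if (modifiers.meta) {
        const currentSegment = segments.findLastIndex((segment) => current >= segment.index);
        if (direction === "down") {
            // jump to the start of the next segment, or the last hit overall
            return clamp(segments[currentSegment + 1]?.index ?? total - 1);
        }
        // going up: jump to the start of the current segment, or to the previous
        // segment if we are already sitting at the start of the current one
        const start = segments[currentSegment]?.index ?? 0;
        return clamp(start === current ? (segments[currentSegment - 1]?.index ?? 0) : start);
    }

    return clamp(current + step * (modifiers.alt ? 5 : 1));
}
```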
a/packages/ui/fern-docs-search-ui/tsconfig.json b/packages/ui/fern-docs-search-ui/tsconfig.json index 34ee7e2161..8b9d92393b 100644 --- a/packages/ui/fern-docs-search-ui/tsconfig.json +++ b/packages/ui/fern-docs-search-ui/tsconfig.json @@ -6,12 +6,12 @@ "lib": ["dom", "dom.iterable", "esnext"], "allowJs": true, "skipLibCheck": true, - "strict": false, + "strict": true, "noEmit": true, "incremental": true, - "module": "esnext", + "module": "ESNext", "esModuleInterop": true, - "moduleResolution": "node", + "moduleResolution": "Bundler", "resolveJsonModule": true, "isolatedModules": true, "jsx": "preserve", @@ -20,7 +20,12 @@ "@/*": ["./src/*"] } }, - "references": [{ "path": "./tsconfig.server.json" }], + "references": [ + { "path": "./tsconfig.server.json" }, + { "path": "../fern-docs-search-server" }, + { "path": "../fern-http-method-tag" }, + { "path": "../../commons/react/react-commons" } + ], "include": ["next-env.d.ts", ".next/types/**/*.ts", "**/*.ts", "**/*.tsx"], "exclude": ["node_modules", "src/server/**/*"] } diff --git a/packages/ui/fern-http-method-tag/HttpMethodTag.tsx b/packages/ui/fern-http-method-tag/HttpMethodTag.tsx new file mode 100644 index 0000000000..552fb4dcfe --- /dev/null +++ b/packages/ui/fern-http-method-tag/HttpMethodTag.tsx @@ -0,0 +1,139 @@ +import { + amber, + amberA, + amberDark, + amberDarkA, + blue, + blueA, + blueDark, + blueDarkA, + green, + greenA, + greenDark, + greenDarkA, + red, + redA, + redDark, + redDarkA, +} from "@radix-ui/colors"; +import { clsx } from "clsx"; +import { PropsWithChildren, forwardRef } from "react"; + +export type HttpMethod = "GET" | "DELETE" | "POST" | "PUT" | "PATCH"; +export type ColorScheme = "blue" | "green" | "amber" | "red"; +export type Size = "sm" | "lg"; + +export const TagSizes: { [key: string]: Size } = { + Small: "sm", + Large: "lg", +}; + +const METHOD_COLOR_SCHEMES: Record = { + GET: "green", + DELETE: "red", + POST: "blue", + PUT: "amber", + PATCH: "amber", +}; + +const SUBTLE_BACKGROUND_COLORS: Record = { + blue: blueA.blueA3, + green: greenA.greenA3, + amber: amberA.amberA3, + red: redA.redA3, +}; + +const SUBTLE_BACKGROUND_COLORS_DARK: Record = { + blue: blueDarkA.blueA3, + green: greenDarkA.greenA3, + amber: amberDarkA.amberA3, + red: redDarkA.redA3, +}; + +const SOLID_BACKGROUND_COLORS: Record = { + blue: blueA.blueA10, + green: greenA.greenA10, + amber: amberA.amberA10, + red: redA.redA10, +}; + +const SOLID_BACKGROUND_COLORS_DARK: Record = { + blue: blueDark.blue1, + green: greenDark.green1, + amber: amberDark.amber1, + red: redDark.red1, +}; + +const SUBTLE_TEXT_COLORS: Record = { + blue: blueA.blueA11, + green: greenA.greenA11, + amber: amberA.amberA11, + red: redA.redA11, +}; + +const SUBTLE_TEXT_COLORS_DARK: Record = { + blue: blueDark.blue11, + green: greenDark.green11, + amber: amberDark.amber11, + red: redDark.red11, +}; + +const SOLID_TEXT_COLORS: Record = { + blue: blue.blue1, + green: green.green1, + amber: amber.amber1, + red: red.red1, +}; + +const SOLID_TEXT_COLORS_DARK: Record = { + blue: blueDark.blue12, + green: greenDark.green12, + amber: amberDark.amber12, + red: redDark.red12, +}; + +export interface HttpMethodTagProps extends PropsWithChildren { + size?: Size; + variant?: "subtle" | "solid"; + method: HttpMethod; + className?: string; + skeleton?: boolean; +} + +/** + * The `FernTag` component is used for items that need to be labeled, categorized, or organized using keywords that describe them. 
+ */ +export const HttpMethodTag = forwardRef( + ({ children, size = "lg", method, variant = "subtle", className, skeleton }, ref) => { + const colorScheme = METHOD_COLOR_SCHEMES[method] ?? "blue"; + children ??= method === "DELETE" ? "DEL" : method; + + const backgroundColor = (variant === "subtle" ? SUBTLE_BACKGROUND_COLORS : SOLID_BACKGROUND_COLORS)[ + colorScheme + ]; + const backgroundColorDark = ( + variant === "subtle" ? SUBTLE_BACKGROUND_COLORS_DARK : SOLID_BACKGROUND_COLORS_DARK + )[colorScheme]; + const textColor = (variant === "subtle" ? SUBTLE_TEXT_COLORS : SOLID_TEXT_COLORS)[colorScheme]; + const textColorDark = (variant === "subtle" ? SUBTLE_TEXT_COLORS_DARK : SOLID_TEXT_COLORS_DARK)[colorScheme]; + + return ( + + {skeleton ? {children} : children} + + ); + }, +); + +HttpMethodTag.displayName = "HttpMethodTag"; diff --git a/packages/ui/fern-http-method-tag/index.css b/packages/ui/fern-http-method-tag/index.css new file mode 100644 index 0000000000..ed1d28509f --- /dev/null +++ b/packages/ui/fern-http-method-tag/index.css @@ -0,0 +1,38 @@ +.fern-http-method-tag { + align-items: center; + display: inline-flex; + font-family: + ui-monospace, + SFMono-Regular, + Menlo, + Monaco, + Consolas, + Liberation Mono, + Courier New, + monospace; + justify-content: center; + line-height: 1rem; + text-transform: uppercase; + font-weight: 600; +} + +.fern-http-method-tag.small { + border-radius: 0.375rem; + font-size: 0.625rem; + height: 1rem; + padding-left: 0.375rem; + padding-right: 0.375rem; +} + +.fern-http-method-tag.large { + border-radius: 0.5rem; + font-size: 0.75rem; + height: 1.5rem; + padding: 0.25rem 0.5rem; +} + +:is(.dark) .fern-http-method-tag, +:is([data-theme="dark"]) .fern-http-method-tag { + background-color: var(--background-color-dark); + color: var(--color-dark); +} diff --git a/packages/ui/fern-http-method-tag/index.ts b/packages/ui/fern-http-method-tag/index.ts new file mode 100644 index 0000000000..3aab178ecd --- /dev/null +++ b/packages/ui/fern-http-method-tag/index.ts @@ -0,0 +1 @@ +export * from "./HttpMethodTag"; diff --git a/packages/ui/fern-http-method-tag/package.json b/packages/ui/fern-http-method-tag/package.json new file mode 100644 index 0000000000..ee1da62825 --- /dev/null +++ b/packages/ui/fern-http-method-tag/package.json @@ -0,0 +1,31 @@ +{ + "name": "@fern-ui/fern-http-method-tag", + "version": "0.0.0", + "private": true, + "type": "module", + "main": "index.ts", + "peerDependencies": { + "next": "^14", + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "exports": { + "./index.css": "./index.css", + ".": "./index.ts" + }, + "devDependencies": { + "@types/node": "^18.7.18", + "@types/react": "^18.0.20", + "@types/react-dom": "^18.2.18", + "eslint": "^8.56.0", + "eslint-config-next": "14.2.15", + "postcss": "^8.4.33", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "typescript": "5.4.3" + }, + "dependencies": { + "@radix-ui/colors": "^3.0.0", + "clsx": "^2.1.1" + } +} diff --git a/packages/ui/fern-http-method-tag/tsconfig.json b/packages/ui/fern-http-method-tag/tsconfig.json new file mode 100644 index 0000000000..4ff24d0fcb --- /dev/null +++ b/packages/ui/fern-http-method-tag/tsconfig.json @@ -0,0 +1,21 @@ +{ + "$schema": "https://json.schemastore.org/tsconfig", + "compilerOptions": { + "composite": true, + "target": "ES2017", + "lib": ["dom", "dom.iterable", "esnext"], + "allowJs": true, + "skipLibCheck": true, + "strict": true, + "noEmit": false, + "incremental": true, + "module": "ESNext", + "esModuleInterop": true, + "moduleResolution": 
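For reference, a minimal, hypothetical consumer of the new `HttpMethodTag` package, pairing the badge with an endpoint path the way `ApiReferenceHitContent` does above (styling comes from the package's `index.css`, which `layout.tsx` imports app-wide):

```tsx
import { HttpMethodTag, type HttpMethod } from "@fern-ui/fern-http-method-tag";

// Illustrative: render a small method badge next to an endpoint path.
export function EndpointLabel({ method, path }: { method: HttpMethod; path: string }) {
    return (
        <span>
            <HttpMethodTag method={method} size="sm" />
            <code>{path}</code>
        </span>
    );
}
```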
"Bundler", + "resolveJsonModule": true, + "isolatedModules": true, + "jsx": "preserve" + }, + "include": ["index.ts", "HttpMethodTag.tsx"], + "exclude": ["node_modules"] +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 680031f242..962e970ed3 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -447,7 +447,7 @@ importers: version: 3.3.2 simple-git: specifier: ^3.24.0 - version: 3.24.0 + version: 3.24.0(supports-color@8.1.1) stylelint: specifier: ^16.1.0 version: 16.5.0(typescript@5.4.3) @@ -2280,9 +2280,6 @@ importers: '@fern-ui/fern-docs-utils': specifier: workspace:* version: link:../fern-docs-utils - '@trigger.dev/sdk': - specifier: ^3.0.13 - version: 3.0.13 algoliasearch: specifier: ^5.10.2 version: 5.10.2 @@ -2292,6 +2289,9 @@ importers: pnpm: specifier: ^9.12.1 version: 9.12.1 + ts-essentials: + specifier: ^10.0.1 + version: 10.0.1(typescript@5.4.3) zod: specifier: ^3.23.8 version: 3.23.8 @@ -2299,9 +2299,6 @@ importers: '@fern-platform/configs': specifier: workspace:* version: link:../../configs - '@trigger.dev/build': - specifier: ^3.0.13 - version: 3.0.13(typescript@5.4.3) '@types/node': specifier: ^18.7.18 version: 18.19.33 @@ -2320,6 +2317,9 @@ importers: stylelint: specifier: ^16.1.0 version: 16.5.0(typescript@5.4.3) + tsup: + specifier: ^8.0.2 + version: 8.0.2(@swc/core@1.5.7)(postcss@8.4.31)(ts-node@10.9.2(@swc/core@1.5.7)(@types/node@18.19.33)(typescript@5.4.3))(typescript@5.4.3) typescript: specifier: 5.4.3 version: 5.4.3 @@ -2335,6 +2335,12 @@ importers: '@fern-ui/fern-docs-search-server': specifier: workspace:* version: link:../fern-docs-search-server + '@fern-ui/fern-http-method-tag': + specifier: workspace:* + version: link:../fern-http-method-tag + '@fern-ui/react-commons': + specifier: workspace:* + version: link:../../commons/react/react-commons '@radix-ui/react-radio-group': specifier: ^1.1.3 version: 1.1.3(@types/react-dom@18.3.0)(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -2372,6 +2378,9 @@ importers: '@types/react-dom': specifier: ^18.2.18 version: 18.3.0 + autoprefixer: + specifier: ^10.4.16 + version: 10.4.19(postcss@8.4.31) eslint: specifier: ^8.56.0 version: 8.57.0 @@ -2384,6 +2393,9 @@ importers: postcss: specifier: 8.4.31 version: 8.4.31 + postcss-import: + specifier: ^16.0.1 + version: 16.1.0(postcss@8.4.31) react: specifier: ^18.2.0 version: 18.3.1 @@ -2489,6 +2501,46 @@ importers: specifier: ^1.5.0 version: 1.6.0(@edge-runtime/vm@3.2.0)(@types/node@18.19.33)(jsdom@24.0.0)(less@4.2.0)(sass@1.77.0)(stylus@0.62.0)(terser@5.31.0) + packages/ui/fern-http-method-tag: + dependencies: + '@radix-ui/colors': + specifier: ^3.0.0 + version: 3.0.0 + clsx: + specifier: ^2.1.1 + version: 2.1.1 + next: + specifier: npm:@fern-api/next@14.2.9-fork.2 + version: '@fern-api/next@14.2.9-fork.2(@babel/core@7.24.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.47.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.0)' + devDependencies: + '@types/node': + specifier: ^18.7.18 + version: 18.19.33 + '@types/react': + specifier: ^18.0.20 + version: 18.3.3 + '@types/react-dom': + specifier: ^18.2.18 + version: 18.3.0 + eslint: + specifier: ^8.56.0 + version: 8.57.0 + eslint-config-next: + specifier: 14.2.15 + version: 14.2.15(eslint@8.57.0)(typescript@5.4.3) + postcss: + specifier: 8.4.31 + version: 8.4.31 + react: + specifier: ^18.2.0 + version: 18.3.1 + react-dom: + specifier: ^18.2.0 + version: 18.3.1(react@18.3.1) + typescript: + specifier: 5.4.3 + version: 5.4.3 + packages/ui/fontawesome-cdn: dependencies: '@fortawesome/fontawesome-svg-core': 
@@ -3034,7 +3086,7 @@ importers: version: 3.21.0(serverless@3.38.0) simple-git: specifier: ^3.24.0 - version: 3.24.0 + version: 3.24.0(supports-color@8.1.1) tmp-promise: specifier: ^3.0.3 version: 3.0.3 @@ -4572,24 +4624,11 @@ packages: resolution: {integrity: sha512-cpSTY52ycffnWsKTtd79+yyrsLYahU1njqSV5C3bAbEPWK+h3XWtzfDB4rjDpQGhX3n1wCMeLZ8Ak6uE5zFTyw==, tarball: https://npm.fontawesome.com/@fortawesome/sharp-thin-svg-icons/-/6.5.2/sharp-thin-svg-icons-6.5.2.tgz} engines: {node: '>=6'} - '@google-cloud/precise-date@4.0.0': - resolution: {integrity: sha512-1TUx3KdaU3cN7nfCdNf+UVqA/PSX29Cjcox3fZZBtINlRrXVTmUkQnCKv2MbBUbCopbK4olAT1IHl76uZyCiVA==} - engines: {node: '>=14.0.0'} - '@graphql-typed-document-node/core@3.2.0': resolution: {integrity: sha512-mB9oAsNCm9aM3/SOv4YtBMqZbYj10R7dkq8byBqxGY/ncFwhf2oQzMV+LCRlWoDSEBJ3COiR1yeDvMtsoOsuFQ==} peerDependencies: graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@grpc/grpc-js@1.12.2': - resolution: {integrity: sha512-bgxdZmgTrJZX50OjyVwz3+mNEnCTNkh3cIqGPWVNeW9jX6bn1ZkU80uPd+67/ZpIJIjRQ9qaHCjhavyoWYxumg==} - engines: {node: '>=12.10.0'} - - '@grpc/proto-loader@0.7.13': - resolution: {integrity: sha512-AiXO/bfe9bmxBjxxtYxFAXGZvMaN5s8kO+jBHAJCON8rJoB5YS/D6X7ZNc6XQkuHNmyl4CYaMI1fJ/Gn27RGGw==} - engines: {node: '>=6'} - hasBin: true - '@hapi/hoek@9.3.0': resolution: {integrity: sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==} @@ -4907,9 +4946,6 @@ packages: '@jridgewell/trace-mapping@0.3.9': resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} - '@js-sdsl/ordered-map@4.4.2': - resolution: {integrity: sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==} - '@kwsites/file-exists@1.1.1': resolution: {integrity: sha512-m9/5YGR18lIwxSFDwfE3oA7bWuq9kdau6ugN4H2rJeyhFQZcG9AgSHkQtSD15a8WvTgfz9aikZMrKPHvbpqFiw==} @@ -5131,132 +5167,10 @@ packages: '@open-draft/until@2.1.0': resolution: {integrity: sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==} - '@opentelemetry/api-logs@0.52.1': - resolution: {integrity: sha512-qnSqB2DQ9TPP96dl8cDubDvrUyWc0/sK81xHTK8eSUspzDM3bsewX903qclQFvVhgStjRWdC5bLb3kQqMkfV5A==} - engines: {node: '>=14'} - '@opentelemetry/api@1.9.0': resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} engines: {node: '>=8.0.0'} - '@opentelemetry/context-async-hooks@1.25.1': - resolution: {integrity: sha512-UW/ge9zjvAEmRWVapOP0qyCvPulWU6cQxGxDbWEFfGOj1VBBZAuOqTo3X6yWmDTD3Xe15ysCZChHncr2xFMIfQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - - '@opentelemetry/core@1.25.1': - resolution: {integrity: sha512-GeT/l6rBYWVQ4XArluLVB6WWQ8flHbdb6r2FCHC3smtdOAbrJBIv35tpV/yp9bmYUJf+xmZpu9DRTIeJVhFbEQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - - '@opentelemetry/exporter-logs-otlp-http@0.52.1': - resolution: {integrity: sha512-qKgywId2DbdowPZpOBXQKp0B8DfhfIArmSic15z13Nk/JAOccBUQdPwDjDnjsM5f0ckZFMVR2t/tijTUAqDZoA==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/exporter-trace-otlp-grpc@0.52.1': - resolution: {integrity: sha512-pVkSH20crBwMTqB3nIN4jpQKUEoB0Z94drIHpYyEqs7UBr+I0cpYyOR3bqjA/UasQUMROb3GX8ZX4/9cVRqGBQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - 
- '@opentelemetry/exporter-trace-otlp-http@0.52.1': - resolution: {integrity: sha512-05HcNizx0BxcFKKnS5rwOV+2GevLTVIRA0tRgWYyw4yCgR53Ic/xk83toYKts7kbzcI+dswInUg/4s8oyA+tqg==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/exporter-trace-otlp-proto@0.52.1': - resolution: {integrity: sha512-pt6uX0noTQReHXNeEslQv7x311/F1gJzMnp1HD2qgypLRPbXDeMzzeTngRTUaUbP6hqWNtPxuLr4DEoZG+TcEQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/exporter-zipkin@1.25.1': - resolution: {integrity: sha512-RmOwSvkimg7ETwJbUOPTMhJm9A9bG1U8s7Zo3ajDh4zM7eYcycQ0dM7FbLD6NXWbI2yj7UY4q8BKinKYBQksyw==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/instrumentation@0.52.1': - resolution: {integrity: sha512-uXJbYU/5/MBHjMp1FqrILLRuiJCs3Ofk0MeRDk8g1S1gD47U8X3JnSwcMO1rtRo1x1a7zKaQHaoYu49p/4eSKw==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.3.0 - - '@opentelemetry/otlp-exporter-base@0.52.1': - resolution: {integrity: sha512-z175NXOtX5ihdlshtYBe5RpGeBoTXVCKPPLiQlD6FHvpM4Ch+p2B0yWKYSrBfLH24H9zjJiBdTrtD+hLlfnXEQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/otlp-grpc-exporter-base@0.52.1': - resolution: {integrity: sha512-zo/YrSDmKMjG+vPeA9aBBrsQM9Q/f2zo6N04WMB3yNldJRsgpRBeLLwvAt/Ba7dpehDLOEFBd1i2JCoaFtpCoQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/otlp-transformer@0.52.1': - resolution: {integrity: sha512-I88uCZSZZtVa0XniRqQWKbjAUm73I8tpEy/uJYPPYw5d7BRdVk0RfTBQw8kSUl01oVWEuqxLDa802222MYyWHg==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.3.0 <1.10.0' - - '@opentelemetry/propagator-b3@1.25.1': - resolution: {integrity: sha512-p6HFscpjrv7//kE+7L+3Vn00VEDUJB0n6ZrjkTYHrJ58QZ8B3ajSJhRbCcY6guQ3PDjTbxWklyvIN2ojVbIb1A==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - - '@opentelemetry/propagator-jaeger@1.25.1': - resolution: {integrity: sha512-nBprRf0+jlgxks78G/xq72PipVK+4or9Ypntw0gVZYNTCSK8rg5SeaGV19tV920CMqBD/9UIOiFr23Li/Q8tiA==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - - '@opentelemetry/resources@1.25.1': - resolution: {integrity: sha512-pkZT+iFYIZsVn6+GzM0kSX+u3MSLCY9md+lIJOoKl/P+gJFfxJte/60Usdp8Ce4rOs8GduUpSPNe1ddGyDT1sQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - - '@opentelemetry/sdk-logs@0.52.1': - resolution: {integrity: sha512-MBYh+WcPPsN8YpRHRmK1Hsca9pVlyyKd4BxOC4SsgHACnl/bPp4Cri9hWhVm5+2tiQ9Zf4qSc1Jshw9tOLGWQA==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.4.0 <1.10.0' - - '@opentelemetry/sdk-metrics@1.25.1': - resolution: {integrity: sha512-9Mb7q5ioFL4E4dDrc4wC/A3NTHDat44v4I3p2pLPSxRvqUbDIQyMVr9uK+EU69+HWhlET1VaSrRzwdckWqY15Q==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.3.0 <1.10.0' - - '@opentelemetry/sdk-node@0.52.1': - resolution: {integrity: sha512-uEG+gtEr6eKd8CVWeKMhH2olcCHM9dEK68pe0qE0be32BcCRsvYURhHaD1Srngh1SQcnQzZ4TP324euxqtBOJA==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.3.0 <1.10.0' - - '@opentelemetry/sdk-trace-base@1.25.1': - resolution: {integrity: sha512-C8k4hnEbc5FamuZQ92nTOp8X/diCY56XUTnMiv9UTuJitCzaNNHAVsdm5+HLCdI8SLQsLWIrG38tddMxLVoftw==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - - 
'@opentelemetry/sdk-trace-node@1.25.1': - resolution: {integrity: sha512-nMcjFIKxnFqoez4gUmihdBrbpsEnAX/Xj16sGvZm+guceYE0NE00vLhpDVK6f3q8Q4VFI5xG8JjlXKMB/SkTTQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - - '@opentelemetry/semantic-conventions@1.25.1': - resolution: {integrity: sha512-ZDjMJJQRlyk8A1KZFCc+bCbsyrn1wTwdNt56F7twdfUfnHUZUq77/WfONCj8p72NZOyP7pNTdUWSTYC3GTbuuQ==} - engines: {node: '>=14'} - '@pandacss/config@0.22.1': resolution: {integrity: sha512-odnBV0U7ZiehR8O4hA+XbqWuBxhEl//XVtiyfr2KIRy53oFuNudOFFwGDQPcowcVCVl+lzclsjByr9UT+tdT6Q==} @@ -5391,36 +5305,6 @@ packages: '@prisma/get-platform@5.13.0': resolution: {integrity: sha512-B/WrQwYTzwr7qCLifQzYOmQhZcFmIFhR81xC45gweInSUn2hTEbfKUPd2keAog+y5WI5xLAFNJ3wkXplvSVkSw==} - '@protobufjs/aspromise@1.1.2': - resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} - - '@protobufjs/base64@1.1.2': - resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} - - '@protobufjs/codegen@2.0.4': - resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} - - '@protobufjs/eventemitter@1.1.0': - resolution: {integrity: sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} - - '@protobufjs/fetch@1.1.0': - resolution: {integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} - - '@protobufjs/float@1.0.2': - resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} - - '@protobufjs/inquire@1.1.0': - resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} - - '@protobufjs/path@1.1.2': - resolution: {integrity: sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} - - '@protobufjs/pool@1.1.0': - resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} - - '@protobufjs/utf8@1.1.0': - resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} - '@radix-ui/colors@3.0.0': resolution: {integrity: sha512-FUOsGBkHrYJwCSEtWRCIfQbZG7q1e6DgxCIOe1SUQzDe/7rXXeA47s8yCn6fuTNQAj1Zq4oTFi9Yjp3wzElcxg==} @@ -6824,9 +6708,6 @@ packages: resolution: {integrity: sha512-IHk53BVw6MPMi2Gsn+hCng8rFA3ZmR3Rk7GllxDUW9qFJl/hiSvskn7XldkECapQVkIg/1dHpMAxI9xSTaLLSA==} engines: {node: '>=14.0.0'} - '@socket.io/component-emitter@3.1.2': - resolution: {integrity: sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==} - '@storybook/addon-actions@8.1.0-alpha.6': resolution: {integrity: sha512-EFXW0i4XOfR4a58MilMcVHdtthRTklg776fDsJCS5EFZI7wGbA+aAw2HmKUouwqkVVQACu5SQ/5yWD8EN+wCYA==} @@ -7466,18 +7347,6 @@ packages: '@tokenizer/token@0.3.0': resolution: {integrity: sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A==} - '@trigger.dev/build@3.0.13': - resolution: {integrity: sha512-4pN3KDl4Cn3qqTo5ZsV14NESjBzCV6h0Q9ZFBurq6BeposFflVyBtRQMCh5oUD+3YyGfq4RRlTP5Qz7iu00YXQ==} - engines: {node: '>=18.20.0'} - - '@trigger.dev/core@3.0.13': - resolution: {integrity: sha512-BK4Yd+LrxA5cxAsO66yOm8UvBGdSplzfSqFGo6OpxhWZqZBsUMADAYBXhjb7ujl/F/BrN47qhLSuyaaybBiHcw==} - engines: {node: '>=18.20.0'} - - 
'@trigger.dev/sdk@3.0.13': - resolution: {integrity: sha512-Ju+NDhwGJgnYuiotEWwcsG4dmN69XYjpkLLCPTMhMeGZgECUvXoduNC5avEIt6FyUqii3M67k/4jDgUIfa7UJg==} - engines: {node: '>=18.20.0'} - '@trysound/sax@0.2.0': resolution: {integrity: sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==} engines: {node: '>=10.13.0'} @@ -7843,9 +7712,6 @@ packages: '@types/serve-static@1.15.7': resolution: {integrity: sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==} - '@types/shimmer@1.2.0': - resolution: {integrity: sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==} - '@types/stack-utils@2.0.3': resolution: {integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==} @@ -8715,11 +8581,6 @@ packages: peerDependencies: acorn: ^8 - acorn-import-attributes@1.9.5: - resolution: {integrity: sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==} - peerDependencies: - acorn: ^8 - acorn-jsx@5.3.2: resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} peerDependencies: @@ -9781,10 +9642,6 @@ packages: copy-anything@2.0.6: resolution: {integrity: sha512-1j20GZTsvKNkc4BY3NpMOM8tt///wY3FpIzozTOFO2ffuZcV61nojHXVKIy3WM+7ADCy5FVhdZYHYDdgTU0yJw==} - copy-anything@3.0.5: - resolution: {integrity: sha512-yCEafptTtb4bk7GLEQoM8KVJpxAfdBJYaXyzQEgQQQgYrZiDp8SJmGKlYza6CYjEDNstAdNdKA3UuoULlEbS6w==} - engines: {node: '>=12.13'} - copy-to-clipboard@3.3.3: resolution: {integrity: sha512-2KV8NhB5JqC3ky0r9PMCAZKbUHSwtEo4CwCs0KXgruG43gX5PMqDEBbVU4OUzw2MuAWUfsuFmWvEKG5QRfSnJA==} @@ -9857,10 +9714,6 @@ packages: create-require@1.1.1: resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} - cronstrue@2.51.0: - resolution: {integrity: sha512-7EG9VaZZ5SRbZ7m25dmP6xaS0qe9ay6wywMskFOU/lMDKa+3gZr2oeT5OUfXwRP/Bcj8wxdYJ65AHU70CI3tsw==} - hasBin: true - cross-spawn@5.1.0: resolution: {integrity: sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==} @@ -10534,13 +10387,6 @@ packages: endent@2.1.0: resolution: {integrity: sha512-r8VyPX7XL8U01Xgnb1CjZ3XV+z90cXIJ9JPE/R9SEC9vpw2P6CfsRPJmp20DppC5N7ZAMCmjYkJIa744Iyg96w==} - engine.io-client@6.5.4: - resolution: {integrity: sha512-GeZeeRjpD2qf49cZQ0Wvh/8NJNfeXkXXcoGh+F77oEAgo9gUHwT1fCRxSNU+YEEaysOJTnsFHmM5oAcPy4ntvQ==} - - engine.io-parser@5.2.3: - resolution: {integrity: sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q==} - engines: {node: '>=10.0.0'} - enhanced-resolve@5.16.1: resolution: {integrity: sha512-4U5pNsuDl0EhuZpq46M5xPslstkviJuhrdobaRDBk2Jy2KO37FDAJl4lb2KlNabxT0m4MTK2UHNrsAcphE8nyw==} engines: {node: '>=10.13.0'} @@ -10910,9 +10756,6 @@ packages: evp_bytestokey@1.0.3: resolution: {integrity: sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==} - evt@2.5.7: - resolution: {integrity: sha512-dr7Wd16ry5F8WNU1xXLKpFpO3HsoAGg8zC48e08vDdzMzGWCP9/QFGt1PQptEEDh8SwYP3EL8M+d/Gb0kgUp6g==} - execa@0.8.0: resolution: {integrity: sha512-zDWS+Rb1E8BlqqhALSt9kUhss8Qq4nN3iof3gsOdyINksElaPyNBtKUMTR62qhvgVWR0CqCX7sdnKe4MnUbFEA==} engines: {node: '>=4'} @@ -11032,14 +10875,6 @@ packages: fd-slicer@1.1.0: resolution: {integrity: sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==} - 
fdir@6.4.2: - resolution: {integrity: sha512-KnhMXsKSPZlAhp7+IjUkRZKPb4fUyccpDrdFXbi4QL1qkmFh9kVY09Yox+n4MaOb3lHZ1Tv829C3oaaXoMYPDQ==} - peerDependencies: - picomatch: ^3 || ^4 - peerDependenciesMeta: - picomatch: - optional: true - fecha@4.2.3: resolution: {integrity: sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw==} @@ -11845,9 +11680,6 @@ packages: resolution: {integrity: sha512-/1/GPCpDUCCYwlERiYjxoczfP0zfvZMU/OWgQPMya9AbAE24vseigFdhAMObpc8Q4lc/kjutPfUddDYyAmejnA==} engines: {node: '>=18.18.0'} - humanize-duration@3.32.1: - resolution: {integrity: sha512-inh5wue5XdfObhu/IGEMiA1nUXigSGcaKNemcbLRKa7jXYGDZXr3LoT9pTIzq2hPEbld7w/qv9h+ikWGz8fL1g==} - humps@2.0.1: resolution: {integrity: sha512-E0eIbrFWUhwfXJmsbdjRQFQPrl5pTEoKlz163j1mTqqUnU9PgR4AgB8AIITzuB3vLBdxZXyZ9TDIrwB2OASz4g==} @@ -11914,9 +11746,6 @@ packages: resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} engines: {node: '>=6'} - import-in-the-middle@1.11.2: - resolution: {integrity: sha512-gK6Rr6EykBcc6cVWRSBR5TWf8nn6hZMYSRYqCcHa0l0d1fPK7JSYo6+Mlmck76jIX9aL/IZ71c06U2VpFwl1zA==} - import-local@3.1.0: resolution: {integrity: sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==} engines: {node: '>=8'} @@ -12908,9 +12737,6 @@ packages: resolution: {integrity: sha512-1ulHeNPp6k/LD8H91o7VYFBng5i1BDE7HoKxVbZiGFidS1Rj65qcywLxX+pVfAPoQJEjRdvKcusKwOupHCVOVQ==} engines: {node: '>= 12.0.0'} - long@5.2.3: - resolution: {integrity: sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==} - longest-streak@3.1.0: resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==} @@ -13373,9 +13199,6 @@ packages: resolution: {integrity: sha512-r9deDe9p5FJUPZAk3A59wGH7Ii9YrjjWw0jmw/liSbHl2CHiyXj6FcDXDu2K3TjVAXqiJdaw3xxwlZZr9E6nHg==} hasBin: true - minimal-polyfills@2.2.3: - resolution: {integrity: sha512-oxdmJ9cL+xV72h0xYxp4tP2d5/fTBpP45H8DIOn9pASuF8a3IYTf+25fMGDYGiWW+MFsuog6KD6nfmhZJQ+uUw==} - minimalistic-assert@1.0.1: resolution: {integrity: sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==} @@ -13455,9 +13278,6 @@ packages: mlly@1.7.2: resolution: {integrity: sha512-tN3dvVHYVz4DhSXinXIk7u9syPYaJvio118uomkovAtWBT+RdbP6Lfh/5Lvo519YMmwBafwlh20IPTXIStscpA==} - module-details-from-path@1.0.3: - resolution: {integrity: sha512-ySViT69/76t8VhE1xXHK6Ch4NcDd26gx0MzKXLO+F7NOtnqH68d9zF94nT8ZWSxXh8ELOERsnJO/sWt1xZYw5A==} - moment@2.30.1: resolution: {integrity: sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==} @@ -14045,10 +13865,6 @@ packages: resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} engines: {node: '>=8.6'} - picomatch@4.0.2: - resolution: {integrity: sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==} - engines: {node: '>=12'} - pidtree@0.6.0: resolution: {integrity: sha512-eG2dWTVw5bzqGRztnHExczNxt5VGsE6OwTeCG3fdUf9KBsZzO3R5OIIIzWR+iZA0NtZ+RDVdaoE2dK1cn6jH4g==} engines: {node: '>=0.10'} @@ -14533,10 +14349,6 @@ packages: proto-list@1.2.4: resolution: {integrity: sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==} - protobufjs@7.4.0: - resolution: {integrity: 
sha512-mRUWCc3KUU4w1jU8sGxICXH/gNS94DvI1gxqDvBzhj1JpcsimQkYiOJfwsPUykUI5ZaspFbSgmBLER8IrQ3tqw==} - engines: {node: '>=12.0.0'} - proxy-addr@2.0.7: resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} engines: {node: '>= 0.10'} @@ -15053,10 +14865,6 @@ packages: resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} engines: {node: '>=0.10.0'} - require-in-the-middle@7.4.0: - resolution: {integrity: sha512-X34iHADNbNDfr6OTStIAHWSAvvKQRYgLO6duASaVf7J2VA3lvmNYboAHOuLC2huav1IwgZJtyEcJCKVzFxOSMQ==} - engines: {node: '>=8.6.0'} - require-package-name@2.0.1: resolution: {integrity: sha512-uuoJ1hU/k6M0779t3VMVIYpb2VMJk05cehCaABFhXaibcbvfgR8wKiozLjVFSzJPmQMRqIcO0HMyTFqfV09V6Q==} @@ -15188,9 +14996,6 @@ packages: resolution: {integrity: sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==} engines: {node: '>=0.12.0'} - run-exclusive@2.2.19: - resolution: {integrity: sha512-K3mdoAi7tjJ/qT7Flj90L7QyPozwUaAG+CVhkdDje4HLKXUYC3N/Jzkau3flHVDLQVhiHBtcimVodMjN9egYbA==} - run-parallel-limit@1.1.0: resolution: {integrity: sha512-jJA7irRNM91jaKc3Hcl1npHsFLOXOoTkPCUL1JEa1R82O2miplXXRaGdjW/KM/98YQWDhJLiSs793CnXfblJUw==} @@ -15400,9 +15205,6 @@ packages: shiki@1.22.0: resolution: {integrity: sha512-/t5LlhNs+UOKQCYBtl5ZsH/Vclz73GIqT2yQsCBygr8L/ppTdmpL4w3kPLoZJbMKVWtoG77Ue1feOjZfDxvMkw==} - shimmer@1.2.1: - resolution: {integrity: sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==} - side-channel@1.0.6: resolution: {integrity: sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==} engines: {node: '>= 0.4'} @@ -15466,17 +15268,6 @@ packages: resolution: {integrity: sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==} engines: {node: '>=12'} - slug@6.1.0: - resolution: {integrity: sha512-x6vLHCMasg4DR2LPiyFGI0gJJhywY6DTiGhCrOMzb3SOk/0JVLIaL4UhyFSHu04SD3uAavrKY/K3zZ3i6iRcgA==} - - socket.io-client@4.7.5: - resolution: {integrity: sha512-sJ/tqHOCe7Z50JCBCXrsY3I2k03iOiUe+tj1OmKeD2lXPiGH/RUCdTZFoqVyN7l1MnpIzPrGtLcijffmeouNlQ==} - engines: {node: '>=10.0.0'} - - socket.io-parser@4.2.4: - resolution: {integrity: sha512-/GbIKmo8ioc+NIWIhwdecY0ge+qVBSMdgxGygevmdHj24bsfgtCmcUUcQ5ZzcylGFHsN3k4HB4Cgkl96KVnuew==} - engines: {node: '>=10.0.0'} - sonner@1.5.0: resolution: {integrity: sha512-FBjhG/gnnbN6FY0jaNnqZOMmB73R+5IiyYAw8yBj7L54ER7HB3fOSE5OFiQiE2iXWxeXKvg6fIP4LtVppHEdJA==} peerDependencies: @@ -15862,10 +15653,6 @@ packages: engines: {node: '>=6.4.0 <13 || >=14'} deprecated: Please upgrade to v9.0.0+ as we have fixed a public vulnerability with formidable dependency. Note that v9.0.0+ requires Node.js v14.18.0+. See https://github.com/ladjs/superagent/pull/1800 for insight. 
This project is supported and maintained by the team at Forward Email @ https://forwardemail.net - superjson@2.2.1: - resolution: {integrity: sha512-8iGv75BYOa0xRJHK5vRLEjE2H/i4lulTjzpUXic3Eg8akftYjkmQDa8JARQ42rlczXyFR3IeRoeFCc7RxHsYZA==} - engines: {node: '>=16'} - supports-color@2.0.0: resolution: {integrity: sha512-KKNVtd6pCYgPIKU4cp2733HWYCpplQhddZLBUryaAHou723x+FRzQ5Df824Fj+IyyuiQTRoub4SnIFfIcrp70g==} engines: {node: '>=0.8.0'} @@ -15890,10 +15677,6 @@ packages: resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} engines: {node: '>=10'} - supports-hyperlinks@2.3.0: - resolution: {integrity: sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==} - engines: {node: '>=8'} - supports-hyperlinks@3.0.0: resolution: {integrity: sha512-QBDPHyPQDRTy9ku4URNGY5Lah8PAaXs6tAAwp55sL5WCsSW7GIfdf6W5ixfziW+t7wh3GVvHyHHyQ1ESsoRvaA==} engines: {node: '>=14.18'} @@ -15982,10 +15765,6 @@ packages: resolution: {integrity: sha512-biM9brNqxSc04Ee71hzFbryD11nX7VPhQQY32AdDmjFvodsRFz/3ufeoTZ6uYkRFfGo188tENcASNs3vTdsM0w==} engines: {node: '>=10'} - terminal-link@3.0.0: - resolution: {integrity: sha512-flFL3m4wuixmf6IfhFJd1YPiLiMuxEc8uHRM1buzIeZPm22Au2pDqBJQgdo7n1WfPU1ONFGv7YDwpFBmHGF6lg==} - engines: {node: '>=12'} - terser-webpack-plugin@5.3.10: resolution: {integrity: sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==} engines: {node: '>= 10.13.0'} @@ -16065,10 +15844,6 @@ packages: tinyexec@0.3.0: resolution: {integrity: sha512-tVGE0mVJPGb0chKhqmsoosjsS+qUnJVGJpZgsHYQcGoPlG3B51R3PouqTgEGH2Dc9jjFyOqOpix6ZHNMXp1FZg==} - tinyglobby@0.2.10: - resolution: {integrity: sha512-Zc+8eJlFMvgatPZTl6A9L/yht8QqdmUNtURHaKZLmKBE12hNPSrqNkUp2cs3M/UKmNVVAMFQYSjYIVHDjW5zew==} - engines: {node: '>=12.0.0'} - tinylogic@2.0.0: resolution: {integrity: sha512-dljTkiLLITtsjqBvTA1MRZQK/sGP4kI3UJKc3yA9fMzYbMF2RhcN04SeROVqJBIYYOoJMM8u0WDnhFwMSFQotw==} @@ -16275,9 +16050,6 @@ packages: ts-toolbelt@6.15.5: resolution: {integrity: sha512-FZIXf1ksVyLcfr7M317jbB67XFJhOO1YqdTcuGaq9q5jLUoTikukZ+98TPjKiP2jC5CgmYdWWYs0s2nLSU0/1A==} - tsafe@1.7.5: - resolution: {integrity: sha512-tbNyyBSbwfbilFfiuXkSOj82a6++ovgANwcoqBAcO9/REPoZMEQoE8kWPeO0dy5A2D/2Lajr8Ohue5T0ifIvLQ==} - tsconfck@2.1.2: resolution: {integrity: sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg==} engines: {node: ^14.13.1 || ^16 || >=18} @@ -16288,16 +16060,6 @@ packages: typescript: optional: true - tsconfck@3.1.3: - resolution: {integrity: sha512-ulNZP1SVpRDesxeMLON/LtWM8HIgAJEIVpVVhBM6gsmvQ8+Rh+ZG7FWGvHh7Ah3pRABwVJWklWCr/BTZSv0xnQ==} - engines: {node: ^18 || >=20} - hasBin: true - peerDependencies: - typescript: ^5.0.0 - peerDependenciesMeta: - typescript: - optional: true - tsconfig-paths-webpack-plugin@4.1.0: resolution: {integrity: sha512-xWFISjviPydmtmgeUAuXp4N1fky+VCtfhOkDUFIv5ea7p4wuTomI4QTrXvFBX2S4jZsmyTSrStQl+E+4w+RzxA==} engines: {node: '>=10.13.0'} @@ -16501,10 +16263,6 @@ packages: engines: {node: '>=0.8.0'} hasBin: true - ulid@2.3.0: - resolution: {integrity: sha512-keqHubrlpvT6G2wH0OEfSW4mquYRcbe/J8NMmveoQOjUqmo+hXtO+ORCpWhdbZ7k72UtY61BL7haGxW6enBnjw==} - hasBin: true - unbox-primitive@1.0.2: resolution: {integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==} @@ -17093,18 +16851,6 @@ packages: utf-8-validate: optional: true - ws@8.17.1: - resolution: {integrity: 
sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==} - engines: {node: '>=10.0.0'} - peerDependencies: - bufferutil: ^4.0.1 - utf-8-validate: '>=5.0.2' - peerDependenciesMeta: - bufferutil: - optional: true - utf-8-validate: - optional: true - ws@8.18.0: resolution: {integrity: sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==} engines: {node: '>=10.0.0'} @@ -17136,10 +16882,6 @@ packages: xmlchars@2.2.0: resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} - xmlhttprequest-ssl@2.0.0: - resolution: {integrity: sha512-QKxVRxiRACQcVuQEYFsI1hhkrMlrXHPegbbd1yn9UHOmRxY+si12nQYzri3vbzt8VdTTRviqcKxcyllFas5z2A==} - engines: {node: '>=0.4.0'} - xregexp@5.1.1: resolution: {integrity: sha512-fKXeVorD+CzWvFs7VBuKTYIW63YD1e1osxwQ8caZ6o1jg6pDAbABDG54LCIq0j5cy7PjRvGIq6sef9DYPXpncg==} @@ -17233,18 +16975,6 @@ packages: resolution: {integrity: sha512-zK7YHHz4ZXpW89AHXUPbQVGKI7uvkd3hzusTdotCg1UxyaVtg0zFJSTfW/Dq5f7OBBVnq6cZIaC8Ti4hb6dtCA==} engines: {node: '>= 14'} - zod-error@1.5.0: - resolution: {integrity: sha512-zzopKZ/skI9iXpqCEPj+iLCKl9b88E43ehcU+sbRoHuwGd9F1IDVGQ70TyO6kmfiRL1g4IXkjsXK+g1gLYl4WQ==} - - zod-validation-error@1.5.0: - resolution: {integrity: sha512-/7eFkAI4qV0tcxMBB/3+d2c1P6jzzZYdYSlBuAklzMuCrJu5bzJfHS0yVAS87dRHVlhftd6RFJDIvv03JgkSbw==} - engines: {node: '>=16.0.0'} - peerDependencies: - zod: ^3.18.0 - - zod@3.22.3: - resolution: {integrity: sha512-EjIevzuJRiRPbVH4mGc8nApb/lVLKVpmUhAaR5R5doKGfAnGJ6Gr3CViAVjP+4FWSxCsybeWQdcgCtbX+7oZug==} - zod@3.23.8: resolution: {integrity: sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==} @@ -17658,9 +17388,9 @@ snapshots: '@aws-crypto/sha256-browser': 3.0.0 '@aws-crypto/sha256-js': 3.0.0 '@aws-sdk/client-sso-oidc': 3.572.0 - '@aws-sdk/client-sts': 3.572.0 + '@aws-sdk/client-sts': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0) '@aws-sdk/core': 3.572.0 - '@aws-sdk/credential-provider-node': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0))(@aws-sdk/client-sts@3.572.0) + '@aws-sdk/credential-provider-node': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0)(@aws-sdk/client-sts@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)) '@aws-sdk/middleware-host-header': 3.567.0 '@aws-sdk/middleware-logger': 3.568.0 '@aws-sdk/middleware-recursion-detection': 3.567.0 @@ -17696,7 +17426,7 @@ snapshots: '@smithy/util-retry': 2.2.0 '@smithy/util-utf8': 2.3.0 '@smithy/util-waiter': 2.2.0 - tslib: 2.6.2 + tslib: 2.8.0 uuid: 9.0.1 transitivePeerDependencies: - aws-crt @@ -17768,7 +17498,7 @@ snapshots: dependencies: '@aws-crypto/sha256-browser': 3.0.0 '@aws-crypto/sha256-js': 3.0.0 - '@aws-sdk/client-sts': 3.572.0 + '@aws-sdk/client-sts': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0) '@aws-sdk/core': 3.572.0 '@aws-sdk/credential-provider-node': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0)(@aws-sdk/client-sts@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)) '@aws-sdk/middleware-host-header': 3.567.0 @@ -17809,52 +17539,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0)': - dependencies: - '@aws-crypto/sha256-browser': 3.0.0 - '@aws-crypto/sha256-js': 3.0.0 - '@aws-sdk/client-sts': 3.572.0 - '@aws-sdk/core': 3.572.0 - '@aws-sdk/credential-provider-node': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0))(@aws-sdk/client-sts@3.572.0) - '@aws-sdk/middleware-host-header': 3.567.0 - 
'@aws-sdk/middleware-logger': 3.568.0 - '@aws-sdk/middleware-recursion-detection': 3.567.0 - '@aws-sdk/middleware-user-agent': 3.572.0 - '@aws-sdk/region-config-resolver': 3.572.0 - '@aws-sdk/types': 3.567.0 - '@aws-sdk/util-endpoints': 3.572.0 - '@aws-sdk/util-user-agent-browser': 3.567.0 - '@aws-sdk/util-user-agent-node': 3.568.0 - '@smithy/config-resolver': 2.2.0 - '@smithy/core': 1.4.2 - '@smithy/fetch-http-handler': 2.5.0 - '@smithy/hash-node': 2.2.0 - '@smithy/invalid-dependency': 2.2.0 - '@smithy/middleware-content-length': 2.2.0 - '@smithy/middleware-endpoint': 2.5.1 - '@smithy/middleware-retry': 2.3.1 - '@smithy/middleware-serde': 2.3.0 - '@smithy/middleware-stack': 2.2.0 - '@smithy/node-config-provider': 2.3.0 - '@smithy/node-http-handler': 2.5.0 - '@smithy/protocol-http': 3.3.0 - '@smithy/smithy-client': 2.5.1 - '@smithy/types': 2.12.0 - '@smithy/url-parser': 2.2.0 - '@smithy/util-base64': 2.3.0 - '@smithy/util-body-length-browser': 2.2.0 - '@smithy/util-body-length-node': 2.3.0 - '@smithy/util-defaults-mode-browser': 2.2.1 - '@smithy/util-defaults-mode-node': 2.3.1 - '@smithy/util-endpoints': 1.2.0 - '@smithy/util-middleware': 2.2.0 - '@smithy/util-retry': 2.2.0 - '@smithy/util-utf8': 2.3.0 - tslib: 2.6.2 - transitivePeerDependencies: - - '@aws-sdk/client-sts' - - aws-crt - '@aws-sdk/client-sso@3.572.0': dependencies: '@aws-crypto/sha256-browser': 3.0.0 @@ -17894,62 +17578,17 @@ snapshots: '@smithy/util-middleware': 2.2.0 '@smithy/util-retry': 2.2.0 '@smithy/util-utf8': 2.3.0 - tslib: 2.6.2 + tslib: 2.8.0 transitivePeerDependencies: - aws-crt - '@aws-sdk/client-sts@3.572.0': + '@aws-sdk/client-sts@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)': dependencies: '@aws-crypto/sha256-browser': 3.0.0 '@aws-crypto/sha256-js': 3.0.0 '@aws-sdk/client-sso-oidc': 3.572.0 '@aws-sdk/core': 3.572.0 - '@aws-sdk/credential-provider-node': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0)(@aws-sdk/client-sts@3.572.0) - '@aws-sdk/middleware-host-header': 3.567.0 - '@aws-sdk/middleware-logger': 3.568.0 - '@aws-sdk/middleware-recursion-detection': 3.567.0 - '@aws-sdk/middleware-user-agent': 3.572.0 - '@aws-sdk/region-config-resolver': 3.572.0 - '@aws-sdk/types': 3.567.0 - '@aws-sdk/util-endpoints': 3.572.0 - '@aws-sdk/util-user-agent-browser': 3.567.0 - '@aws-sdk/util-user-agent-node': 3.568.0 - '@smithy/config-resolver': 2.2.0 - '@smithy/core': 1.4.2 - '@smithy/fetch-http-handler': 2.5.0 - '@smithy/hash-node': 2.2.0 - '@smithy/invalid-dependency': 2.2.0 - '@smithy/middleware-content-length': 2.2.0 - '@smithy/middleware-endpoint': 2.5.1 - '@smithy/middleware-retry': 2.3.1 - '@smithy/middleware-serde': 2.3.0 - '@smithy/middleware-stack': 2.2.0 - '@smithy/node-config-provider': 2.3.0 - '@smithy/node-http-handler': 2.5.0 - '@smithy/protocol-http': 3.3.0 - '@smithy/smithy-client': 2.5.1 - '@smithy/types': 2.12.0 - '@smithy/url-parser': 2.2.0 - '@smithy/util-base64': 2.3.0 - '@smithy/util-body-length-browser': 2.2.0 - '@smithy/util-body-length-node': 2.3.0 - '@smithy/util-defaults-mode-browser': 2.2.1 - '@smithy/util-defaults-mode-node': 2.3.1 - '@smithy/util-endpoints': 1.2.0 - '@smithy/util-middleware': 2.2.0 - '@smithy/util-retry': 2.2.0 - '@smithy/util-utf8': 2.3.0 - tslib: 2.6.2 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/client-sts@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)': - dependencies: - '@aws-crypto/sha256-browser': 3.0.0 - '@aws-crypto/sha256-js': 3.0.0 - '@aws-sdk/client-sso-oidc': 3.572.0 - '@aws-sdk/core': 3.572.0 - '@aws-sdk/credential-provider-node': 
3.572.0(@aws-sdk/client-sso-oidc@3.572.0)(@aws-sdk/client-sts@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)) + '@aws-sdk/credential-provider-node': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0)(@aws-sdk/client-sts@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)) '@aws-sdk/middleware-host-header': 3.567.0 '@aws-sdk/middleware-logger': 3.568.0 '@aws-sdk/middleware-recursion-detection': 3.567.0 @@ -18004,7 +17643,7 @@ snapshots: '@aws-sdk/types': 3.567.0 '@smithy/property-provider': 2.2.0 '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 '@aws-sdk/credential-provider-http@3.568.0': dependencies: @@ -18016,24 +17655,7 @@ snapshots: '@smithy/smithy-client': 2.5.1 '@smithy/types': 2.12.0 '@smithy/util-stream': 2.2.0 - tslib: 2.6.2 - - '@aws-sdk/credential-provider-ini@3.572.0(@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0))(@aws-sdk/client-sts@3.572.0)': - dependencies: - '@aws-sdk/client-sts': 3.572.0 - '@aws-sdk/credential-provider-env': 3.568.0 - '@aws-sdk/credential-provider-process': 3.572.0 - '@aws-sdk/credential-provider-sso': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0)) - '@aws-sdk/credential-provider-web-identity': 3.568.0(@aws-sdk/client-sts@3.572.0) - '@aws-sdk/types': 3.567.0 - '@smithy/credential-provider-imds': 2.3.0 - '@smithy/property-provider': 2.2.0 - '@smithy/shared-ini-file-loader': 2.4.0 - '@smithy/types': 2.12.0 - tslib: 2.6.2 - transitivePeerDependencies: - - '@aws-sdk/client-sso-oidc' - - aws-crt + tslib: 2.8.0 '@aws-sdk/credential-provider-ini@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)(@aws-sdk/client-sts@3.572.0(@aws-sdk/client-sso-oidc@3.572.0))': dependencies: @@ -18047,45 +17669,9 @@ snapshots: '@smithy/property-provider': 2.2.0 '@smithy/shared-ini-file-loader': 2.4.0 '@smithy/types': 2.12.0 - tslib: 2.6.2 - transitivePeerDependencies: - - '@aws-sdk/client-sso-oidc' - - aws-crt - - '@aws-sdk/credential-provider-ini@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)(@aws-sdk/client-sts@3.572.0)': - dependencies: - '@aws-sdk/client-sts': 3.572.0 - '@aws-sdk/credential-provider-env': 3.568.0 - '@aws-sdk/credential-provider-process': 3.572.0 - '@aws-sdk/credential-provider-sso': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0) - '@aws-sdk/credential-provider-web-identity': 3.568.0(@aws-sdk/client-sts@3.572.0) - '@aws-sdk/types': 3.567.0 - '@smithy/credential-provider-imds': 2.3.0 - '@smithy/property-provider': 2.2.0 - '@smithy/shared-ini-file-loader': 2.4.0 - '@smithy/types': 2.12.0 - tslib: 2.6.2 - transitivePeerDependencies: - - '@aws-sdk/client-sso-oidc' - - aws-crt - - '@aws-sdk/credential-provider-node@3.572.0(@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0))(@aws-sdk/client-sts@3.572.0)': - dependencies: - '@aws-sdk/credential-provider-env': 3.568.0 - '@aws-sdk/credential-provider-http': 3.568.0 - '@aws-sdk/credential-provider-ini': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0))(@aws-sdk/client-sts@3.572.0) - '@aws-sdk/credential-provider-process': 3.572.0 - '@aws-sdk/credential-provider-sso': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0)) - '@aws-sdk/credential-provider-web-identity': 3.568.0(@aws-sdk/client-sts@3.572.0) - '@aws-sdk/types': 3.567.0 - '@smithy/credential-provider-imds': 2.3.0 - '@smithy/property-provider': 2.2.0 - '@smithy/shared-ini-file-loader': 2.4.0 - '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 transitivePeerDependencies: - '@aws-sdk/client-sso-oidc' - - '@aws-sdk/client-sts' - aws-crt 
'@aws-sdk/credential-provider-node@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)(@aws-sdk/client-sts@3.572.0(@aws-sdk/client-sso-oidc@3.572.0))': @@ -18107,45 +17693,13 @@ snapshots: - '@aws-sdk/client-sts' - aws-crt - '@aws-sdk/credential-provider-node@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)(@aws-sdk/client-sts@3.572.0)': - dependencies: - '@aws-sdk/credential-provider-env': 3.568.0 - '@aws-sdk/credential-provider-http': 3.568.0 - '@aws-sdk/credential-provider-ini': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0)(@aws-sdk/client-sts@3.572.0) - '@aws-sdk/credential-provider-process': 3.572.0 - '@aws-sdk/credential-provider-sso': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0) - '@aws-sdk/credential-provider-web-identity': 3.568.0(@aws-sdk/client-sts@3.572.0) - '@aws-sdk/types': 3.567.0 - '@smithy/credential-provider-imds': 2.3.0 - '@smithy/property-provider': 2.2.0 - '@smithy/shared-ini-file-loader': 2.4.0 - '@smithy/types': 2.12.0 - tslib: 2.6.2 - transitivePeerDependencies: - - '@aws-sdk/client-sso-oidc' - - '@aws-sdk/client-sts' - - aws-crt - '@aws-sdk/credential-provider-process@3.572.0': dependencies: '@aws-sdk/types': 3.567.0 '@smithy/property-provider': 2.2.0 '@smithy/shared-ini-file-loader': 2.4.0 '@smithy/types': 2.12.0 - tslib: 2.6.2 - - '@aws-sdk/credential-provider-sso@3.572.0(@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0))': - dependencies: - '@aws-sdk/client-sso': 3.572.0 - '@aws-sdk/token-providers': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0)) - '@aws-sdk/types': 3.567.0 - '@smithy/property-provider': 2.2.0 - '@smithy/shared-ini-file-loader': 2.4.0 - '@smithy/types': 2.12.0 - tslib: 2.6.2 - transitivePeerDependencies: - - '@aws-sdk/client-sso-oidc' - - aws-crt + tslib: 2.8.0 '@aws-sdk/credential-provider-sso@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)': dependencies: @@ -18155,7 +17709,7 @@ snapshots: '@smithy/property-provider': 2.2.0 '@smithy/shared-ini-file-loader': 2.4.0 '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 transitivePeerDependencies: - '@aws-sdk/client-sso-oidc' - aws-crt @@ -18166,7 +17720,7 @@ snapshots: '@aws-sdk/types': 3.567.0 '@smithy/property-provider': 2.2.0 '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 '@aws-sdk/middleware-bucket-endpoint@3.568.0': dependencies: @@ -18287,15 +17841,6 @@ snapshots: '@smithy/types': 2.12.0 tslib: 2.6.2 - '@aws-sdk/token-providers@3.572.0(@aws-sdk/client-sso-oidc@3.572.0(@aws-sdk/client-sts@3.572.0))': - dependencies: - '@aws-sdk/client-sso-oidc': 3.572.0(@aws-sdk/client-sts@3.572.0) - '@aws-sdk/types': 3.567.0 - '@smithy/property-provider': 2.2.0 - '@smithy/shared-ini-file-loader': 2.4.0 - '@smithy/types': 2.12.0 - tslib: 2.6.2 - '@aws-sdk/token-providers@3.572.0(@aws-sdk/client-sso-oidc@3.572.0)': dependencies: '@aws-sdk/client-sso-oidc': 3.572.0 @@ -18303,7 +17848,7 @@ snapshots: '@smithy/property-provider': 2.2.0 '@smithy/shared-ini-file-loader': 2.4.0 '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 '@aws-sdk/types@3.567.0': dependencies: @@ -18312,7 +17857,7 @@ snapshots: '@aws-sdk/util-arn-parser@3.568.0': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@aws-sdk/util-endpoints@3.572.0': dependencies: @@ -18330,7 +17875,7 @@ snapshots: '@aws-sdk/util-locate-window@3.568.0': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@aws-sdk/util-user-agent-browser@3.567.0': dependencies: @@ -18348,7 +17893,7 @@ snapshots: '@aws-sdk/util-utf8-browser@3.259.0': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@aws-sdk/xml-builder@3.567.0': dependencies: @@ -18375,7 +17920,7 @@ snapshots: 
'@babel/traverse': 7.24.5 '@babel/types': 7.24.5 convert-source-map: 2.0.0 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) gensync: 1.0.0-beta.2 json5: 2.2.3 semver: 6.3.1 @@ -19174,7 +18719,7 @@ snapshots: '@babel/helper-split-export-declaration': 7.24.5 '@babel/parser': 7.24.5 '@babel/types': 7.24.5 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) globals: 11.12.0 transitivePeerDependencies: - supports-color @@ -19432,7 +18977,7 @@ snapshots: '@eslint/eslintrc@2.1.4': dependencies: ajv: 6.12.6 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) espree: 9.6.1 globals: 13.24.0 ignore: 5.3.1 @@ -19667,24 +19212,10 @@ snapshots: dependencies: '@fortawesome/fontawesome-common-types': 6.5.2 - '@google-cloud/precise-date@4.0.0': {} - '@graphql-typed-document-node/core@3.2.0(graphql@16.9.0)': dependencies: graphql: 16.9.0 - '@grpc/grpc-js@1.12.2': - dependencies: - '@grpc/proto-loader': 0.7.13 - '@js-sdsl/ordered-map': 4.4.2 - - '@grpc/proto-loader@0.7.13': - dependencies: - lodash.camelcase: 4.3.0 - long: 5.2.3 - protobufjs: 7.4.0 - yargs: 17.7.2 - '@hapi/hoek@9.3.0': {} '@hapi/topo@5.1.0': @@ -19698,7 +19229,7 @@ snapshots: '@humanwhocodes/config-array@0.11.14': dependencies: '@humanwhocodes/object-schema': 2.0.3 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -20180,14 +19711,6 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.4.15 - '@js-sdsl/ordered-map@4.4.2': {} - - '@kwsites/file-exists@1.1.1': - dependencies: - debug: 4.3.4(supports-color@5.5.0) - transitivePeerDependencies: - - supports-color - '@kwsites/file-exists@1.1.1(supports-color@8.1.1)': dependencies: debug: 4.3.4(supports-color@8.1.1) @@ -20500,170 +20023,8 @@ snapshots: '@open-draft/until@2.1.0': optional: true - '@opentelemetry/api-logs@0.52.1': - dependencies: - '@opentelemetry/api': 1.9.0 - - '@opentelemetry/api@1.9.0': {} - - '@opentelemetry/context-async-hooks@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - - '@opentelemetry/core@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/semantic-conventions': 1.25.1 - - '@opentelemetry/exporter-logs-otlp-http@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs': 0.52.1 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-exporter-base': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-logs': 0.52.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/exporter-trace-otlp-grpc@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@grpc/grpc-js': 1.12.2 - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-grpc-exporter-base': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/exporter-trace-otlp-http@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-exporter-base': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) - 
'@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/exporter-trace-otlp-proto@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-exporter-base': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/exporter-zipkin@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.1 - - '@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs': 0.52.1 - '@types/shimmer': 1.2.0 - import-in-the-middle: 1.11.2 - require-in-the-middle: 7.4.0 - semver: 7.6.2 - shimmer: 1.2.1 - transitivePeerDependencies: - - supports-color - - '@opentelemetry/otlp-exporter-base@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/otlp-grpc-exporter-base@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@grpc/grpc-js': 1.12.2 - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-exporter-base': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/otlp-transformer@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs': 0.52.1 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-logs': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-metrics': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - protobufjs: 7.4.0 - - '@opentelemetry/propagator-b3@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/propagator-jaeger@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/resources@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.1 - - '@opentelemetry/sdk-logs@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs': 0.52.1 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/sdk-metrics@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - lodash.merge: 4.6.2 - - '@opentelemetry/sdk-node@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - 
'@opentelemetry/api-logs': 0.52.1 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/exporter-trace-otlp-grpc': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/exporter-trace-otlp-http': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/exporter-trace-otlp-proto': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/exporter-zipkin': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-logs': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-metrics': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-node': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.1 - transitivePeerDependencies: - - supports-color - - '@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.1 - - '@opentelemetry/sdk-trace-node@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/context-async-hooks': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/propagator-b3': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/propagator-jaeger': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - semver: 7.6.2 - - '@opentelemetry/semantic-conventions@1.25.1': {} + '@opentelemetry/api@1.9.0': + optional: true '@pandacss/config@0.22.1': dependencies: @@ -20836,18 +20197,18 @@ snapshots: dependencies: asn1js: 3.0.5 pvtsutils: 1.3.5 - tslib: 2.6.2 + tslib: 2.8.0 '@peculiar/json-schema@1.1.12': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@peculiar/webcrypto@1.5.0': dependencies: '@peculiar/asn1-schema': 2.3.13 '@peculiar/json-schema': 1.1.12 pvtsutils: 1.3.5 - tslib: 2.6.2 + tslib: 2.8.0 webcrypto-core: 1.8.1 '@pkgjs/parseargs@0.11.0': @@ -20911,29 +20272,6 @@ snapshots: dependencies: '@prisma/debug': 5.13.0 - '@protobufjs/aspromise@1.1.2': {} - - '@protobufjs/base64@1.1.2': {} - - '@protobufjs/codegen@2.0.4': {} - - '@protobufjs/eventemitter@1.1.0': {} - - '@protobufjs/fetch@1.1.0': - dependencies: - '@protobufjs/aspromise': 1.1.2 - '@protobufjs/inquire': 1.1.0 - - '@protobufjs/float@1.0.2': {} - - '@protobufjs/inquire@1.1.0': {} - - '@protobufjs/path@1.1.2': {} - - '@protobufjs/pool@1.1.0': {} - - '@protobufjs/utf8@1.1.0': {} - '@radix-ui/colors@3.0.0': {} '@radix-ui/number@1.0.1': @@ -22327,7 +21665,7 @@ snapshots: '@serverless/dashboard-plugin@7.2.3(supports-color@8.1.1)': dependencies: '@aws-sdk/client-cloudformation': 3.572.0 - '@aws-sdk/client-sts': 3.572.0 + '@aws-sdk/client-sts': 3.572.0(@aws-sdk/client-sso-oidc@3.572.0) '@serverless/event-mocks': 1.1.1 '@serverless/platform-client': 4.5.1(supports-color@8.1.1) '@serverless/utils': 6.15.0 @@ -22501,16 +21839,16 @@ snapshots: '@smithy/abort-controller@2.2.0': dependencies: '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/chunked-blob-reader-native@2.2.0': dependencies: '@smithy/util-base64': 2.3.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/chunked-blob-reader@2.2.0': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/config-resolver@2.2.0': dependencies: @@ -22537,14 +21875,14 @@ snapshots: '@smithy/property-provider': 
2.2.0 '@smithy/types': 2.12.0 '@smithy/url-parser': 2.2.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/eventstream-codec@2.2.0': dependencies: '@aws-crypto/crc32': 3.0.0 '@smithy/types': 2.12.0 '@smithy/util-hex-encoding': 2.2.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/eventstream-serde-browser@2.2.0': dependencies: @@ -22567,7 +21905,7 @@ snapshots: dependencies: '@smithy/eventstream-codec': 2.2.0 '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/fetch-http-handler@2.5.0': dependencies: @@ -22604,7 +21942,7 @@ snapshots: '@smithy/is-array-buffer@2.2.0': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/md5-js@2.2.0': dependencies: @@ -22668,7 +22006,7 @@ snapshots: '@smithy/property-provider@2.2.0': dependencies: '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/protocol-http@3.3.0': dependencies: @@ -22679,12 +22017,12 @@ snapshots: dependencies: '@smithy/types': 2.12.0 '@smithy/util-uri-escape': 2.2.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/querystring-parser@2.2.0': dependencies: '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/service-error-classification@2.1.5': dependencies: @@ -22693,7 +22031,7 @@ snapshots: '@smithy/shared-ini-file-loader@2.4.0': dependencies: '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/signature-v4@2.3.0': dependencies: @@ -22703,7 +22041,7 @@ snapshots: '@smithy/util-middleware': 2.2.0 '@smithy/util-uri-escape': 2.2.0 '@smithy/util-utf8': 2.3.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/smithy-client@2.5.1': dependencies: @@ -22741,11 +22079,11 @@ snapshots: '@smithy/util-buffer-from@2.2.0': dependencies: '@smithy/is-array-buffer': 2.2.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/util-config-provider@2.3.0': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/util-defaults-mode-browser@2.2.1': dependencies: @@ -22773,12 +22111,12 @@ snapshots: '@smithy/util-hex-encoding@2.2.0': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/util-middleware@2.2.0': dependencies: '@smithy/types': 2.12.0 - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/util-retry@2.2.0': dependencies: @@ -22799,7 +22137,7 @@ snapshots: '@smithy/util-uri-escape@2.2.0': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@smithy/util-utf8@2.3.0': dependencies: @@ -22812,8 +22150,6 @@ snapshots: '@smithy/types': 2.12.0 tslib: 2.6.2 - '@socket.io/component-emitter@3.1.2': {} - '@storybook/addon-actions@8.1.0-alpha.6': dependencies: '@storybook/core-events': 8.1.0-alpha.6 @@ -24141,7 +23477,7 @@ snapshots: flat-cache: 3.2.0 micromatch: 4.0.8 react-docgen-typescript: 2.2.2(typescript@5.4.3) - tslib: 2.6.2 + tslib: 2.8.0 typescript: 5.4.3 webpack: 5.91.0(@swc/core@1.5.7)(esbuild@0.20.2) transitivePeerDependencies: @@ -24440,7 +23776,7 @@ snapshots: '@swc/helpers@0.5.5': dependencies: '@swc/counter': 0.1.3 - tslib: 2.6.2 + tslib: 2.8.0 '@swc/types@0.1.7': dependencies: @@ -24475,7 +23811,7 @@ snapshots: dependencies: '@babel/runtime': 7.24.5 content-type: 1.0.5 - tslib: 2.6.2 + tslib: 2.8.0 '@tanstack/history@1.31.16': {} @@ -24610,66 +23946,6 @@ snapshots: '@tokenizer/token@0.3.0': {} - '@trigger.dev/build@3.0.13(typescript@5.4.3)': - dependencies: - '@trigger.dev/core': 3.0.13 - pkg-types: 1.2.1 - tinyglobby: 0.2.10 - tsconfck: 3.1.3(typescript@5.4.3) - transitivePeerDependencies: - - bufferutil - - supports-color - - typescript - - utf-8-validate - - '@trigger.dev/core@3.0.13': - dependencies: - '@google-cloud/precise-date': 4.0.0 - '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs': 0.52.1 - '@opentelemetry/exporter-logs-otlp-http': 
0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/exporter-trace-otlp-http': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-logs': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-node': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-node': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.1 - execa: 8.0.1 - humanize-duration: 3.32.1 - nanoid: 3.3.7 - socket.io-client: 4.7.5 - superjson: 2.2.1 - zod: 3.22.3 - zod-error: 1.5.0 - zod-validation-error: 1.5.0(zod@3.22.3) - transitivePeerDependencies: - - bufferutil - - supports-color - - utf-8-validate - - '@trigger.dev/sdk@3.0.13': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs': 0.52.1 - '@opentelemetry/semantic-conventions': 1.25.1 - '@trigger.dev/core': 3.0.13 - chalk: 5.3.0 - cronstrue: 2.51.0 - debug: 4.3.7(supports-color@8.1.1) - evt: 2.5.7 - slug: 6.1.0 - terminal-link: 3.0.0 - ulid: 2.3.0 - uuid: 9.0.1 - ws: 8.18.0 - zod: 3.22.3 - transitivePeerDependencies: - - bufferutil - - supports-color - - utf-8-validate - '@trysound/sax@0.2.0': {} '@ts-morph/common@0.16.0': @@ -25033,11 +24309,11 @@ snapshots: '@types/react-dom@18.3.0': dependencies: - '@types/react': 18.3.1 + '@types/react': 18.3.3 '@types/react-test-renderer@18.3.0': dependencies: - '@types/react': 18.3.1 + '@types/react': 18.3.3 '@types/react@18.3.1': dependencies: @@ -25074,8 +24350,6 @@ snapshots: '@types/node': 20.12.12 '@types/send': 0.17.4 - '@types/shimmer@1.2.0': {} - '@types/stack-utils@2.0.3': {} '@types/statuses@2.0.5': @@ -25145,7 +24419,7 @@ snapshots: '@typescript-eslint/type-utils': 7.3.1(eslint@8.57.0)(typescript@5.4.3) '@typescript-eslint/utils': 7.3.1(eslint@8.57.0)(typescript@5.4.3) '@typescript-eslint/visitor-keys': 7.3.1 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) eslint: 8.57.0 graphemer: 1.4.0 ignore: 5.3.1 @@ -25163,7 +24437,7 @@ snapshots: '@typescript-eslint/types': 7.17.0 '@typescript-eslint/typescript-estree': 7.17.0(typescript@5.4.3) '@typescript-eslint/visitor-keys': 7.17.0 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) eslint: 8.57.0 optionalDependencies: typescript: 5.4.3 @@ -25176,7 +24450,7 @@ snapshots: '@typescript-eslint/types': 7.3.1 '@typescript-eslint/typescript-estree': 7.3.1(typescript@5.4.3) '@typescript-eslint/visitor-keys': 7.3.1 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) eslint: 8.57.0 optionalDependencies: typescript: 5.4.3 @@ -25699,23 +24973,23 @@ snapshots: '@wry/caches@1.0.1': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@wry/context@0.7.4': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@wry/equality@0.5.7': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@wry/trie@0.4.3': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@wry/trie@0.5.0': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@xobotyi/scrollbar-width@1.9.5': {} @@ -25749,7 +25023,7 @@ snapshots: tar: 6.2.1 tinylogic: 2.0.0 treeify: 1.1.0 - tslib: 2.6.2 + tslib: 2.8.0 tunnel: 0.0.6 transitivePeerDependencies: - typanion @@ -25766,7 +25040,7 @@ snapshots: '@yarnpkg/fslib@3.1.0': dependencies: - tslib: 2.6.2 + tslib: 2.8.0 '@yarnpkg/libzip@2.3.0': dependencies: @@ -25777,7 +25051,7 @@ snapshots: dependencies: '@types/emscripten': 1.39.11 '@yarnpkg/fslib': 3.1.0 - tslib: 2.6.2 + tslib: 2.8.0 
'@yarnpkg/nm@4.0.2(typanion@3.14.0)': dependencies: @@ -25790,7 +25064,7 @@ snapshots: '@yarnpkg/parsers@3.0.2': dependencies: js-yaml: 3.14.1 - tslib: 2.6.2 + tslib: 2.8.0 '@yarnpkg/pnp@4.0.5': dependencies: @@ -25829,7 +25103,7 @@ snapshots: cross-spawn: 7.0.3 fast-glob: 3.3.2 micromatch: 4.0.8 - tslib: 2.6.2 + tslib: 2.8.0 transitivePeerDependencies: - typanion @@ -26683,10 +25957,6 @@ snapshots: dependencies: acorn: 8.11.3 - acorn-import-attributes@1.9.5(acorn@8.13.0): - dependencies: - acorn: 8.13.0 - acorn-jsx@5.3.2(acorn@7.4.1): dependencies: acorn: 7.4.1 @@ -27030,7 +26300,7 @@ snapshots: dependencies: pvtsutils: 1.3.5 pvutils: 1.1.3 - tslib: 2.6.2 + tslib: 2.8.0 assert@2.1.0: dependencies: @@ -27048,7 +26318,7 @@ snapshots: ast-types@0.16.1: dependencies: - tslib: 2.6.2 + tslib: 2.8.0 astral-regex@2.0.0: {} @@ -27476,7 +26746,7 @@ snapshots: camel-case@4.1.2: dependencies: pascal-case: 3.1.2 - tslib: 2.6.2 + tslib: 2.8.0 camelcase-css@2.0.1: {} @@ -27903,10 +27173,6 @@ snapshots: dependencies: is-what: 3.14.1 - copy-anything@3.0.5: - dependencies: - is-what: 4.1.16 - copy-to-clipboard@3.3.3: dependencies: toggle-selection: 1.0.6 @@ -28012,8 +27278,6 @@ snapshots: create-require@1.1.1: {} - cronstrue@2.51.0: {} - cross-spawn@5.1.0: dependencies: lru-cache: 4.1.5 @@ -28571,7 +27835,7 @@ snapshots: callsite: 1.0.0 camelcase: 6.3.0 cosmiconfig: 7.1.0 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) deps-regex: 0.2.0 findup-sync: 5.0.0 ignore: 5.3.1 @@ -28715,7 +27979,7 @@ snapshots: dot-case@3.0.4: dependencies: no-case: 3.0.4 - tslib: 2.6.2 + tslib: 2.8.0 dotenv-cli@6.0.0: dependencies: @@ -28801,20 +28065,6 @@ snapshots: fast-json-parse: 1.0.3 objectorarray: 1.0.5 - engine.io-client@6.5.4: - dependencies: - '@socket.io/component-emitter': 3.1.2 - debug: 4.3.7(supports-color@8.1.1) - engine.io-parser: 5.2.3 - ws: 8.17.1 - xmlhttprequest-ssl: 2.0.0 - transitivePeerDependencies: - - bufferutil - - supports-color - - utf-8-validate - - engine.io-parser@5.2.3: {} - enhanced-resolve@5.16.1: dependencies: graceful-fs: 4.2.11 @@ -29296,7 +28546,7 @@ snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.3 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) doctrine: 3.0.0 escape-string-regexp: 4.0.0 eslint-scope: 7.2.2 @@ -29434,12 +28684,6 @@ snapshots: md5.js: 1.3.5 safe-buffer: 5.2.1 - evt@2.5.7: - dependencies: - minimal-polyfills: 2.2.3 - run-exclusive: 2.2.19 - tsafe: 1.7.5 - execa@0.8.0: dependencies: cross-spawn: 5.1.0 @@ -29632,10 +28876,6 @@ snapshots: dependencies: pend: 1.2.0 - fdir@6.4.2(picomatch@4.0.2): - optionalDependencies: - picomatch: 4.0.2 - fecha@4.2.3: {} feed@4.2.2: @@ -29873,7 +29113,7 @@ snapshots: framer-motion@10.18.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: - tslib: 2.6.2 + tslib: 2.8.0 optionalDependencies: '@emotion/is-prop-valid': 0.8.8 react: 18.3.1 @@ -30171,7 +29411,7 @@ snapshots: graphql-tag@2.12.6(graphql@16.9.0): dependencies: graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.8.0 graphql-ws@5.16.0(graphql@16.9.0): dependencies: @@ -30630,8 +29870,6 @@ snapshots: human-signals@8.0.0: {} - humanize-duration@3.32.1: {} - humps@2.0.1: {} husky@8.0.3: {} @@ -30680,13 +29918,6 @@ snapshots: parent-module: 1.0.1 resolve-from: 4.0.0 - import-in-the-middle@1.11.2: - dependencies: - acorn: 8.13.0 - acorn-import-attributes: 1.9.5(acorn@8.13.0) - cjs-module-lexer: 1.3.1 - module-details-from-path: 1.0.3 - import-local@3.1.0: dependencies: pkg-dir: 4.2.0 @@ -30772,7 +30003,7 @@ snapshots: dependencies: 
'@ioredis/commands': 1.2.0 cluster-key-slot: 1.1.2 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) denque: 2.1.0 lodash.defaults: 4.2.0 lodash.isarguments: 3.1.0 @@ -31730,7 +30961,7 @@ snapshots: dependencies: copy-anything: 2.0.6 parse-node-version: 1.0.1 - tslib: 2.6.2 + tslib: 2.8.0 optionalDependencies: errno: 0.1.8 graceful-fs: 4.2.11 @@ -31772,7 +31003,7 @@ snapshots: dependencies: chalk: 5.3.0 commander: 11.0.0 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) execa: 7.2.0 lilconfig: 2.1.0 listr2: 6.6.1 @@ -31930,8 +31161,6 @@ snapshots: safe-stable-stringify: 2.4.3 triple-beam: 1.4.1 - long@5.2.3: {} - longest-streak@3.1.0: {} look-it-up@2.1.0: {} @@ -31950,7 +31179,7 @@ snapshots: lower-case@2.0.2: dependencies: - tslib: 2.6.2 + tslib: 2.8.0 lowercase-keys@2.0.0: {} @@ -32796,8 +32025,6 @@ snapshots: mini-svg-data-uri@1.4.4: {} - minimal-polyfills@2.2.3: {} - minimalistic-assert@1.0.1: {} minimalistic-crypto-utils@1.0.1: {} @@ -32865,8 +32092,6 @@ snapshots: pkg-types: 1.2.1 ufo: 1.5.4 - module-details-from-path@1.0.3: {} - moment@2.30.1: {} mri@1.2.0: {} @@ -32992,7 +32217,7 @@ snapshots: no-case@3.0.4: dependencies: lower-case: 2.0.2 - tslib: 2.6.2 + tslib: 2.8.0 node-abi@3.62.0: dependencies: @@ -33258,7 +32483,7 @@ snapshots: '@wry/caches': 1.0.1 '@wry/context': 0.7.4 '@wry/trie': 0.4.3 - tslib: 2.6.2 + tslib: 2.8.0 optionator@0.8.3: dependencies: @@ -33382,7 +32607,7 @@ snapshots: param-case@3.0.4: dependencies: dot-case: 3.0.4 - tslib: 2.6.2 + tslib: 2.8.0 parent-module@1.0.1: dependencies: @@ -33445,7 +32670,7 @@ snapshots: pascal-case@3.1.2: dependencies: no-case: 3.0.4 - tslib: 2.6.2 + tslib: 2.8.0 path-browserify@1.0.1: {} @@ -33535,8 +32760,6 @@ snapshots: picomatch@2.3.1: {} - picomatch@4.0.2: {} - pidtree@0.6.0: {} pify@2.3.0: {} @@ -34011,21 +33234,6 @@ snapshots: proto-list@1.2.4: {} - protobufjs@7.4.0: - dependencies: - '@protobufjs/aspromise': 1.1.2 - '@protobufjs/base64': 1.1.2 - '@protobufjs/codegen': 2.0.4 - '@protobufjs/eventemitter': 1.1.0 - '@protobufjs/fetch': 1.1.0 - '@protobufjs/float': 1.0.2 - '@protobufjs/inquire': 1.1.0 - '@protobufjs/path': 1.1.2 - '@protobufjs/pool': 1.1.0 - '@protobufjs/utf8': 1.1.0 - '@types/node': 20.12.12 - long: 5.2.3 - proxy-addr@2.0.7: dependencies: forwarded: 0.2.0 @@ -34079,7 +33287,7 @@ snapshots: pvtsutils@1.3.5: dependencies: - tslib: 2.6.2 + tslib: 2.8.0 pvutils@1.1.3: {} @@ -34323,7 +33531,7 @@ snapshots: dependencies: react: 18.3.1 react-style-singleton: 2.2.1(@types/react@18.3.1)(react@18.3.1) - tslib: 2.6.2 + tslib: 2.8.0 optionalDependencies: '@types/react': 18.3.1 @@ -34331,7 +33539,7 @@ snapshots: dependencies: react: 18.3.1 react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1) - tslib: 2.6.2 + tslib: 2.8.0 optionalDependencies: '@types/react': 18.3.3 @@ -34379,7 +33587,7 @@ snapshots: get-nonce: 1.0.1 invariant: 2.2.4 react: 18.3.1 - tslib: 2.6.2 + tslib: 2.8.0 optionalDependencies: '@types/react': 18.3.1 @@ -34388,7 +33596,7 @@ snapshots: get-nonce: 1.0.1 invariant: 2.2.4 react: 18.3.1 - tslib: 2.6.2 + tslib: 2.8.0 optionalDependencies: '@types/react': 18.3.3 @@ -34525,7 +33733,7 @@ snapshots: esprima: 4.0.1 source-map: 0.6.1 tiny-invariant: 1.3.3 - tslib: 2.6.2 + tslib: 2.8.0 redent@3.0.0: dependencies: @@ -34775,14 +33983,6 @@ snapshots: require-from-string@2.0.2: {} - require-in-the-middle@7.4.0: - dependencies: - debug: 4.3.7(supports-color@8.1.1) - module-details-from-path: 1.0.3 - resolve: 1.22.8 - transitivePeerDependencies: - - supports-color - 
require-package-name@2.0.1: {} requireindex@1.2.0: {} @@ -34955,10 +34155,6 @@ snapshots: run-async@2.4.1: {} - run-exclusive@2.2.19: - dependencies: - minimal-polyfills: 2.2.3 - run-parallel-limit@1.1.0: dependencies: queue-microtask: 1.2.3 @@ -34973,7 +34169,7 @@ snapshots: rxjs@7.8.1: dependencies: - tslib: 2.6.2 + tslib: 2.8.0 sade@1.8.1: dependencies: @@ -35297,8 +34493,6 @@ snapshots: '@shikijs/vscode-textmate': 9.3.0 '@types/hast': 3.0.4 - shimmer@1.2.1: {} - side-channel@1.0.6: dependencies: call-bind: 1.0.7 @@ -35322,14 +34516,6 @@ snapshots: once: 1.4.0 simple-concat: 1.0.1 - simple-git@3.24.0: - dependencies: - '@kwsites/file-exists': 1.1.1 - '@kwsites/promise-deferred': 1.1.1 - debug: 4.3.4(supports-color@5.5.0) - transitivePeerDependencies: - - supports-color - simple-git@3.24.0(supports-color@8.1.1): dependencies: '@kwsites/file-exists': 1.1.1(supports-color@8.1.1) @@ -35375,26 +34561,6 @@ snapshots: ansi-styles: 6.2.1 is-fullwidth-code-point: 4.0.0 - slug@6.1.0: {} - - socket.io-client@4.7.5: - dependencies: - '@socket.io/component-emitter': 3.1.2 - debug: 4.3.7(supports-color@8.1.1) - engine.io-client: 6.5.4 - socket.io-parser: 4.2.4 - transitivePeerDependencies: - - bufferutil - - supports-color - - utf-8-validate - - socket.io-parser@4.2.4: - dependencies: - '@socket.io/component-emitter': 3.1.2 - debug: 4.3.7(supports-color@8.1.1) - transitivePeerDependencies: - - supports-color - sonner@1.5.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: react: 18.3.1 @@ -35788,7 +34954,7 @@ snapshots: cosmiconfig: 9.0.0(typescript@5.4.3) css-functions-list: 3.2.2 css-tree: 2.3.1 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) fast-glob: 3.3.2 fastest-levenshtein: 1.0.16 file-entry-cache: 8.0.0 @@ -35859,10 +35025,6 @@ snapshots: transitivePeerDependencies: - supports-color - superjson@2.2.1: - dependencies: - copy-anything: 3.0.5 - supports-color@2.0.0: {} supports-color@4.5.0: @@ -35885,11 +35047,6 @@ snapshots: dependencies: has-flag: 4.0.0 - supports-hyperlinks@2.3.0: - dependencies: - has-flag: 4.0.0 - supports-color: 7.2.0 - supports-hyperlinks@3.0.0: dependencies: has-flag: 4.0.0 @@ -36086,11 +35243,6 @@ snapshots: type-fest: 0.16.0 unique-string: 2.0.0 - terminal-link@3.0.0: - dependencies: - ansi-escapes: 5.0.0 - supports-hyperlinks: 2.3.0 - terser-webpack-plugin@5.3.10(@swc/core@1.5.7)(esbuild@0.20.2)(webpack@5.91.0(@swc/core@1.5.7)(esbuild@0.20.2)): dependencies: '@jridgewell/trace-mapping': 0.3.25 @@ -36162,11 +35314,6 @@ snapshots: tinyexec@0.3.0: {} - tinyglobby@0.2.10: - dependencies: - fdir: 6.4.2(picomatch@4.0.2) - picomatch: 4.0.2 - tinylogic@2.0.0: {} tinypool@0.8.4: {} @@ -36301,7 +35448,7 @@ snapshots: ts-invariant@0.10.3: dependencies: - tslib: 2.6.2 + tslib: 2.8.0 ts-morph@15.1.0: dependencies: @@ -36403,16 +35550,10 @@ snapshots: ts-toolbelt@6.15.5: {} - tsafe@1.7.5: {} - tsconfck@2.1.2(typescript@5.4.3): optionalDependencies: typescript: 5.4.3 - tsconfck@3.1.3(typescript@5.4.3): - optionalDependencies: - typescript: 5.4.3 - tsconfig-paths-webpack-plugin@4.1.0: dependencies: chalk: 4.1.2 @@ -36450,7 +35591,7 @@ snapshots: bundle-require: 4.1.0(esbuild@0.20.2) cac: 6.7.14 chokidar: 3.6.0 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) esbuild: 0.20.2 execa: 5.1.1 globby: 11.1.0 @@ -36469,6 +35610,30 @@ snapshots: - supports-color - ts-node + tsup@8.0.2(@swc/core@1.5.7)(postcss@8.4.31)(ts-node@10.9.2(@swc/core@1.5.7)(@types/node@18.19.33)(typescript@5.4.3))(typescript@5.4.3): + dependencies: + 
bundle-require: 4.1.0(esbuild@0.20.2) + cac: 6.7.14 + chokidar: 3.6.0 + debug: 4.3.4(supports-color@8.1.1) + esbuild: 0.20.2 + execa: 5.1.1 + globby: 11.1.0 + joycon: 3.1.1 + postcss-load-config: 4.0.2(postcss@8.4.31)(ts-node@10.9.2(@swc/core@1.5.7)(@types/node@18.19.33)(typescript@5.4.3)) + resolve-from: 5.0.0 + rollup: 4.17.2 + source-map: 0.8.0-beta.0 + sucrase: 3.35.0 + tree-kill: 1.2.2 + optionalDependencies: + '@swc/core': 1.5.7(@swc/helpers@0.5.5) + postcss: 8.4.31 + typescript: 5.4.3 + transitivePeerDependencies: + - supports-color + - ts-node + tsutils@3.21.0(typescript@5.4.3): dependencies: tslib: 1.14.1 @@ -36631,8 +35796,6 @@ snapshots: uglify-js@3.17.4: optional: true - ulid@2.3.0: {} - unbox-primitive@1.0.2: dependencies: call-bind: 1.0.7 @@ -36834,14 +35997,14 @@ snapshots: use-callback-ref@1.3.2(@types/react@18.3.1)(react@18.3.1): dependencies: react: 18.3.1 - tslib: 2.6.2 + tslib: 2.8.0 optionalDependencies: '@types/react': 18.3.1 use-callback-ref@1.3.2(@types/react@18.3.3)(react@18.3.1): dependencies: react: 18.3.1 - tslib: 2.6.2 + tslib: 2.8.0 optionalDependencies: '@types/react': 18.3.3 @@ -36883,7 +36046,7 @@ snapshots: dependencies: detect-node-es: 1.1.0 react: 18.3.1 - tslib: 2.6.2 + tslib: 2.8.0 optionalDependencies: '@types/react': 18.3.1 @@ -36891,7 +36054,7 @@ snapshots: dependencies: detect-node-es: 1.1.0 react: 18.3.1 - tslib: 2.6.2 + tslib: 2.8.0 optionalDependencies: '@types/react': 18.3.3 @@ -36986,7 +36149,7 @@ snapshots: vite-node@1.6.0(@types/node@18.19.33)(less@4.2.0)(sass@1.77.0)(stylus@0.62.0)(terser@5.31.0): dependencies: cac: 6.7.14 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) pathe: 1.1.2 picocolors: 1.0.0 vite: 5.4.6(@types/node@18.19.33)(less@4.2.0)(sass@1.77.0)(stylus@0.62.0)(terser@5.31.0) @@ -37004,7 +36167,7 @@ snapshots: vite-node@1.6.0(@types/node@22.5.5)(less@4.2.0)(sass@1.77.0)(stylus@0.62.0)(terser@5.31.0): dependencies: cac: 6.7.14 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) pathe: 1.1.2 picocolors: 1.0.0 vite: 5.4.6(@types/node@22.5.5)(less@4.2.0)(sass@1.77.0)(stylus@0.62.0)(terser@5.31.0) @@ -37102,7 +36265,7 @@ snapshots: '@vitest/utils': 1.6.0 acorn-walk: 8.3.2 chai: 4.4.1 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) execa: 8.0.1 local-pkg: 0.5.0 magic-string: 0.30.10 @@ -37138,7 +36301,7 @@ snapshots: '@vitest/utils': 1.6.0 acorn-walk: 8.3.2 chai: 4.4.1 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.4(supports-color@8.1.1) execa: 8.0.1 local-pkg: 0.5.0 magic-string: 0.30.10 @@ -37524,8 +36687,6 @@ snapshots: ws@7.5.9: {} - ws@8.17.1: {} - ws@8.18.0: {} xml-js@1.6.11: @@ -37543,8 +36704,6 @@ snapshots: xmlchars@2.2.0: {} - xmlhttprequest-ssl@2.0.0: {} - xregexp@5.1.1: dependencies: '@babel/runtime-corejs3': 7.24.7 @@ -37630,16 +36789,6 @@ snapshots: compress-commons: 6.0.2 readable-stream: 4.5.2 - zod-error@1.5.0: - dependencies: - zod: 3.23.8 - - zod-validation-error@1.5.0(zod@3.22.3): - dependencies: - zod: 3.22.3 - - zod@3.22.3: {} - zod@3.23.8: {} zwitch@2.0.4: {}