From a9784ad0196cdc62168b08d6685cfc773f2391eb Mon Sep 17 00:00:00 2001
From: Ryan Holinshead <>
Date: Fri, 5 Jan 2024 14:49:51 -0500
Subject: [PATCH] [editor] Add AnyscaleEndpoint Prompt Schema

---
 .../AnyscaleEndpointPromptSchema.ts           | 68 +++++++++++++++++++
 .../editor/client/src/utils/promptUtils.ts    |  4 ++
 .../src/aiconfig/editor/travel.aiconfig.json  | 37 ++++++++++
 3 files changed, 109 insertions(+)
 create mode 100644 python/src/aiconfig/editor/client/src/shared/prompt_schemas/AnyscaleEndpointPromptSchema.ts

diff --git a/python/src/aiconfig/editor/client/src/shared/prompt_schemas/AnyscaleEndpointPromptSchema.ts b/python/src/aiconfig/editor/client/src/shared/prompt_schemas/AnyscaleEndpointPromptSchema.ts
new file mode 100644
index 000000000..f3b7e4db8
--- /dev/null
+++ b/python/src/aiconfig/editor/client/src/shared/prompt_schemas/AnyscaleEndpointPromptSchema.ts
@@ -0,0 +1,68 @@
+import { PromptSchema } from "../../utils/promptUtils";
+
+export const AnyscaleEndpointPromptSchema: PromptSchema = {
+  // See https://docs.anyscale.com/endpoints/model-serving/openai-migration-guide#step-3-check-parameter-compatibility
+  // for settings and defaults. The settings below are those supported by the OpenAIInference
+  // refine_chat_completion_params implementation.
+  input: {
+    type: "string",
+  },
+  model_settings: {
+    type: "object",
+    properties: {
+      model: {
+        type: "string",
+      },
+      frequency_penalty: {
+        type: "number",
+        minimum: -2.0,
+        maximum: 2.0,
+        description: `Number between -2.0 and 2.0.
+        Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`,
+      },
+      max_tokens: {
+        type: "integer",
+        description: `The maximum number of tokens to generate in the chat completion.`,
+      },
+      presence_penalty: {
+        type: "number",
+        minimum: -2.0,
+        maximum: 2.0,
+        description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
+        increasing the model's likelihood to talk about new topics.`,
+      },
+      stop: {
+        type: "array",
+        items: {
+          type: "string",
+        },
+        description: `Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.`,
+      },
+      stream: {
+        type: "boolean",
+        description: `If true, the response is streamed token by token. If false, the full response is sent in a single message.`,
+      },
+      temperature: {
+        type: "number",
+        minimum: 0.0,
+        maximum: 2.0,
+        description: `A number between 0 and 2. Higher values produce more random responses, while lower values produce more deterministic ones.`,
+      },
+      top_p: {
+        type: "number",
+        minimum: 0.0,
+        maximum: 1.0,
+        description: `The percentage of tokens with top_p probability mass to consider.
+        For example, 0.1 means only tokens comprising the top 10% probability mass become candidates.`,
+      },
+    },
+  },
+  prompt_metadata: {
+    type: "object",
+    properties: {
+      remember_chat_context: {
+        type: "boolean",
+      },
+    },
+  },
+};
diff --git a/python/src/aiconfig/editor/client/src/utils/promptUtils.ts b/python/src/aiconfig/editor/client/src/utils/promptUtils.ts
index 557bf3210..780dfa040 100644
--- a/python/src/aiconfig/editor/client/src/utils/promptUtils.ts
+++ b/python/src/aiconfig/editor/client/src/utils/promptUtils.ts
@@ -5,6 +5,7 @@ import { DalleImageGenerationParserPromptSchema } from "../shared/prompt_schemas
 import { PaLMTextParserPromptSchema } from "../shared/prompt_schemas/PaLMTextParserPromptSchema";
 import { PaLMChatParserPromptSchema } from "../shared/prompt_schemas/PaLMChatParserPromptSchema";
 import { HuggingFaceTextGenerationParserPromptSchema } from "../shared/prompt_schemas/HuggingFaceTextGenerationParserPromptSchema";
+import { AnyscaleEndpointPromptSchema } from "../shared/prompt_schemas/AnyscaleEndpointPromptSchema";
 
 /**
  * Get the name of the model for the specified prompt. The name will either be specified in the prompt's
@@ -76,6 +77,9 @@ export const PROMPT_SCHEMAS: Record<string, PromptSchema> = {
 
   // PaLMChatParser
   "models/chat-bison-001": PaLMChatParserPromptSchema,
+
+  // AnyscaleEndpoint
+  AnyscaleEndpoint: AnyscaleEndpointPromptSchema,
 };
 
 export type PromptInputSchema =
diff --git a/python/src/aiconfig/editor/travel.aiconfig.json b/python/src/aiconfig/editor/travel.aiconfig.json
index 3877af598..2b261a4a2 100644
--- a/python/src/aiconfig/editor/travel.aiconfig.json
+++ b/python/src/aiconfig/editor/travel.aiconfig.json
@@ -94,6 +94,43 @@
           }
         }
       ]
+    },
+    {
+      "name": "prompt_4",
+      "input": "test",
+      "metadata": {
+        "model": {
+          "name": "AnyscaleEndpoint",
+          "settings": {
+            "model": "meta-llama/Llama-2-7b-chat-hf"
+          }
+        },
+        "parameters": {}
+      },
+      "outputs": [
+        {
+          "output_type": "execute_result",
+          "execution_count": 0,
+          "data": " Sure! Here are 5 questions to test your understanding of the conversation:\n\n1. What is the main topic of the conversation?\n2. What is the purpose of the meeting according to the speaker?\n3. What is the speaker's opinion on the proposed meeting time?\n4. How does the speaker feel about the potential attendees of the meeting?\n5. What is the speaker's recommended solution for the meeting time conflict?",
+          "metadata": {
+            "raw_response": {
+              "content": " Sure! Here are 5 questions to test your understanding of the conversation:\n\n1. What is the main topic of the conversation?\n2. What is the purpose of the meeting according to the speaker?\n3. What is the speaker's opinion on the proposed meeting time?\n4. How does the speaker feel about the potential attendees of the meeting?\n5. What is the speaker's recommended solution for the meeting time conflict?",
+              "role": "assistant"
+            },
+            "id": "meta-llama/Llama-2-7b-chat-hf-JknFQzgthe2aABGHrYeW86GhXnUVx7fH9Z7JldFLe2g",
+            "created": 1704484147,
+            "model": "meta-llama/Llama-2-7b-chat-hf",
+            "object": "text_completion",
+            "usage": {
+              "completion_tokens": 97,
+              "prompt_tokens": 9,
+              "total_tokens": 106
+            },
+            "finish_reason": "stop",
+            "role": "assistant"
+          }
+        }
+      ]
     }
   ],
   "$schema": "https://json.schemastore.org/aiconfig-1.0"
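
Reviewer note (outside the patch): a minimal sketch of how the new PROMPT_SCHEMAS entry is resolved at runtime, assuming the editor looks schemas up by the model parser id stored in prompt metadata, as the prompt_4 example above suggests. The relative import path and the settings-property access are illustrative assumptions based on the shapes shown in this diff, not verified against the full promptUtils.ts:

    // sketch.ts -- illustrative only, not part of this diff
    import { PROMPT_SCHEMAS } from "./promptUtils";

    // "AnyscaleEndpoint" matches prompt.metadata.model.name in the
    // travel.aiconfig.json example added above.
    const schema = PROMPT_SCHEMAS["AnyscaleEndpoint"];

    // The editor renders one settings control per property in model_settings;
    // a setting absent from the schema (e.g. top_k, an assumed example) simply
    // gets no control.
    console.log(Object.keys(schema?.model_settings?.properties ?? {}));
    // -> ["model", "frequency_penalty", "max_tokens", "presence_penalty",
    //     "stop", "stream", "temperature", "top_p"]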