-
Notifications
You must be signed in to change notification settings - Fork 80
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[editor] Fix Model Search to Ignore Casing (#756)
# [editor] Fix Model Search to Ignore Casing Previously, the model search was doing exact match, including capitalization. So, searching 'hu' would not show the "HuggingFaceTextGenerationParser". Fix by making both search and model names lowercase when filtering: <img width="354" alt="Screenshot 2024-01-04 at 3 35 46 PM" src="https://github.com/lastmile-ai/aiconfig/assets/5060851/10baf717-6192-4de8-bab6-046f678ac9ef"> --- Stack created with [Sapling](https://sapling-scm.com). Best reviewed with [ReviewStack](https://reviewstack.dev/lastmile-ai/aiconfig/pull/756). * __->__ #756 * #755 * #754
- Loading branch information
Showing
5 changed files
with
144 additions
and
4 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
73 changes: 73 additions & 0 deletions
73
...ig/editor/client/src/shared/prompt_schemas/HuggingFaceTextGenerationParserPromptSchema.ts
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,73 @@ | ||
import { PromptSchema } from "../../utils/promptUtils"; | ||
|
||
// Editor-side UI schema describing which settings the HuggingFaceTextGenerationParser
// supports, so the prompt editor can render the right controls and enforce bounds.
export const HuggingFaceTextGenerationParserPromptSchema: PromptSchema = {
  // See https://github.com/huggingface/huggingface_hub/blob/a331e82aad1bc63038194611236db28fa013814c/src/huggingface_hub/inference/_client.py#L1206
  // for settings and https://huggingface.co/docs/api-inference/detailed_parameters for defaults.
  // The settings below are supported settings specified in the HuggingFaceTextGenerationParser
  // refine_chat_completion_params implementation.

  // Prompt input is free-form text.
  input: {
    type: "string",
  },
  model_settings: {
    type: "object",
    properties: {
      // Presumably the HF model id / endpoint name forwarded to the inference
      // client — confirm against the parser implementation.
      model: {
        type: "string",
      },
      // Sampling temperature; this schema constrains it to [0, 1].
      temperature: {
        type: "number",
        minimum: 0,
        maximum: 1,
      },
      // Top-k sampling cutoff (integer, unbounded here).
      top_k: {
        type: "integer",
      },
      // Nucleus (top-p) sampling mass, constrained to [0, 1].
      top_p: {
        type: "number",
        minimum: 0,
        maximum: 1,
      },
      // Whether to request detailed generation info from the API.
      details: {
        type: "boolean",
      },
      // Whether to stream tokens as they are generated.
      stream: {
        type: "boolean",
      },
      // Enables sampling (vs. greedy decoding) on the server side.
      do_sample: {
        type: "boolean",
      },
      // Upper bound on generated tokens.
      max_new_tokens: {
        type: "integer",
      },
      // Number of candidate sequences to generate server-side.
      best_of: {
        type: "integer",
      },
      // NOTE(review): HF's API commonly allows repetition_penalty > 1; the
      // [0, 1] bound here mirrors the parser's declared range — confirm intended.
      repetition_penalty: {
        type: "number",
        minimum: 0,
        maximum: 1,
      },
      // If true, the response includes the prompt text plus the completion.
      return_full_text: {
        type: "boolean",
      },
      // RNG seed for reproducible sampling.
      seed: {
        type: "integer",
      },
      // Strings that terminate generation when produced.
      stop_sequences: {
        type: "array",
        items: {
          type: "string",
        },
      },
      // Truncate the input to this many tokens before generation.
      truncate: {
        type: "integer",
      },
      // Typical-p sampling parameter; no bounds declared here, unlike
      // top_p/temperature — presumably also a [0, 1] probability.
      typical_p: {
        type: "number",
      },
      // Whether to apply output watermarking.
      watermark: {
        type: "boolean",
      },
    },
  },
};
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
61 changes: 61 additions & 0 deletions
61
python/src/aiconfig/editor/client/src/shared/prompt_schemas/PaLMChatParserPromptSchema.ts
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,61 @@ | ||
import { PromptSchema } from "../../utils/promptUtils"; | ||
|
||
// Editor-side UI schema describing which settings the PaLMChatParser supports,
// so the prompt editor can render the right controls and enforce bounds.
export const PaLMChatParserPromptSchema: PromptSchema = {
  // See https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/text-chat for settings
  // and defaults. The settings below are supported settings specified in the PaLMChatParser
  // refine_chat_completion_params implementation.

  // Prompt input is free-form text.
  input: {
    type: "string",
  },
  model_settings: {
    type: "object",
    properties: {
      // System-style context string that shapes the chat model's behavior.
      context: {
        type: "string",
      },
      // Number of response candidates to return; PaLM chat allows 1–4.
      candidate_count: {
        type: "integer",
        minimum: 1,
        maximum: 4,
      },
      // Sampling temperature, constrained to [0, 1].
      temperature: {
        type: "number",
        minimum: 0,
        maximum: 1,
      },
      // Nucleus (top-p) sampling mass, constrained to [0, 1].
      top_p: {
        type: "number",
        minimum: 0,
        maximum: 1,
      },
      // Top-k sampling cutoff; PaLM chat allows 1–40.
      top_k: {
        type: "integer",
        minimum: 1,
        maximum: 40,
      },
      // Few-shot examples: each entry must pair an input with its output.
      examples: {
        type: "array",
        items: {
          type: "object",
          required: ["input", "output"],
          properties: {
            input: {
              type: "string",
            },
            output: {
              type: "string",
            },
          },
        },
      },
    },
  },
  // Per-prompt (not per-model) metadata understood by the editor.
  prompt_metadata: {
    type: "object",
    properties: {
      // If true, prior chat turns are presumably carried into this prompt's
      // request — confirm against the parser implementation.
      remember_chat_context: {
        type: "boolean",
      },
    },
  },
};
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters