Commit

ui

infwinston committed Nov 27, 2023
1 parent ed9ec32 commit fe1b9ef
Showing 4 changed files with 50 additions and 31 deletions.
12 changes: 6 additions & 6 deletions fastchat/model/model_registry.py
@@ -73,6 +73,12 @@ def get_model_info(name: str) -> ModelInfo:
     "https://huggingface.co/allenai/tulu-2-dpo-70b",
     "Tulu 2 by UW/AllenAI",
 )
+register_model_info(
+    ["yi-34b-chat"],
+    "Yi-Chat",
+    "https://huggingface.co/01-ai/Yi-34B-Chat",
+    "A large language model by 01 AI",
+)
 register_model_info(
     [
         "vicuna-33b",
@@ -404,9 +410,3 @@ def get_model_info(name: str) -> ModelInfo:
     "Chat models developed by BAAI team",
 )
 
-register_model_info(
-    ["Yi-34B-Chat"],
-    "Yi-Chat",
-    "https://huggingface.co/01-ai",
-    "A large language model by 01.AI.",
-)
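These two hunks move the Yi registration earlier in the file and, more importantly, switch the registry key from "Yi-34B-Chat" to the lowercase "yi-34b-chat" that the serving workers actually report. For readers without the file open, here is a rough sketch of what `register_model_info` / `get_model_info` plausibly look like; the exact field names and the unregistered-model fallback are assumptions, but the sketch shows why a case-sensitive key mismatch would leave a model without its description:

```python
import dataclasses

@dataclasses.dataclass
class ModelInfo:
    simple_name: str
    link: str
    description: str

model_info = {}

def register_model_info(full_names, simple_name, link, description):
    # Every alias in full_names maps to the same display metadata.
    info = ModelInfo(simple_name, link, description)
    for full_name in full_names:
        model_info[full_name] = info

def get_model_info(name: str) -> ModelInfo:
    # Lookup is by exact (case-sensitive) name, which is why the key
    # had to become the lowercase "yi-34b-chat" the workers report.
    return model_info.get(name, ModelInfo(name, "", "Unregistered model"))
```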
22 changes: 15 additions & 7 deletions fastchat/serve/gradio_block_arena_anony.py
@@ -29,6 +29,7 @@
     acknowledgment_md,
     ip_expiration_dict,
     get_ip,
+    get_model_description_md,
 )
 from fastchat.utils import (
     build_logger,
@@ -160,14 +161,16 @@ def share_click(state0, state1, model_selector0, model_selector1, request: gr.Request):
 SAMPLING_WEIGHTS = {
     # tier 0
     "gpt-4": 4,
-    "gpt-4-turbo": 8,
+    "gpt-4-turbo": 4,
     "gpt-3.5-turbo": 2,
+    "gpt-3.5-turbo-1106": 4,
     "claude-2.1": 4,
-    "claude-2.0": 4,
-    "claude-1": 4,
+    "claude-2.0": 2,
+    "claude-1": 2,
     "claude-instant-1": 4,
+    "wizardlm-70b": 4,
     "tulu-2-dpo-70b": 2,
+    "yi-34b-chat": 2,
     "zephyr-7b-beta": 2,
     "openchat-3.5": 2,
     "chatglm3-6b": 2,
@@ -179,7 +182,6 @@ def share_click(state0, state1, model_selector0, model_selector1, request: gr.Request):
     "codellama-34b-instruct": 1.5,
     "vicuna-33b": 4,
     "vicuna-13b": 1.5,
-    "wizardlm-70b": 1.5,
     "wizardlm-13b": 1.5,
     "qwen-14b-chat": 1.5,
     "mistral-7b-instruct": 1.5,
@@ -218,6 +220,7 @@ def share_click(state0, state1, model_selector0, model_selector1, request: gr.Request):
     "claude-instant-1": {"gpt-3.5-turbo", "claude-2.1"},
     "deluxe-chat-v1.1": {"gpt-4", "gpt-4-turbo"},
     "tulu-2-dpo-70b": {"gpt-3.5-turbo", "vicuna-33b", "claude-instant-1"},
+    "yi-34b-chat": {"gpt-3.5-turbo", "vicuna-33b", "claude-instant-1"},
     "openchat-3.5": {"gpt-3.5-turbo", "llama-2-70b-chat", "zephyr-7b-beta"},
     "chatglm3-6b": {"chatglm2-6b", "qwen-14b-chat", "gpt-3.5-turbo"},
     "qwen-14b-chat": {"vicuna-13b", "llama-2-13b-chat", "llama-2-70b-chat"},
@@ -245,16 +248,17 @@ def share_click(state0, state1, model_selector0, model_selector1, request: gr.Request):
 
 SAMPLING_BOOST_MODELS = [
     "tulu-2-dpo-70b",
-    "openchat-3.5",
-    #"gpt-4-turbo",
+    "yi-34b-chat",
     "claude-2.1",
+    "wizardlm-70b",
+    #"openchat-3.5",
+    #"gpt-4-turbo",
     #"claude-1",
 ]
 
 # outage models won't be sampled.
 OUTAGE_MODELS = [
     "zephyr-7b-alpha",
-    "wizardlm-70b",
     "falcon-180b-chat",
     "deluxe-chat-v1.1",
     "gpt-3.5-turbo-1106",
@@ -528,6 +532,10 @@ def build_side_by_side_ui_anony(models):
             label="Max output tokens",
         )
 
+    with gr.Accordion("Expand to see all model candidates", open=False):
+        model_description_md = get_model_description_md(models)
+        gr.Markdown(model_description_md, elem_id="model_description_markdown")
+
     gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
 
     # Register listeners
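The UI change itself is the same across all three serve files in this commit: the long model-description table moves out of the always-visible notice markdown and into a gr.Accordion that is collapsed by default. A minimal self-contained sketch of that pattern follows; the toy get_model_description_md and model list are stand-ins for FastChat's helper, not its real output.

```python
import gradio as gr

def get_model_description_md(models):
    # Stand-in for FastChat's helper, which renders a markdown table
    # of model names, links, and one-line descriptions.
    return "\n".join(f"- **{m}**: placeholder description" for m in models)

models = ["gpt-4-turbo", "yi-34b-chat", "vicuna-33b"]

with gr.Blocks() as demo:
    gr.Markdown("## Chatbot Arena")
    # Collapsed by default, so the notice above the chat stays short.
    with gr.Accordion("Expand to see all model candidates", open=False):
        gr.Markdown(get_model_description_md(models), elem_id="model_description_markdown")

if __name__ == "__main__":
    demo.launch()
```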
8 changes: 6 additions & 2 deletions fastchat/serve/gradio_block_arena_named.py
@@ -284,9 +284,8 @@ def build_side_by_side_ui_named(models):
     model_selectors = [None] * num_sides
     chatbots = [None] * num_sides
 
-    model_description_md = get_model_description_md(models)
     notice = gr.Markdown(
-        notice_markdown + model_description_md, elem_id="notice_markdown"
+        notice_markdown, elem_id="notice_markdown"
     )
 
     with gr.Box(elem_id="share-region-named"):
@@ -300,6 +299,10 @@ def build_side_by_side_ui_named(models):
                         show_label=False,
                         container=False,
                     )
+        with gr.Row():
+            with gr.Accordion("Expand to see model descriptions", open=False):
+                model_description_md = get_model_description_md(models)
+                gr.Markdown(model_description_md, elem_id="model_description_markdown")
 
         with gr.Row():
             for i in range(num_sides):
@@ -360,6 +363,7 @@ def build_side_by_side_ui_named(models):
             label="Max output tokens",
         )
 
+
     gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
 
     # Register listeners
39 changes: 23 additions & 16 deletions fastchat/serve/gradio_web_server.py
@@ -498,6 +498,9 @@ def bot_response(state, temperature, top_p, max_new_tokens, request: gr.Request):
     padding-top: 6px;
     padding-bottom: 6px;
 }
+#model_description_markdown {
+    font-size: 110%
+}
 #leaderboard_markdown {
     font-size: 110%
 }
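The new `#model_description_markdown` rule is added inside the block_css string that this module passes to gr.Blocks, and it matches the elem_id given to the accordion's Markdown component. Roughly, the wiring looks like this sketch, trimmed down to the one new rule:

```python
import gradio as gr

block_css = """
#model_description_markdown {
    font-size: 110%
}
"""

with gr.Blocks(css=block_css) as demo:
    # elem_id must match the CSS selector above for the rule to apply.
    gr.Markdown("Model descriptions go here.", elem_id="model_description_markdown")
```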
@@ -623,23 +626,27 @@ def build_single_model_ui(models, add_promotion_links=False):
     """
 
     state = gr.State()
-    model_description_md = get_model_description_md(models)
-    gr.Markdown(notice_markdown + model_description_md, elem_id="notice_markdown")
-
-    with gr.Row(elem_id="model_selector_row"):
-        model_selector = gr.Dropdown(
-            choices=models,
-            value=models[0] if len(models) > 0 else "",
-            interactive=True,
-            show_label=False,
-            container=False,
-        )
+    gr.Markdown(notice_markdown, elem_id="notice_markdown")
+
+    with gr.Box(elem_id="share-region-named"):
+        with gr.Row(elem_id="model_selector_row"):
+            model_selector = gr.Dropdown(
+                choices=models,
+                value=models[0] if len(models) > 0 else "",
+                interactive=True,
+                show_label=False,
+                container=False,
+            )
+        with gr.Row():
+            with gr.Accordion("Expand to see model descriptions", open=False):
+                model_description_md = get_model_description_md(models)
+                gr.Markdown(model_description_md, elem_id="model_description_markdown")
 
-    chatbot = gr.Chatbot(
-        elem_id="chatbot",
-        label="Scroll down and start chatting",
-        height=550,
-    )
+        chatbot = gr.Chatbot(
+            elem_id="chatbot",
+            label="Scroll down and start chatting",
+            height=550,
+        )
     with gr.Row():
         textbox = gr.Textbox(
             show_label=False,
