iojw committed on
Commit 1d0039a · 1 Parent(s): 047d586
Files changed (2)
  1. app.py +59 -54
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,63 +1,68 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
  demo = gr.ChatInterface(
-     respond,
      additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
          gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
          ),
      ],
  )

-
  if __name__ == "__main__":
-     demo.launch()
 
+ import re
+
  import gradio as gr
+ from routellm.controller import Controller
+
+ TEMPERATURE = 0.8
+ THRESHOLD = 0.11593
+ ROUTER = "mf"
+
+ client = Controller(
+     routers=["mf"],
+     strong_model="gpt-4-1106-preview",
+     weak_model="anyscale/mistralai/Mixtral-8x7B-Instruct-v0.1",
+ )
+
+
+ def predict(message, history, threshold, temperature):
+     # Convert chat history to OpenAI format
+     history_openai_format = [
+         {"role": "system", "content": "You are a helpful AI assistant."}
+     ]
+     for human, assistant in history:
+         history_openai_format.append({"role": "user", "content": human})
+         history_openai_format.append(
+             {
+                 "role": "assistant",
+                 # Remove model name from response
+                 "content": re.sub(r"^\[.*?\]\s*", "", assistant),
+             }
+         )
+     history_openai_format.append({"role": "user", "content": message})
+
+     # Create a chat completion request and send it to the API server
+     stream = client.chat.completions.create(
+         model=f"router-{ROUTER}-{threshold}",  # Model name to use
+         messages=history_openai_format,  # Chat history
+         temperature=temperature,  # Temperature for text generation
+         stream=True,  # Stream response
+     )
+     print(stream)
+
+     # Read and return generated text from response stream
+     partial_message = ""
+     for i, chunk in enumerate(stream):
+         print(chunk)
+         if i == 0:
+             model_prefix = f"[{chunk.model}]\n"
+             yield model_prefix
+             partial_message += model_prefix
+         partial_message += chunk.choices[0].delta.content or ""
+         yield partial_message
+
+
+ # Create and launch a chat interface with Gradio
  demo = gr.ChatInterface(
+     predict,
      additional_inputs=[
+         gr.Slider(label="Threshold", minimum=0, maximum=1, value=THRESHOLD, step=0.01),
          gr.Slider(
+             label="Temperature", minimum=0, maximum=1, value=TEMPERATURE, step=0.1
          ),
      ],
+     title="RouteLLM",
+     description="This is a demo of our matrix factorization router, calibrated so that approximately 50% of harder calls are routed to GPT-4, with remaining calls routed to Mixtral 8x7B.\nSee https://github.com/lm-sys/RouteLLM for details!",
  )

  if __name__ == "__main__":
+     demo.launch()
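
A quick standalone check of the prefix handling the new predict() introduces: each streamed reply is prefixed with "[<model>]\n" for display, and re.sub(r"^\[.*?\]\s*", "", ...) strips that tag again before the turn is replayed to the router, so the model name never reaches the conversation history. A minimal sketch, with a made-up reply string:

import re

# Made-up assistant reply, shaped the way predict() streams it:
# "[<model that served the call>]" on its own line, then the answer.
assistant_reply = "[gpt-4-1106-preview]\nNucleus sampling keeps only the most probable tokens."

# Same regex as app.py: drop the leading "[...]" tag and the whitespace after it.
clean = re.sub(r"^\[.*?\]\s*", "", assistant_reply)

print(clean)  # -> "Nucleus sampling keeps only the most probable tokens."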
requirements.txt CHANGED
@@ -1 +1,2 @@
- huggingface_hub==0.22.2
+ huggingface_hub==0.22.2
+ routellm[serve]
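
With routellm[serve] added to requirements.txt, the routing path used by the new app.py can be exercised directly. Below is a minimal non-streaming sketch that reuses only calls appearing in the diff (Controller(...) and client.chat.completions.create(...) with a router-mf-<threshold> model name); the prompt is made up, and the assumption that the non-streaming response follows the usual OpenAI schema (.model, .choices[0].message.content), along with whatever API keys the two backends require, is not part of the commit:

from routellm.controller import Controller

# Same router configuration as the new app.py.
client = Controller(
    routers=["mf"],
    strong_model="gpt-4-1106-preview",
    weak_model="anyscale/mistralai/Mixtral-8x7B-Instruct-v0.1",
)

# The Space's default threshold; per the demo description it is calibrated so
# that roughly 50% of calls go to GPT-4 and the rest to Mixtral 8x7B.
threshold = 0.11593

response = client.chat.completions.create(
    model=f"router-mf-{threshold}",  # router name + threshold, as in predict()
    messages=[{"role": "user", "content": "Summarize what an LLM router does."}],
    temperature=0.8,
)

# Assuming an OpenAI-style response object: .model reports which underlying
# model the router actually picked (the value predict() surfaces as "[model]").
print(response.model)
print(response.choices[0].message.content)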