awacke1 committed
Commit 861e7d1
1 Parent(s): efa10e9

Update app.py

Files changed (1)
app.py +20 -58
app.py CHANGED
@@ -1,15 +1,11 @@
+
 import os
 import asyncio
 from concurrent.futures import ThreadPoolExecutor
 import requests
 import gradio as gr
 
-MAX_NEW_TOKENS = 256
-TOKEN = os.environ.get("HF_TOKEN", None)
-URLS = [
-    "https://api-inference.huggingface.co/models/google/flan-ul2",
-    "https://api-inference.huggingface.co/models/google/flan-t5-xxl",
-]
+
 examples = [
     ["Please answer to the following question. Who is going to be the next Ballon d'or?"],
     ["Q: Can Barack Obama have a conversation with George Washington? Give the rationale before answering."],
@@ -30,6 +26,14 @@ description = "This demo compares [Flan-T5-xxl](https://huggingface.co/google/fl
 
 
 
+MAX_NEW_TOKENS = 256
+TOKEN = os.environ.get("API_TOKEN", None)
+URLS = [
+    "https://api-inference.huggingface.co/models/google/flan-ul2",
+    "https://api-inference.huggingface.co/models/google/flan-t5-xxl",
+]
+
+
 def fetch(session, text, api_url):
     model = api_url.split("/")[-1]
     response = session.post(api_url, json={"inputs": text, "parameters": {"max_new_tokens": MAX_NEW_TOKENS}})
@@ -37,6 +41,8 @@ def fetch(session, text, api_url):
         return model, None
     return model, response.json()
 
+
+
 async def inference(text):
     with ThreadPoolExecutor(max_workers=2) as executor:
         with requests.Session() as session:
@@ -61,55 +67,11 @@ async def inference(text):
     return responses
 
 
-def feedback(inputs, feedback, is_positive):
-    with open('promptlog.txt', 'a') as f:
-        f.write(f"Inputs: {inputs}\nFeedback: {feedback}\nIs positive: {is_positive}\n\n")
-
-def display_history():
-    try:
-        with open('promptlog.txt', 'r') as f:
-            history = f.read()
-    except FileNotFoundError:
-        history = "No history yet."
-    print(history)
-
-def app():
-    title = "Flan UL2 vs Flan T5 XXL"
-    description = "Compare with feedback: [Flan-T5-xxl](https://huggingface.co/google/flan-t5-xxl) and [Flan-UL2](https://huggingface.co/google/flan-ul2)."
-    inputs = gr.inputs.Textbox(lines=3, label="Input Prompt")
-    #outputs = [gr.outputs.Textbox(lines=3, label="Flan T5-UL2"), gr.outputs.Textbox(lines=3, label="Flan T5-XXL")]
-    feedback_box = gr.inputs.CheckboxGroup(["Positive feedback", "Negative feedback"], label="Feedback")
-    feedback_text = gr.inputs.Textbox(label="Feedback Reason")
-    #feedback_button = gr.inputs.Button(label="Submit Feedback")
-    #display_history_button = gr.inputs.Button(label="Display Feedback History")
-
-    def predict_text(inputs):
-        return inference(inputs)
-
-    def handle_feedback(inputs, feedback, is_positive):
-        feedback(inputs, feedback, is_positive)
-        return "Thank you for your feedback!"
-
-    def handle_display_history():
-        display_history()
-
-    #gr.Interface(fn=predict_text, inputs=inputs, outputs=outputs, title=title, description=description).launch()
-
-    #feedback_ui = gr.Interface(fn=handle_feedback, inputs=[inputs, feedback_box, feedback_text, feedback_button], outputs=gr.outputs.Textbox(label="Feedback Submitted"), title="Feedback", description="Please provide feedback on the model's response.")
-
-    #display_history_ui = gr.Interface(fn=handle_display_history, inputs=display_history_button, outputs=gr.outputs.Textbox(label="Feedback History"), title="Feedback History", description="View history of feedback submissions.")
-
-    #gr.Interface([feedback_ui, display_history_ui], columns=2, title="Flan Feedback").launch()
-
-
-    io = gr.Interface(
-        inference,
-        gr.Textbox(lines=3),
-        outputs=[gr.Textbox(lines=3, label="Flan T5-UL2"), gr.Textbox(lines=3, label="Flan T5-XXL")],
-        #title=title,
-        description=description,
-        examples=examples,
-    )
-    io.launch()
-
-app()
+io = gr.Interface(
+    inference,
+    gr.Textbox(lines=3),
+    outputs=[gr.Textbox(lines=3, label="Flan T5-UL2"), gr.Textbox(lines=3, label="Flan T5-XXL")],
+    description=description,
+    examples=examples,
+)
+io.launch()
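
Note for readers following the diff: the hunks show only the signature of inference() and its first two with-statements; the body that actually fans the prompt out to both endpoints is hidden by the diff context. Below is a minimal sketch of how such a coroutine can dispatch the two blocking fetch() calls concurrently, using the imports already at the top of app.py. The loop/gather wiring, the Bearer header, and the [{"generated_text": ...}] response shape are assumptions, not lines from this commit.

# Sketch only: a plausible reconstruction of the elided body of inference().
# fetch(), TOKEN, and URLS are the definitions visible in the diff above.
async def inference(text):
    with ThreadPoolExecutor(max_workers=2) as executor:
        with requests.Session() as session:
            # Assumed: the token is attached as a Bearer header; the diff
            # defines TOKEN but never shows where it is consumed.
            if TOKEN:
                session.headers = {"Authorization": f"Bearer {TOKEN}"}
            loop = asyncio.get_event_loop()
            # run_in_executor offloads each blocking session.post to the
            # thread pool, so both models are queried in parallel.
            tasks = [
                loop.run_in_executor(executor, fetch, session, text, url)
                for url in URLS
            ]
            responses = [None, None]
            for model, output in await asyncio.gather(*tasks):
                if output is not None:
                    # Assumed response shape for text2text models on the
                    # hosted Inference API: [{"generated_text": "..."}].
                    responses[0 if model == "flan-ul2" else 1] = output[0]["generated_text"]
    return responses

As the new version of the file shows, the coroutine is passed straight to gr.Interface; Gradio awaits async handler functions itself, so no extra event-loop plumbing is needed at the call site.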
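
The commit also switches the secret lookup from HF_TOKEN to API_TOKEN, so the Space (or a local run) must expose that environment variable. To smoke-test an endpoint outside Gradio, a request shaped like the one fetch() sends can be issued directly; the Authorization header is again an assumption, and the API may return 503 while the model is still loading:

import os
import requests

API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-xxl"
token = os.environ.get("API_TOKEN")  # the variable name this commit switches to

resp = requests.post(
    API_URL,
    headers={"Authorization": f"Bearer {token}"} if token else None,
    json={"inputs": "Q: Is a banana a fruit?", "parameters": {"max_new_tokens": 256}},
)
print(resp.status_code, resp.json())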