Spaces: Running on Zero

Nihal Nayak committed · Commit 99d2247
Parent(s): 0e24f0d
add: bonito
app.py CHANGED
@@ -5,7 +5,7 @@ from huggingface_hub import InferenceClient
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("
+client = InferenceClient("BatsResearch/bonito-v1")
 
 
 @spaces.GPU
@@ -16,29 +16,30 @@ def respond(
     temperature,
     top_p,
 ):
-
-
-    # if val[0]:
-    #     messages.append({"role": "user", "content": val[0]})
-    # if val[1]:
-    #     messages.append({"role": "assistant", "content": val[1]})
-    messages = []
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.text_generation(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+    task_type = task_type.lower()
+    input_text = "<|tasktype|>\n" + task_type.strip()
+    input_text += "\n<|context|>\n" + message.strip() + "\n<|task|>\n"
+
+    response = client.text_generation(input_text, max_length=max_tokens, temperature=temperature, top_p=top_p)
+
+    return response
+    # messages = []
+    # messages.append({"role": "user", "content": message})
+
+    # response = ""
+
+    # for message in client.text_generation(
+    #     messages,
+    #     max_tokens=max_tokens,
+    #     stream=True,
+    #     temperature=temperature,
+    #     top_p=top_p,
+    # ):
+    #     token = message.choices[0].delta.content
+
+    #     response += token
+    #     yield response
 
 
 """
@@ -77,6 +78,8 @@ task_types = [
     "textual entailment",
     "natural language inference",
 ]
+# capitalize for better readability
+task_types = [task_type.capitalize() for task_type in task_types]
 
 demo = gr.Interface(
     fn=respond,
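Note: the new respond() builds Bonito's three-part prompt (task type, context, then a task marker) and makes a single non-streaming text_generation call in place of the old streaming chat loop. Below is a minimal standalone sketch of the same flow; generate_task and its default sampling values are illustrative and not part of the commit, and the sketch passes max_new_tokens, the length keyword that `InferenceClient.text_generation` documents (the committed code passes max_length).

from huggingface_hub import InferenceClient

client = InferenceClient("BatsResearch/bonito-v1")

def generate_task(context: str, task_type: str, max_tokens: int = 256) -> str:
    # Bonito prompt format from the diff: <|tasktype|>, <|context|>, <|task|>.
    input_text = "<|tasktype|>\n" + task_type.strip().lower()
    input_text += "\n<|context|>\n" + context.strip() + "\n<|task|>\n"
    # Single non-streaming call, mirroring the committed respond().
    return client.text_generation(
        input_text,
        max_new_tokens=max_tokens,  # the commit uses max_length; max_new_tokens is the documented kwarg
        temperature=0.5,            # illustrative defaults, not from the commit
        top_p=0.95,
    )

print(generate_task("The Moon is Earth's only natural satellite.", "natural language inference"))

The last hunk capitalizes the task-type choices for display while respond() lowercases the selection before building the prompt, so the UI labels and the lowercase task types the model expects stay consistent.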