Spaces: Build error
update app.py
app.py ADDED
@@ -0,0 +1,74 @@
import gradio as gr
import requests
import os

## Bloom
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ["HF_TOKEN"]
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
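# HF_TOKEN must be available in the environment (on Spaces, set it as a
# repository secret); os.environ["HF_TOKEN"] raises KeyError if it is missing.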

# Testing various prompts initially
prompt1 = """
word: risk
poem using word: And then the day came,
when the risk
to remain tight
in a bud
was more painful
than the risk
it took
to blossom.
word: """

prompt2 = """
Q: Joy has 5 balls. He buys 2 more cans of balls. Each can has 3 balls. How many balls does he have now?
A: Joy had 5 balls. 2 cans of 3 balls each is 6 balls. 5 + 6 = 11. The answer is 11.
Q: Jane has 16 balls. Half of the balls are golf balls, and half of the golf balls are red. How many red golf balls are there?
A: """

prompt3 = """Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?
A: Let’s think step by step.
"""

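# prompt1-prompt3 above are drafts from the initial prompt testing and are not
# wired into the UI; the Radio choice below (the same text as prompt3) is what
# actually gets sent to the model.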
def text_generate(prompt):
    # Debug prints
    print(f"*****Inside text_generate - Prompt is :{prompt}")
    json_ = {"inputs": prompt,
             "parameters": {
                 "top_p": 0.9,
                 "temperature": 1.1,
                 "max_new_tokens": 250,
                 "return_full_text": True,
             }}
    response = requests.post(API_URL, headers=headers, json=json_)
    print(f"Response is : {response}")
    output = response.json()
    print(f"output is : {output}")
    output_tmp = output[0]['generated_text']
    print(f"output_tmp is: {output_tmp}")
    # Keep only the text up to the first follow-up "Q:" the model generates
    solution = output_tmp.split("\nQ:")[0]
    print(f"Final response after splits is: {solution}")
    return solution
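# Example of the trimming above: with return_full_text=True the output starts
# with the prompt, and Bloom often keeps extending the Q/A pattern. Splitting
# on "\nQ:" keeps the original question and its answer while dropping any
# follow-up questions the model invents.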

demo = gr.Blocks()

with demo:
    gr.Markdown("<h1><center>Step By Step With Bloom</center></h1>")
    gr.Markdown(
        """BigScienceW Bloom \n\n Large language models have demonstrated a capability for 'Chain-of-thought reasoning'. Some amazing researchers recently found that adding **Let's think step by step** to a prompt improves the model's zero-shot performance. Some might say: you can get good results out of LLMs if you know how to speak to them."""
    )
    with gr.Row():
        example_prompt = gr.Radio(
            ["Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\nA: Let’s think step by step.\n"],
            label="Choose a sample Prompt")
        # input_word = gr.Textbox(placeholder="Enter a word here to generate text ...")
        generated_txt = gr.Textbox(lines=7)

    b1 = gr.Button("Generate Text")
    b1.click(text_generate, inputs=example_prompt, outputs=generated_txt)

demo.launch(enable_queue=True, debug=True)
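
A note related to the Space's "Build error" status: even once the build succeeds, output[0]['generated_text'] in text_generate assumes the Inference API returned a list. While bloom is loading (or on a bad request) the API instead returns an error dict such as {"error": "...", "estimated_time": ...}, and that line raises. Below is a minimal defensive sketch under that assumption; safe_generate is an illustrative name, not part of this commit.

import os
import requests

API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}

def safe_generate(prompt):
    # Same request as text_generate, minus the sampling parameters.
    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
    output = response.json()
    # Error responses are dicts; successful calls return a list of
    # {"generated_text": ...} dicts.
    if isinstance(output, dict) and "error" in output:
        return f"API error: {output['error']}"
    return output[0]["generated_text"].split("\nQ:")[0]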