Allen Park committed
Commit 210b40c · Parent: d232ed1
add code to call lynx model
app.py CHANGED
@@ -1,7 +1,41 @@
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 import gradio as gr
 
-
-
+PROMPT = """
+Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.
+
+--
+QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
+What word is used to classify a group or family of related living
+organisms; two examples being
+Clytostoma from tropical America and
+Syneilesis from East Asia?
+
+--
+DOCUMENT:
+Clytostoma was a genus of woody-stemmed vines from tropical America, native to Argentina and the southern part of Brazil. Syneilesis is a genus of East Asian plants
+in the groundsel tribe within the Asteraceae.
+
+--
+ANSWER:
+The two examples are plants.
+
+--
+
+Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
+{{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}}
+"""
+
+
+def greet(question, document, answer):  # the three arguments are not yet used; PROMPT above is hard-coded
+    tokenizer = AutoTokenizer.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct")
+    model = AutoModelForCausalLM.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct", cache_dir='/tmp/cache', torch_dtype=torch.float16, low_cpu_mem_usage=True)
+    inputs = tokenizer(PROMPT, return_tensors="pt")
+    outputs = model.generate(**inputs, max_new_tokens=256)  # the 256-token cap is an assumed value
+    generated_text = tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)  # decode only the generated tokens, not the echoed prompt
+    print(generated_text)
+    return generated_text
 
 demo = gr.Interface(fn=greet, inputs="text", outputs="text")
 demo.launch()
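As committed, the app still has two gaps: gr.Interface passes a single "text" input to greet, which now takes three parameters, and the question/document/answer it receives are never substituted into PROMPT (the doubled braces around the JSON example suggest the template was meant to be filled with str.format). A minimal sketch of how the wiring could look; the judge name, the {question}/{document}/{answer} placeholders, and the 256-token cap are assumptions, not part of the commit, and the unused TextIteratorStreamer import is dropped:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

MODEL_ID = "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"

# Load once at startup; loading inside the handler repeats this work on every request.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, cache_dir="/tmp/cache", torch_dtype=torch.float16, low_cpu_mem_usage=True)

# Same instructions as the committed PROMPT, but with format placeholders
# ({question}/{document}/{answer} are assumed names, not in the commit).
PROMPT_TEMPLATE = """
Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.

--
QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
{question}

--
DOCUMENT:
{document}

--
ANSWER:
{answer}

--

Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
{{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}}
"""


def judge(question, document, answer):
    prompt = PROMPT_TEMPLATE.format(question=question, document=document, answer=answer)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256)  # assumed cap, matching the fix above
    # Return only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)


# Three text inputs, one per parameter of the handler.
demo = gr.Interface(fn=judge, inputs=["text", "text", "text"], outputs="text")
demo.launch()

Loading at module scope also matters on Spaces: the handler runs once per submission, and a float16 8B-parameter checkpoint is roughly 16 GB of weights, so the committed per-call from_pretrained would re-read the model from /tmp/cache on every request.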