Create app.py
app.py
ADDED
@@ -0,0 +1,84 @@
import os
import torch
import transformers
import gradio as gr

# Use the HF_TOKEN secret if it is set; fall back to the locally cached
# login (token=True) so the app still runs without the secret. Using
# os.environ.get avoids the KeyError that os.environ["HF_TOKEN"] would
# raise when the variable is unset.
auth_token = os.environ.get("HF_TOKEN") or True

model_id = "fcastanedo/energy_v1"
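
# Load the model once at startup so every request reuses the same weights.
# bfloat16 halves memory use relative to float32; device="cuda" assumes the
# Space runs on GPU hardware.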
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    token=auth_token,
    device="cuda",
)
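
# One-off generation at startup: the result is never shown in the UI; it
# simply verifies that the model loads and can generate before the
# interface comes up.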
messages = [
    {
        "role": "system",
        "content": (
            "You are an expert in Oil, Gas, and Petroleum for certifications "
            "such as the Petroleum Engineering Certificate (SPE). You will be "
            "provided with multiple-choice questions. Select the correct "
            "response out of the four choices."
        ),
    },
    {
        "role": "user",
        "content": "Who are you?",
    },
]
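
# Render the message list into the model's expected prompt string:
# tokenize=False returns text rather than token ids, and
# add_generation_prompt=True appends the assistant header so the model
# starts a fresh reply.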
prompt = pipeline.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
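
# Stop on either the tokenizer's EOS token or "<|eot_id|>", the
# end-of-turn marker used by Llama-3-style chat templates.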
terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

outputs = pipeline(
    prompt,
    max_new_tokens=256,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)
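
# Callback for gr.ChatInterface. `history` is part of the required
# signature but is not used here, so every turn is answered statelessly.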
def chat_function(message, history, system_prompt, max_new_tokens, temperature):
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": message},
    ]
    prompt = pipeline.tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    terminators = [
        pipeline.tokenizer.eos_token_id,
        pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]
    outputs = pipeline(
        prompt,
        max_new_tokens=max_new_tokens,
        eos_token_id=terminators,
        do_sample=True,
        # The +0.1 offset keeps the temperature strictly positive: the slider
        # below allows 0, which sampling rejects.
        temperature=temperature + 0.1,
        top_p=0.9,
    )
    # Drop the echoed prompt so only the newly generated reply is returned.
    return outputs[0]["generated_text"][len(prompt):]
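
# Build the chat UI. Widgets in additional_inputs are passed to
# chat_function in order: system_prompt, max_new_tokens, temperature.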
gr.ChatInterface(
    chat_function,
    textbox=gr.Textbox(placeholder="Enter message here", container=False, scale=7),
    chatbot=gr.Chatbot(height=400),
    additional_inputs=[
        gr.Textbox("You are a helpful AI", label="System Prompt"),
        gr.Slider(500, 4000, label="Max New Tokens"),
        gr.Slider(0, 1, label="Temperature"),
    ],
).launch()
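
Once the Space is running, it can be exercised without the browser UI via gradio_client, which exposes a ChatInterface under the default /chat endpoint. A minimal sketch, assuming the Space handle below (a placeholder, not taken from this commit) and that the additional inputs are passed positionally after the message:

    from gradio_client import Client

    client = Client("owner/space-name")  # placeholder handle for this Space
    reply = client.predict(
        "Who are you?",           # message
        "You are a helpful AI",   # system_prompt
        1000,                     # max_new_tokens
        0.5,                      # temperature
        api_name="/chat",
    )
    print(reply)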