hsuwill000 committed
Commit ab450a5
Parent(s): 5c8907e
Create app.py
app.py
ADDED
@@ -0,0 +1,29 @@
+import gradio as gr
+from optimum.intel import OVModelForCausalLM
+from transformers import AutoTokenizer, pipeline
+
+# Load the model and tokenizer
+model_id = "hsuwill000/SmolLM2-135M-openvino"
+model = OVModelForCausalLM.from_pretrained(model_id, device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+# Build the text-generation pipeline
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+def respond(message, history):
+    # Merge the current message with the latest history entry
+    input_text = message if not history else history[-1]["content"] + " " + message
+    # Get the model's response
+    response = pipe(input_text, max_length=500, truncation=True, num_return_sequences=1)
+    reply = response[0]['generated_text']
+
+    # Log the exchange and return the reply
+    print(f"Message: {message}")
+    print(f"Reply: {reply}")
+    return reply
+
+# Set up the Gradio chat interface
+demo = gr.ChatInterface(fn=respond, title="Chat with SmolLM2-135M", description="Chat with SmolLM2-135M-openvino!", type='messages')
+
+if __name__ == "__main__":
+    demo.launch()
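The respond function above only folds the most recent history entry into the prompt. Below is a minimal sketch, not part of this commit, of full multi-turn prompting: it reuses the pipe and tokenizer defined above and assumes the checkpoint's tokenizer ships a chat template (instruct-tuned SmolLM2 checkpoints do; base checkpoints may not). respond_with_history is a hypothetical name introduced here for illustration.

def respond_with_history(message, history):
    # Sketch only, not part of this commit. With type='messages',
    # Gradio passes history as a list of {"role": ..., "content": ...} dicts.
    messages = history + [{"role": "user", "content": message}]
    # Render the whole conversation with the tokenizer's chat template
    # (assumes the tokenizer has one configured).
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    response = pipe(prompt, max_new_tokens=256, truncation=True)
    # The text-generation pipeline returns prompt + continuation;
    # strip the prompt so only the new reply is returned.
    return response[0]["generated_text"][len(prompt):]

Swapping fn=respond for fn=respond_with_history in the gr.ChatInterface call would be enough to try it. Running this Space locally also requires gradio, transformers, and optimum with its OpenVINO extra installed.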