K00B404 committed on
Commit
25ddf29
·
verified ·
1 Parent(s): e9e5d40

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -15
app.py CHANGED
@@ -1,21 +1,47 @@
1
  from huggingface_hub import InferenceClient
2
  import os
3
  api_key=os.getenv("HF_TOKEN")
 
 
 
 
 
 
4
  client = InferenceClient(api_key=api_key)
5
 
6
- messages = [
7
- { "role": "system", "content": "You are a good image generation prompt engineer for diffuser image generation models" },
8
- { "role": "user", "content": "Visualy describe a random character in extreme detail" }
9
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
- result = client.chat.completions.create(
12
- model="HuggingFaceH4/zephyr-7b-beta",
13
- messages=messages,
14
- temperature=0.5,
15
- max_tokens=2048,
16
- top_p=0.7,
17
- stream=True
18
- )
19
- text=result#.choices[0].delta.content
20
- #for chunk in stream:
21
- #print(chunk.choices[0].delta.content)
 
1
  from huggingface_hub import InferenceClient
2
  import os
3
  api_key=os.getenv("HF_TOKEN")
4
+ import gradio as gr
5
+ from huggingface_hub import InferenceClient
6
+ import os
7
+
8
+ # Initialize Hugging Face Inference Client
9
+ api_key = os.getenv("HF_TOKEN")
10
  client = InferenceClient(api_key=api_key)
11
 
12
+ def chat_with_model(user_input):
13
+ """Send user input to the model and return its response."""
14
+ messages = [
15
+ {"role": "system", "content": "You are a good image generation prompt engineer for diffuser image generation models"},
16
+ {"role": "user", "content": user_input}
17
+ ]
18
+
19
+ try:
20
+ result = client.chat.completions.create(
21
+ model="HuggingFaceH4/zephyr-7b-beta",
22
+ messages=messages,
23
+ temperature=0.5,
24
+ max_tokens=2048,
25
+ top_p=0.7,
26
+ stream=False # Stream disabled for simplicity
27
+ )
28
+ return result["choices"][0]["message"]["content"]
29
+ except Exception as e:
30
+ return f"Error: {str(e)}"
31
+
32
+ # Gradio Interface
33
+ with gr.Blocks() as demo:
34
+ gr.Markdown("## Hugging Face Chatbot with Gradio")
35
+
36
+ with gr.Row():
37
+ with gr.Column():
38
+ user_input = gr.Textbox(label="Enter your prompt", placeholder="Describe the character or request a detailed description...")
39
+ submit_button = gr.Button("Generate")
40
+
41
+ with gr.Column():
42
+ output = gr.Textbox(label="Model Response", interactive=False)
43
+
44
+ submit_button.click(chat_with_model, inputs=[user_input], outputs=[output])
45
 
46
+ # Run the app
47
+ demo.launch()