K00B404 committed on
Commit
de3eaf5
·
verified ·
1 Parent(s): 25ddf29

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -8
app.py CHANGED
@@ -1,18 +1,29 @@
1
- from huggingface_hub import InferenceClient
2
- import os
3
- api_key=os.getenv("HF_TOKEN")
4
  import gradio as gr
5
  from huggingface_hub import InferenceClient
6
  import os
 
7
 
8
  # Initialize Hugging Face Inference Client
9
  api_key = os.getenv("HF_TOKEN")
10
  client = InferenceClient(api_key=api_key)
11
 
12
- def chat_with_model(user_input):
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  """Send user input to the model and return its response."""
14
  messages = [
15
- {"role": "system", "content": "You are a good image generation prompt engineer for diffuser image generation models"},
16
  {"role": "user", "content": user_input}
17
  ]
18
 
@@ -29,19 +40,38 @@ def chat_with_model(user_input):
29
  except Exception as e:
30
  return f"Error: {str(e)}"
31
 
 
 
 
 
 
 
 
 
 
 
32
  # Gradio Interface
33
  with gr.Blocks() as demo:
34
  gr.Markdown("## Hugging Face Chatbot with Gradio")
35
 
36
  with gr.Row():
37
  with gr.Column():
38
- user_input = gr.Textbox(label="Enter your prompt", placeholder="Describe the character or request a detailed description...")
 
 
 
 
39
  submit_button = gr.Button("Generate")
40
 
41
  with gr.Column():
42
- output = gr.Textbox(label="Model Response", interactive=False)
 
 
 
43
 
44
- submit_button.click(chat_with_model, inputs=[user_input], outputs=[output])
 
 
45
 
46
  # Run the app
47
  demo.launch()
 
 
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
+ import json
5
 
6
# Initialize Hugging Face Inference Client
# NOTE(review): api_key is None when HF_TOKEN is unset — confirm
# InferenceClient degrades gracefully (anonymous/rate-limited) in that case.
api_key = os.getenv("HF_TOKEN")
client = InferenceClient(api_key=api_key)
9
 
10
# Load or initialize system prompts.
# Persisted as a {name: prompt_text} JSON mapping next to the app.
PROMPTS_FILE = "system_prompts.json"
DEFAULT_PROMPTS = {"default": "You are a good image generation prompt engineer for diffuser image generation models"}
if os.path.exists(PROMPTS_FILE):
    try:
        with open(PROMPTS_FILE, "r", encoding="utf-8") as file:
            system_prompts = json.load(file)
    except (OSError, json.JSONDecodeError):
        # A corrupt or unreadable prompts file should not crash the app at
        # startup — fall back to the built-in default prompt instead.
        system_prompts = dict(DEFAULT_PROMPTS)
else:
    system_prompts = dict(DEFAULT_PROMPTS)
17
+
18
def save_prompts():
    """Persist the in-memory ``system_prompts`` dict to ``PROMPTS_FILE``.

    Overwrites the file on every call. UTF-8 is forced (and ``ensure_ascii``
    disabled) so non-ASCII prompt text round-trips correctly on platforms
    whose default file encoding is not UTF-8.
    """
    with open(PROMPTS_FILE, "w", encoding="utf-8") as file:
        json.dump(system_prompts, file, indent=4, ensure_ascii=False)
22
+
23
+ def chat_with_model(user_input, system_prompt):
24
  """Send user input to the model and return its response."""
25
  messages = [
26
+ {"role": "system", "content": system_prompt},
27
  {"role": "user", "content": user_input}
28
  ]
29
 
 
40
  except Exception as e:
41
  return f"Error: {str(e)}"
42
 
43
def update_prompt(name, content):
    """Create or overwrite the system prompt stored under *name*.

    The change is written through to disk immediately so it survives an
    app restart. Returns a short status message for display in the UI.
    """
    system_prompts.update({name: content})
    save_prompts()
    return f"System prompt '{name}' saved."
48
+
49
def get_prompt(name):
    """Return the stored system prompt for *name*, or "" if unknown."""
    try:
        return system_prompts[name]
    except KeyError:
        return ""
52
+
53
# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("## Hugging Face Chatbot with Gradio")

    with gr.Row():
        with gr.Column():
            system_prompt_name = gr.Dropdown(choices=list(system_prompts.keys()), label="Select System Prompt")
            system_prompt_content = gr.TextArea(label="System Prompt", value="", lines=4)
            save_prompt_button = gr.Button("Save System Prompt")
            # Surfaces update_prompt()'s status string, which was previously
            # discarded (the click handler had outputs=[]).
            save_status = gr.Markdown()

            user_input = gr.TextArea(label="Enter your prompt", placeholder="Describe the character or request a detailed description...", lines=4)
            submit_button = gr.Button("Generate")

        with gr.Column():
            output = gr.TextArea(label="Model Response", interactive=False, lines=10)

    def _save_and_refresh(name, content):
        """Save the prompt, then refresh the dropdown so new names appear."""
        status = update_prompt(name, content)
        return status, gr.update(choices=list(system_prompts.keys()), value=name)

    # Selecting a name loads its stored text into the editor.
    system_prompt_name.change(get_prompt, inputs=[system_prompt_name], outputs=[system_prompt_content])
    # Fix: show the save confirmation and refresh the dropdown choices —
    # previously a newly saved prompt name never appeared until restart.
    save_prompt_button.click(
        _save_and_refresh,
        inputs=[system_prompt_name, system_prompt_content],
        outputs=[save_status, system_prompt_name],
    )
    submit_button.click(chat_with_model, inputs=[user_input, system_prompt_content], outputs=[output])

# Run the app
demo.launch()