fantaxy committed
Commit ff1697a • 1 Parent(s): 8b62ce7

Update app.py

Files changed (1)
  1. app.py +18 -46
app.py CHANGED
@@ -1,33 +1,20 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
+from gradio_client import Client
 import os
 import logging
-from gradio_client import Client  # image generation API client
 
 # logging setup
 logging.basicConfig(level=logging.INFO)
 
-# read the Hugging Face API token from an environment variable
-hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN"))
+# API client setup
+api_client = Client("http://211.233.58.202:7960/")
 
-# image generation API client setup
-client = Client("http://211.233.58.202:7960/")
-
-def respond(message, history, system_message, max_tokens, temperature, top_p):
-    # initial setup and variable definitions
-    system_prefix = "System: Answer in the same language as the input."
-    full_system_message = f"{system_prefix}{system_message}"
-
-    messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]
-    for user_msg, assistant_msg in history:
-        messages.append({"role": "user", "content": user_msg})
-        if assistant_msg:
-            messages.append({"role": "assistant", "content": assistant_msg})
-    messages.append({"role": "user", "content": message})
-
-    # image generation request
+def respond(message):
+    logging.info("Received message: %s", message)
+
     try:
-        result = client.predict(
+        # image generation request
+        result = api_client.predict(
             prompt=message,
             seed=123,
             randomize_seed=False,
@@ -37,35 +24,20 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
             num_inference_steps=28,
             api_name="/infer_t2i"
         )
-        if 'url' in result:
-            return result['url']
-        else:
-            logging.error("Image generation failed with error: %s", result.get('error', 'Unknown error'))
-            return "Failed to generate image."
+        logging.info("Image generated: %s", result['url'])
+
+        # return the resulting image
+        return result['url']
     except Exception as e:
         logging.error("Error during API request: %s", str(e))
-        return f"An error occurred: {str(e)}"
-
-theme = "Nymbo/Nymbo_Theme"
-css = """
-footer {
-    visibility: hidden;
-}
-"""
+        return "Failed to generate image due to an error."
 
-# set up the Gradio chat interface
-demo = gr.ChatInterface(
+# Gradio interface setup
+demo = gr.Interface(
     fn=respond,
-    additional_inputs=[
-        gr.Textbox(value="You are an AI assistant.", label="System Prompt"),
-        gr.Slider(minimum=1, maximum=2000, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"
-        ),
-    ],
-    theme=theme,
-    css=css
+    inputs=gr.Textbox(label="Enter your prompt for image generation"),
+    outputs=gr.Image(label="Generated Image"),
+    theme="Nymbo/Nymbo_Theme"
 )
 
 if __name__ == "__main__":
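
For reference, a minimal sketch (not part of the commit) of calling the same /infer_t2i endpoint that the new respond() wraps, using gradio_client. It passes only the keyword arguments visible in this diff; the example prompt is hypothetical, and relying on the endpoint's defaults for the arguments the hunk does not show is an assumption, as is the result exposing a 'url' key the way app.py reads it.

# Sketch only: query the upstream text-to-image endpoint directly.
from gradio_client import Client

client = Client("http://211.233.58.202:7960/")

result = client.predict(
    prompt="a watercolor fox in a snowy forest",  # hypothetical example prompt
    seed=123,                                     # same fixed seed as app.py
    randomize_seed=False,
    num_inference_steps=28,
    api_name="/infer_t2i",
)

# app.py treats the result as a mapping holding the generated image's URL.
print(result["url"])

The switch from gr.ChatInterface to gr.Interface is what lets respond() drop the history, system-prompt, and sampling parameters: the function now takes a single prompt string and returns a single image URL for the gr.Image output.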