dlimeng committed
Commit 22d26a9 · 1 Parent(s): 551db13

Update app.py

Files changed (1): app.py (+32 -13)
app.py CHANGED
@@ -1,12 +1,13 @@
 import gradio as gr
-import openai
 import matplotlib.pyplot as plt
 import io
 import numpy as np
 import base64
 from PIL import Image
+import requests
+import json

-# Convert image to base64 for display in gradio
+# Convert the image to base64 so it can be displayed in gradio
 def get_image_data(plt):
     buf = io.BytesIO()
     plt.savefig(buf, format='PNG')
@@ -14,26 +15,44 @@ def get_image_data(plt):
     img = Image.open(buf)
     return img

-# Execute Python code and generate images
+# Execute Python code and generate an image
 def execute_code(code):
     exec(code)
     return get_image_data(plt)

-def gpt_inference(prompt, model, openai_key):
-    openai.api_key = openai_key
-    response = openai.Completion.create(
-        engine=model,
-        prompt=prompt,
-        max_tokens=100
-    )
-    code = response.choices[0].text.strip()
+def gpt_inference(base_url, model, openai_key, prompt):
+
+    newprompt = f'Write Python code that does the following: \n\n{prompt}\n\nNote, the code is going to be executed in a Jupyter Python kernel.\n\nLast instruction, and this is the most important, just return code. No other outputs, as your full response will directly be executed in the kernel.'
+
+    headers = {
+        'Content-Type': 'application/json',
+        'Authorization': f'Bearer {openai_key}'
+    }
+
+    data = {
+        "model": model,
+        "messages": [
+            {
+                "role": "system",
+                "content": "You are a helpful assistant."
+            },
+            {
+                "role": "user",
+                "content": newprompt
+            }
+        ]
+    }
+
+    response = requests.post(f"{base_url}/v1/chat/completions", headers=headers, data=json.dumps(data))
+    response_json = response.json()
+    code = response_json['choices'][0]['message']['content'].strip()
     img = execute_code(code)
     return img

 iface = gr.Interface(
     fn=gpt_inference,
-    inputs=["text", gr.inputs.Dropdown(choices=["gpt3.5-turbo", "gpt4"], label="Model"), "text"],
+    inputs=["text", gr.inputs.Dropdown(choices=["gpt3.5-turbo", "gpt4"], label="Model"), "text", "text"],
     outputs=gr.outputs.Image(type="pil"),
-    input_labels=["Prompt", "Model", "OpenAI Key"]
+    input_labels=["Base URL", "Model", "OpenAI Key","Prompt"]
 )
 iface.launch()
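Note on running this revision: input_labels is not a documented gr.Interface argument, and the gr.inputs / gr.outputs namespaces are the legacy pre-3.x Gradio API, so labels are normally set on the components themselves. The sketch below is only an illustration of that, not part of the commit; the model identifiers gpt-3.5-turbo / gpt-4, the default base URL, and the password-style key field are assumptions.

# Sketch only (assumes Gradio 3.x): labels live on the components because
# gr.Interface has no input_labels parameter. gpt_inference is the function
# defined in app.py above; model IDs and the default base URL are assumptions.
import gradio as gr

iface = gr.Interface(
    fn=gpt_inference,
    inputs=[
        gr.Textbox(label="Base URL", value="https://api.openai.com"),
        gr.Dropdown(choices=["gpt-3.5-turbo", "gpt-4"], label="Model"),
        gr.Textbox(label="OpenAI Key", type="password"),
        gr.Textbox(label="Prompt"),
    ],
    outputs=gr.Image(type="pil"),
)
iface.launch()

Similarly, although the prompt asks the model to return bare code, chat models often still wrap replies in ``` fences, and exec() would then raise a SyntaxError. A small guard such as the hypothetical strip_code_fences helper below, applied before execute_code, is one way to make that path more robust.

def strip_code_fences(text):
    # Hypothetical helper, not in the commit: drop a leading ``` / ```python line
    # and a trailing ``` line if the model wrapped its reply in a fence.
    lines = text.strip().splitlines()
    if lines and lines[0].startswith("```"):
        lines = lines[1:]
    if lines and lines[-1].startswith("```"):
        lines = lines[:-1]
    return "\n".join(lines)

# code and execute_code come from gpt_inference in app.py above
img = execute_code(strip_code_fences(code))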