import gradio as gr
import io
import json
import re

import requests
from PIL import Image


# Execute the generated Python code and render the matplotlib figure it produces.
def execute_code(code):
    namespace = {}
    exec(code, namespace)
    # Assume the generated code creates a matplotlib figure named 'fig'.
    fig = namespace.get('fig')
    if fig is None:
        raise ValueError("The code did not generate a matplotlib figure named 'fig'")
    # Render the figure to PNG bytes and return it as a PIL image,
    # which gr.Image can display directly.
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    return Image.open(buf)


def gpt_inference(base_url, model, openai_key, prompt):
    newprompt = (
        f'Write Python code that does the following: \n\n{prompt}\n\n'
        'Note, the code is going to be executed in a Jupyter Python kernel. '
        'The code should create a matplotlib figure and assign it to a variable named "fig". '
        'The "fig" variable will be used for further processing.\n\n'
        'Last instruction, and this is the most important: just return code. '
        'No other outputs, as your full response will directly be executed in the kernel.'
    )
    data = {
        "model": model,
        "messages": [{"role": "user", "content": newprompt}],
        "temperature": 0.7,
    }
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_key}",
    }
    response = requests.post(f"{base_url}/v1/chat/completions", headers=headers, data=json.dumps(data))
    print("Status code:", response.status_code)
    print("Response JSON:", response.json())
    code = response.json()["choices"][0]["message"]["content"]
    # The model sometimes wraps its answer in a Markdown code fence; strip it if present.
    match = re.search(r"```(?:python)?\s*(.*?)```", code, re.DOTALL)
    if match:
        code = match.group(1)
    print(f"code:{code}")
    img = execute_code(code)
    return img


iface = gr.Interface(
    fn=gpt_inference,
    inputs=[
        gr.Textbox(label="Base URL"),
        gr.Dropdown(choices=["gpt-3.5-turbo", "gpt-4"], label="Model"),
        gr.Textbox(label="OpenAI Key"),
        gr.Textbox(label="Prompt"),
    ],
    outputs=gr.Image(label="Generated figure"),
)
iface.launch()