|
import gradio as gr |
|
import matplotlib.pyplot as plt |
|
import io |
|
import numpy as np |
|
import base64 |
|
from PIL import Image |
|
import requests |
|
import json |
|
|
|
|
|
def get_image_data(plt):
    """Render the current matplotlib figure to an in-memory PIL image.

    Args:
        plt: The ``matplotlib.pyplot`` module. (The parameter deliberately
            shadows the module-level import; callers pass the module itself.)

    Returns:
        PIL.Image.Image: The current figure rendered as a PNG.
    """
    buf = io.BytesIO()
    plt.savefig(buf, format='PNG')
    buf.seek(0)
    img = Image.open(buf)
    # Force PIL to decode the whole buffer now: Image.open() is lazy, and we
    # want the returned image to stay valid independent of the buffer/figure.
    img.load()
    # Each request exec()s generated code that creates new figures; without
    # closing them, pyplot accumulates figures forever (memory leak).
    plt.close('all')
    return img
|
|
|
|
|
def execute_code(code):
    """Execute model-generated plotting code and return the resulting image.

    SECURITY WARNING: ``exec()`` on untrusted, LLM-generated code is arbitrary
    code execution on this host. Only run this app in a sandboxed or
    disposable environment.

    Args:
        code: Python source, expected to draw with ``matplotlib.pyplot``.

    Returns:
        PIL.Image.Image: The current matplotlib figure rendered as PNG.
    """
    # Run in a dedicated globals namespace: with the default (function-local)
    # namespace, generated code that defines a function and then calls it can
    # fail to resolve its own names due to exec() scoping rules.
    namespace = {"plt": plt, "np": np}
    exec(code, namespace)  # deliberate; see security warning above
    return get_image_data(plt)
|
|
|
def gpt_inference(base_url, model, openai_key, prompt):
    """Ask a chat-completions endpoint for plotting code, run it, return the image.

    Args:
        base_url: Root URL of an OpenAI-compatible API (e.g. ``https://api.openai.com``).
        model: Chat model identifier, e.g. ``gpt-3.5-turbo``.
        openai_key: API key, sent as a Bearer token.
        prompt: Natural-language description of the plot to generate.

    Returns:
        PIL.Image.Image: The figure produced by executing the model's code.

    Raises:
        requests.HTTPError: If the API responds with a non-2xx status
            (bad key, unknown model, etc.).
    """
    newprompt = f'Write Python code that does the following: \n\n{prompt}\n\nNote, the code is going to be executed in a Jupyter Python kernel.\n\nLast instruction, and this is the most important, just return code. No other outputs, as your full response will directly be executed in the kernel.'

    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {openai_key}'
    }

    data = {
        "model": model,
        "messages": [
            {
                "role": "system",
                "content": "You are a helpful assistant."
            },
            {
                "role": "user",
                "content": newprompt
            }
        ]
    }

    # json= lets requests handle serialization (equivalent to data=json.dumps(...)).
    response = requests.post(f"{base_url}/v1/chat/completions", headers=headers, json=data)
    # Fail loudly on API errors instead of a confusing KeyError on 'choices' below.
    response.raise_for_status()

    response_json = response.json()
    code = response_json['choices'][0]['message']['content'].strip()

    # Chat models often wrap code in markdown fences despite instructions;
    # exec() would choke on the backticks, so strip a leading/trailing fence.
    if code.startswith("```"):
        first_newline = code.find("\n")
        code = code[first_newline + 1:] if first_newline != -1 else ""
        if code.rstrip().endswith("```"):
            code = code.rstrip()[:-3]

    return execute_code(code)
|
|
|
# Gradio UI: collect the API endpoint, model, key, and a natural-language
# prompt; display the generated plot.
iface = gr.Interface(
    fn=gpt_inference,
    # Labels belong on the components themselves — `input_labels` is not a
    # gr.Interface parameter and raised a TypeError at startup.
    inputs=[
        gr.Textbox(label="Base URL"),
        # "gpt3.5-turbo"/"gpt4" are not valid OpenAI model identifiers; the
        # API rejects them. The correct ids are "gpt-3.5-turbo" and "gpt-4".
        gr.Dropdown(choices=["gpt-3.5-turbo", "gpt-4"], label="Model"),
        gr.Textbox(label="OpenAI Key", type="password"),
        gr.Textbox(label="Prompt"),
    ],
    outputs=gr.Image(type="pil"),
)
iface.launch()