import os
import io
from PIL import Image
import base64
import gradio as gr
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
hf_api_key = os.environ['HF_API_KEY']
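# Assumes a local .env file (or the environment) defines HF_API_KEY with a
# Hugging Face access token; os.environ raises a KeyError if it is missing.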
#### Helper functions
import requests
import json

# We call multiple Inference API endpoints, so the endpoint URL is a parameter.
def get_completion(inputs, parameters=None, ENDPOINT_URL=""):
    headers = {
        "Authorization": f"Bearer {hf_api_key}",
        "Content-Type": "application/json"
    }
    data = {"inputs": inputs}
    if parameters is not None:
        data.update({"parameters": parameters})
    response = requests.request("POST",
                                ENDPOINT_URL,
                                headers=headers,
                                data=json.dumps(data))
    return json.loads(response.content.decode("utf-8"))
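
# Usage sketch (the model id below is a placeholder, not one used by this app):
#   get_completion("some input text", None,
#                  "https://api-inference.huggingface.co/models/<model-id>")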
# Same request as get_completion, but returns the raw response bytes (for image output).
def image_completion(inputs, parameters=None, ENDPOINT_URL=""):
    headers = {
        "Authorization": f"Bearer {hf_api_key}",
        "Content-Type": "application/json"
    }
    data = {"inputs": inputs}
    if parameters is not None:
        data.update({"parameters": parameters})
    response = requests.request("POST",
                                ENDPOINT_URL,
                                headers=headers,
                                data=json.dumps(data))
    return response.content
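
# For the text-to-image endpoint the response body is the generated image
# itself (raw image bytes), which is why this variant skips JSON decoding.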
# text-to-image
TTI_ENDPOINT = "https://api-inference.huggingface.co/models/cloudqi/cqi_text_to_image_pt_v0"
# image-to-text
ITT_ENDPOINT = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base"
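# Note: these are hosted Inference API models; the first request after a cold
# start may return an error while the model loads, so a retry can be needed.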
# Bringing in the functions from lessons 3 and 4!
def image_to_base64_str(pil_image):
    byte_arr = io.BytesIO()
    pil_image.save(byte_arr, format='PNG')
    byte_arr = byte_arr.getvalue()
    return str(base64.b64encode(byte_arr).decode('utf-8'))
def base64_to_pil(img_base64):
    base64_decoded = base64.b64decode(img_base64)
    byte_stream = io.BytesIO(base64_decoded)
    pil_image = Image.open(byte_stream)
    return pil_image
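
# Round-trip sketch: base64_to_pil(image_to_base64_str(img)) yields an image
# equivalent to img, re-encoded as PNG along the way.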
def captioner(image):
    base64_image = image_to_base64_str(image)
    result = get_completion(base64_image, None, ITT_ENDPOINT)
    return result[0]['generated_text']
def generate(prompt):
    output = image_completion(prompt, None, TTI_ENDPOINT)
    result_image = Image.open(io.BytesIO(output))
    return result_image
def caption_and_generate(image):
    caption = captioner(image)
    image = generate(caption)
    return [caption, image]
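
# Pipeline: upload -> BLIP caption (ITT_ENDPOINT) -> new image generated from
# that caption (TTI_ENDPOINT); both results are shown in the UI below.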
def loadGUI():
    with gr.Blocks() as demo:
        gr.Markdown("# Describe-and-Generate game 🖍️")
        image_upload = gr.Image(label="Your first image", type="pil")
        btn_all = gr.Button("Caption and generate")
        caption = gr.Textbox(label="Generated caption")
        image_output = gr.Image(label="Generated Image")
        btn_all.click(fn=caption_and_generate,
                      inputs=[image_upload],
                      outputs=[caption, image_output])
    gr.close_all()
    demo.launch(share=True)
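
# share=True asks Gradio to create a temporary public *.gradio.live link in
# addition to the local server.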
def main():
    loadGUI()
if __name__ == "__main__":
    main()