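"""Gradio demo that runs the Davinci Eye segmentation model (quantized for CPU)
with OpenVINO Runtime and overlays the predicted mask on the input image."""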
from openvino.runtime import Core
import gradio as gr
import numpy as np
from PIL import Image
import cv2
from torchvision import transforms
core = Core()
# Read the IR model into OpenVINO Runtime and compile it for CPU
model_ir = core.read_model(model="Davinci_eye.xml")
compiled_model_ir = core.compile_model(model=model_ir, device_name='CPU')
tfms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),  # ImageNet statistics
])
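# Assumption, inferred from the preprocessing below: the IR model expects a
# 1x3x512x512 float tensor (NCHW) and returns per-class scores that are
# argmax'd into a single-channel class-index mask.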
def segment_image(filepath):
    """Segment one image and return the overlay and the raw colour mask."""
    image = cv2.imread(filepath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (512, 512))
    # Scale to [0, 1] before ToTensor (ToTensor does not rescale float inputs)
    x = tfms(image.copy() / 255.)
    # Previous ONNX Runtime inference path, kept for reference:
    # ort_input = {ort_session.get_inputs()[0].name: x.cpu().unsqueeze(0).float().numpy()}
    # out = ort_session.run(None, ort_input)
    out = compiled_model_ir([x.unsqueeze(0).float().cpu().numpy()])
    # Per-pixel class indices from the first model output
    pred_mask = np.squeeze(np.argmax(out[0], 1)).astype(np.uint8)
    # Colour-map the class indices and scale for visibility
    color_mask = cv2.applyColorMap(pred_mask, cv2.COLORMAP_MAGMA) * 10
    masked_image = cv2.addWeighted(image, 0.6, color_mask, 0.4, 0.1)
    return Image.fromarray(masked_image), Image.fromarray(color_mask)
demo = gr.Interface(fn=segment_image,
                    inputs=gr.Image(type='filepath'),
                    outputs=[gr.Image(type="pil"), gr.Image(type="pil")],
                    examples=["R001_ch1_video_03_00-29-13-03.jpg",
                              "R002_ch1_video_01_01-07-25-19.jpg",
                              "R003_ch1_video_05_00-22-42-23.jpg",
                              "R004_ch1_video_01_01-12-22-00.jpg",
                              "R005_ch1_video_03_00-19-10-11.jpg",
                              "R006_ch1_video_01_00-45-02-10.jpg",
                              "R013_ch1_video_03_00-40-17-11.jpg"],
                    theme=gr.themes.Glass(primary_hue=gr.themes.colors.blue,
                                          secondary_hue=gr.themes.colors.blue),
                    title="Davinci Eye (Quantized for CPU)")
demo.launch()
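# To run locally (assumption: Davinci_eye.xml/.bin and the example images sit
# next to this script): pip install openvino gradio torch torchvision
# opencv-python pillow numpy, then run `python app.py`.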