from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *

import torch
import torchvision.transforms as transforms
import numpy as np
from PIL import Image


# Load the fastai segmentation learner from the Hugging Face Hub.
repo_id = "igmarco/grapes-semanticsegmentation"
learner = from_pretrained_fastai(repo_id)

# Run inference with the learner's underlying PyTorch model on the available device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = learner.model

def transform_image(image):
    # Convert to a tensor and apply standard ImageNet mean/std normalization.
    my_transforms = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize(
                                            [0.485, 0.456, 0.406],
                                            [0.229, 0.224, 0.225])])
    image_aux = image
    # Add a batch dimension and move the tensor to the model's device.
    return my_transforms(image_aux).unsqueeze(0).to(device)

def predict(img):
    # Resize the input to 480x640 (height x width) and normalize it.
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)

    model.to(device)
    model.eval()  # inference mode (affects batchnorm/dropout layers)
    with torch.no_grad():
        outputs = model(tensor)

    # Per-pixel predicted class index (classes 0-4).
    outputs = torch.argmax(outputs, 1)

    # Map each class index to a distinct grayscale value so the mask is easy to see.
    mask = np.array(outputs.cpu())
    mask[mask == 0] = 0
    mask[mask == 1] = 150
    mask[mask == 2] = 25
    mask[mask == 3] = 74
    mask[mask == 4] = 255

    mask = np.reshape(mask, (480, 640))

    return Image.fromarray(mask.astype('uint8'))

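# Optional local sanity check (a sketch, not part of the app; assumes grapes1.jpg
# sits next to this script, as the Gradio examples below do):
#   sample = Image.open("grapes1.jpg").convert("RGB")
#   predict(sample).save("grapes1_mask.png")
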
# Gradio UI: upload an image, get the predicted segmentation mask back as an image.
# predict() resizes inputs to 480x640 itself, so the image components need no fixed shape.
gr.Interface(fn=predict,
             inputs=gr.Image(type="pil"),
             outputs=gr.Image(type="pil"),
             examples=['grapes1.jpg', 'grapes2.jpg']).launch(share=False)
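
# Packages this script imports (a sketch of what the Space's requirements.txt would
# need, not taken from the original repo): fastai, gradio, huggingface_hub, torch,
# torchvision, numpy, Pillow.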