|
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
import torchvision.transforms as T
import gradio as gr
|
# Load the processor and model once at module level so they are not
# re-downloaded on every Gradio call.
processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-large-cityscapes-semantic")
model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-large-cityscapes-semantic")


def greet(url):
    # Download the image from the given URL.
    image = Image.open(requests.get(url, stream=True).raw)

    # Preprocess the image and run inference.
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Raw per-query class and mask logits (not needed for the semantic map below).
    class_queries_logits = outputs.class_queries_logits
    masks_queries_logits = outputs.masks_queries_logits

    # Collapse the per-query predictions into a single (height, width) map of class ids.
    predicted_semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]

    # Rescale the class ids to 0-255 so the map is visible as a grayscale image.
    predicted_semantic_map_scaled = (predicted_semantic_map - predicted_semantic_map.min()) / (predicted_semantic_map.max() - predicted_semantic_map.min()) * 255
    predicted_semantic_map_uint8 = predicted_semantic_map_scaled.to(torch.uint8)

    # Convert the tensor to a PIL image that Gradio can display.
    tensor_to_pil = T.ToPILImage()
    segmentation_image = tensor_to_pil(predicted_semantic_map_uint8)

    return segmentation_image
|
|
|
url = "http://www.apparelnews.co.kr/upfiles/manage/202302/5d5f694177b26fc86e5db623bf7ae4b7.jpg" |
|
|
|
|
|
iface = gr.Interface( |
|
fn=greet, |
|
inputs=gr.Image(value=url), |
|
live=True |
|
) |
|
|
|
iface.launch(debug = True) |
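
# Optional: quick sanity check without the Gradio UI (assumes the example URL
# above is still reachable); greet() returns a PIL image that can be saved.
# result = greet(url)
# result.save("segmentation.png")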
|
|