from transformers_js_py import import_transformers_js, as_url
import gradio as gr
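
# import_transformers_js() loads the Transformers.js library into the browser
# runtime (this example assumes Gradio-Lite / Pyodide, where top-level await is
# available), and as_url() converts a local file path into a URL that the
# browser-side library can read.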
transformers = await import_transformers_js()
AutoProcessor = transformers.AutoProcessor
AutoModel = transformers.AutoModel
RawImage = transformers.RawImage
# Load the YOLOv9 processor and model converted for Transformers.js
processor = await AutoProcessor.from_pretrained('Xenova/yolov9-c')
# TODO: Resize the input image
model = await AutoModel.from_pretrained('Xenova/yolov9-c')
async def detect(image_path):
    # Read the uploaded image; as_url() turns the local file path into a URL
    # that the browser-side Transformers.js runtime can fetch.
    image = await RawImage.read(as_url(image_path))
    processed_input = await processor(image)
    # Predict bounding boxes
    result = await model(images=processed_input["pixel_values"])
    outputs = result["outputs"]  # Tensor
    np_outputs = outputs.numpy()  # one row per detection: [xmin, ymin, xmax, ymax, score, id]
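    # A possible refinement, not part of the original example: drop low-confidence
    # detections before labeling. Column 4 is the score; 0.25 is an arbitrary threshold.
    # np_outputs = [row for row in np_outputs if row[4] >= 0.25]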
    gradio_labels = [
        # List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]]
        (
            (int(xmin), int(ymin), int(xmax), int(ymax)),
            model.config.id2label[str(int(class_id))],
        )
        for xmin, ymin, xmax, ymax, score, class_id in np_outputs
    ]
    # gr.AnnotatedImage expects (base image, list of (bounding box, label)) pairs
    annotated_image_data = image_path, gradio_labels
    return annotated_image_data, np_outputs
demo = gr.Interface(
    detect,
    gr.Image(type="filepath"),
    [
        gr.AnnotatedImage(),
        gr.JSON(),
    ],
    examples=[
        ["cats.jpg"],
        ["city-streets.jpg"],
    ],
)
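# The example images referenced above ("cats.jpg", "city-streets.jpg") are assumed
# to be available alongside the app file, e.g. provided as additional files in a
# Gradio-Lite page.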
demo.launch()

# requirements.txt
transformers_js_py