"""Gradio demo: raccoon detection with an IceVision EfficientDet (tf_d0) model.

Downloads fine-tuned weights from the Hugging Face Hub, rebuilds the model
architecture, and serves an image-in / annotated-image-out interface.
"""

import PIL
import gradio as gr
import torch
from huggingface_hub import hf_hub_download

from fastai.vision.all import *
from icevision.all import *
from icevision.models.checkpoint import *

# Class map must match the one used at training time (order matters).
class_map = ClassMap(['raccoon', 'banana'])
size = 384

# Rebuild the exact architecture the checkpoint was trained with.
model_type = models.ross.efficientdet
model_2 = model_type.model(
    backbone=model_type.backbones.tf_d0(pretrained=True),
    num_classes=len(class_map),
    img_size=size,
)

# hf_hub_download returns the path of the cached file — it does NOT place the
# file in the current working directory, so we must load from the returned path
# (the original `torch.load('./ross_racoon.pth')` only worked by accident).
weights_path = hf_hub_download(
    repo_id="Alesteba/deep_model_02",
    filename="ross_racoon.pth",
)
state_dict = torch.load(weights_path, map_location=torch.device('cpu'))
model_2.load_state_dict(state_dict)

# Inference-time transforms: resize/pad to the training size, then normalize.
infer_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(size), tfms.A.Normalize()])


def predict(img):
    """Run end-to-end detection on a numpy RGB image.

    Args:
        img: HxWx3 uint8 numpy array as delivered by the Gradio Image input.

    Returns:
        A PIL image with the detections (threshold 0.5) drawn on it.
    """
    img = PIL.Image.fromarray(img, "RGB")
    pred_dict_2 = model_type.end2end_detect(
        img,
        infer_tfms,
        model_2.to("cpu"),
        class_map=class_map,
        detection_threshold=0.5,
    )
    return pred_dict_2["img"]


# NOTE(review): gr.inputs / gr.outputs is the legacy (pre-Gradio 3.x removal)
# API — kept as-is to match the runtime this demo targets; migrate to
# gr.Image(...) if the gradio dependency is upgraded.
gr.Interface(
    fn=predict,
    inputs=gr.inputs.Image(shape=(128, 128)),
    outputs=[gr.outputs.Image(type="pil", label="VFNet Inference")],
    examples=['raccoon-test_1.jpg', 'raccoon-test_2.jpg'],
).launch(share=False)