gabrielmotablima committed
Commit e1e390a
Parent: 125cdef

Update README.md

Files changed (1): README.md +13 -2
README.md CHANGED
@@ -72,7 +72,7 @@ model = VisionEncoderDecoderModel.from_pretrained("laicsiifes/swin-distilbertimbau")
 tokenizer = AutoTokenizer.from_pretrained("laicsiifes/swin-distilbertimbau")
 image_processor = AutoImageProcessor.from_pretrained("laicsiifes/swin-distilbertimbau")
 
-# perform inference on an image
+# preprocess an image
 url = "http://images.cocodataset.org/val2017/000000039769.jpg"
 image = Image.open(requests.get(url, stream=True).raw)
 pixel_values = image_processor(image, return_tensors="pt").pixel_values
@@ -80,9 +80,20 @@ pixel_values = image_processor(image, return_tensors="pt").pixel_values
 # generate caption
 generated_ids = model.generate(pixel_values)
 generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-print(generated_text)
 ```
 
+```python
+import matplotlib.pyplot as plt
+
+# plot image with caption
+plt.imshow(image)
+plt.axis("off")
+plt.title(generated_text)
+plt.show()
+```
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/637a149c0dbdecf0b5bd6490/IU_mU3gEzAR9FLEq4TQKL.png)
+
 ## 📈 Results
 
 The evaluation metrics Cider-D, BLEU@4, ROUGE-L, METEOR and BERTScore
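
For reference, a consolidated sketch of the README usage snippet as it reads after this commit. The import lines and the `model = VisionEncoderDecoderModel.from_pretrained(...)` call come from the unchanged part of the README (only partially visible in the hunk headers above), so treat them as assumed context rather than part of this diff:

```python
import requests
import matplotlib.pyplot as plt

from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer, VisionEncoderDecoderModel

# load model, tokenizer, and image processor
# (unchanged README context; exact import lines are assumed)
model = VisionEncoderDecoderModel.from_pretrained("laicsiifes/swin-distilbertimbau")
tokenizer = AutoTokenizer.from_pretrained("laicsiifes/swin-distilbertimbau")
image_processor = AutoImageProcessor.from_pretrained("laicsiifes/swin-distilbertimbau")

# preprocess an image
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
pixel_values = image_processor(image, return_tensors="pt").pixel_values

# generate caption
generated_ids = model.generate(pixel_values)
generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

# plot image with caption (replaces the removed print(generated_text))
plt.imshow(image)
plt.axis("off")
plt.title(generated_text)
plt.show()
```

In effect, the commit swaps the bare `print(generated_text)` for `plt.title(generated_text)`, so the generated caption is rendered on the image itself, as in the example figure linked above.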