patel18 committed on
Commit
efe939a
1 Parent(s): ffca997

Upload Gradio Examples.py

Files changed (1)
  1. Gradio Examples.py +246 -0
Gradio Examples.py ADDED
@@ -0,0 +1,246 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # #### Gradio: Comparing Transfer Learning Models
+
+ # In[1]:
+
+
+ import tensorflow as tf
+ print(tf.__version__)
+
+
+ # In[2]:
+
+
+ # Bare `pip install` lines are not valid Python in an exported script; run them
+ # through the IPython shell escape instead.
+ get_ipython().system('pip install gradio==1.6.0')
+
+
+ # In[3]:
+
+
+ get_ipython().system('pip install MarkupSafe==2.1.1')
+
+
+ # In[1]:
+
+
+ import gradio as gr
+ import tensorflow as tf
+ import numpy as np
+ from PIL import Image
+ import requests
+
+
+ # Download human-readable labels for ImageNet.
+ response = requests.get("https://git.io/JJkYN")
+ labels = response.text.split("\n")
+
+ # Instantiate both pretrained ImageNet classifiers; weights download on first use.
+ mobile_net = tf.keras.applications.MobileNetV2()
+ inception_net = tf.keras.applications.InceptionV3()
+
+
+ # In[2]:
+
+
+ def classify_image_with_mobile_net(im):
+     # MobileNetV2 expects 224x224 RGB inputs.
+     im = Image.fromarray(im.astype('uint8'), 'RGB')
+     im = im.resize((224, 224))
+     arr = np.array(im).reshape((-1, 224, 224, 3))
+     arr = tf.keras.applications.mobilenet.preprocess_input(arr)
+     prediction = mobile_net.predict(arr).flatten()
+     # Map each of the 1,000 ImageNet classes to its predicted score.
+     return {labels[i]: float(prediction[i]) for i in range(1000)}
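+
+ # Optional sanity check (not in the original upload): a random RGB array should
+ # come back as a dict of 1,000 label -> score entries. A sketch, commented out
+ # so the script's behavior is unchanged:
+ # probs = classify_image_with_mobile_net(np.random.randint(0, 256, (224, 224, 3)))
+ # assert len(probs) == 1000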
+
+
+
+ # In[3]:
+
+
+ def classify_image_with_inception_net(im):
+     # Resize the image to 299x299, the input size InceptionV3 expects.
+     im = Image.fromarray(im.astype('uint8'), 'RGB')
+     im = im.resize((299, 299))
+     arr = np.array(im).reshape((-1, 299, 299, 3))
+     arr = tf.keras.applications.inception_v3.preprocess_input(arr)
+     prediction = inception_net.predict(arr).flatten()
+     return {labels[i]: float(prediction[i]) for i in range(1000)}
+
+
+ # In[4]:
+
+
+ # Shared input/output components (legacy Gradio 1.x API, matching the pinned version).
+ imagein = gr.inputs.Image()
+ label = gr.outputs.Label(num_top_classes=3)
+ # Example images; these files must sit next to the script.
+ sample_images = [
+     ["monkey.jpg"],
+     ["sailboat.jpg"],
+     ["bicycle.jpg"],
+     ["download.jpg"],
+ ]
+
+
+ # In[6]:
+
+
+ # Passing a list of functions makes Gradio 1.x render both models side by side
+ # for the same input image.
+ gr.Interface(
+     [classify_image_with_mobile_net, classify_image_with_inception_net],
+     imagein,
+     label,
+     title="MobileNet vs. InceptionNet",
+     description="""Let's compare two well-known models that classify images into
+ one of 1,000 ImageNet categories: MobileNetV2 (top), a lightweight model with a
+ top-1 accuracy of 0.704, vs. InceptionV3 (bottom), a much heavier model with a
+ top-1 accuracy of 0.779.""",
+     examples=sample_images).launch()
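+
+ # Note: the list-of-functions comparison above relies on the legacy Gradio 1.x
+ # API (gr.inputs / gr.outputs). On Gradio 3.x+ the same comparison is usually a
+ # single function returning both label dicts; a minimal sketch, assuming gradio>=3:
+ #
+ # def classify_with_both(im):
+ #     return (classify_image_with_mobile_net(im),
+ #             classify_image_with_inception_net(im))
+ #
+ # gr.Interface(fn=classify_with_both, inputs=gr.Image(),
+ #              outputs=[gr.Label(num_top_classes=3), gr.Label(num_top_classes=3)],
+ #              examples=sample_images).launch()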
+
+
+ # In[6]:
+
+
+ get_ipython().system('pip install transformers')
+
+
+ # In[6]:
+
+
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ # Load the models and tokenizers: a binary IMDB sentiment classifier and a
+ # multilingual 1-5 star review-rating model.
+ tokenizer1 = AutoTokenizer.from_pretrained("textattack/bert-base-uncased-imdb")
+ tokenizer2 = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
+ model1 = AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-imdb")
+ model2 = AutoModelForSequenceClassification.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
+
+
+ # Define the sentiment prediction function
+ def predict_sentiment(text):
+     # Predict sentiment using model 1 (label 0 = negative, 1 = positive)
+     inputs1 = tokenizer1.encode_plus(text, padding="longest", truncation=True, return_tensors="pt")
+     outputs1 = model1(**inputs1)
+     predicted_label1 = outputs1.logits.argmax().item()
+     sentiment1 = "Positive" if predicted_label1 == 1 else "Negative"
+
+     # Predict sentiment using model 2, whose labels 0-4 correspond to 1-5 stars;
+     # collapse the star rating into a coarse sentiment.
+     inputs2 = tokenizer2.encode_plus(text, padding="longest", truncation=True, return_tensors="pt")
+     outputs2 = model2(**inputs2)
+     stars = outputs2.logits.argmax().item() + 1
+     sentiment2 = "Negative" if stars <= 2 else "Neutral" if stars == 3 else "Positive"
+
+     return sentiment1, sentiment2
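+
+ # Illustrative usage (expected behavior, not a verified output): on a clearly
+ # positive review both models should agree.
+ # predict_sentiment("A wonderful, heartfelt film.")  # -> ("Positive", "Positive")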
+
+ # Create the Gradio interface
+ iface = gr.Interface(
+     fn=predict_sentiment,
+     inputs="text",
+     outputs=["text", "text"],
+     title="Sentiment Analysis (BERT-IMDB vs. nlptown)",
+     description="Compare sentiment predictions from two Hugging Face models.",
+ )
+
+ # Launch the interface
+ iface.launch()
+
+
+ # In[17]:
+
+
+ import gradio as gr
+ from transformers import AutoModelForImageClassification
+ import torch
+ from torchvision import transforms
+ from io import BytesIO
+ from PIL import Image
+
+ # Define the available models and datasets. The original draft listed text
+ # checkpoints (bert-base-uncased, distilbert-base-uncased) here, but the
+ # interface below classifies images, so image-classification checkpoints
+ # are used instead.
+ models = {
+     "Model 1": {
+         "model_name": "google/vit-base-patch16-224",
+         "model": None
+     },
+     "Model 2": {
+         "model_name": "microsoft/resnet-50",
+         "model": None
+     },
+     # Add more models as needed
+ }
+
+ # Candidate text datasets kept from the original draft for reference; they are
+ # not used by the image-classification demo below.
+ datasets = {
+     "Dataset 1": {
+         "name": "imdb",
+         "split": "test",
+         "features": ["text"],
+     },
+     "Dataset 2": {
+         "name": "ag_news",
+         "split": "test",
+         "features": ["text"],
+     },
+     # Add more datasets as needed
+ }
+
+ # Load models
+ for model_key, model_info in models.items():
+     model_info["model"] = AutoModelForImageClassification.from_pretrained(
+         model_info["model_name"])
+     model_info["model"].eval()
+
+ # Set the device to GPU if available
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ for model_info in models.values():
+     model_info["model"].to(device)
+
+ # Define the preprocessing function: resize to 224x224, normalize with ImageNet
+ # statistics, and add a batch dimension.
+ def preprocess(image_file):
+     image_file.seek(0)  # rewind the buffer before reading
+     image = Image.open(BytesIO(image_file.read())).convert("RGB")
+     preprocess_transform = transforms.Compose([
+         transforms.Resize((224, 224)),
+         transforms.ToTensor(),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+     ])
+     image = preprocess_transform(image)
+     image = image.unsqueeze(0)
+     return image.to(device)
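+
+ # Alternative (a sketch): let transformers pick the exact preprocessing for each
+ # checkpoint instead of hard-coding resize/normalization constants:
+ # from transformers import AutoImageProcessor
+ # processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
+ # inputs = processor(images=pil_image, return_tensors="pt")  # then model(**inputs)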
+
+ # Define the prediction function
+ def predict(image_file, model_key):
+     model = models[model_key]["model"]
+
+     image = preprocess(image_file)
+
+     with torch.no_grad():
+         outputs = model(image)
+
+     predictions = outputs.logits.argmax(dim=1)
+
+     # Return the human-readable class name rather than a bare index.
+     return model.config.id2label[predictions.item()]
+
+ def classify_image(image, model_key):
+     # Gradio hands us a NumPy array; round-trip it through an in-memory JPEG
+     # so predict() can treat it like an uploaded file.
+     image = Image.fromarray(image.astype('uint8'), 'RGB')
+     image_file = BytesIO()
+     image.save(image_file, format="JPEG")
+     prediction = predict(image_file=image_file, model_key=model_key)
+     return prediction
+
+ iface = gr.Interface(fn=classify_image,
+                      inputs=["image", gr.inputs.Dropdown(list(models.keys()), label="Model")],
+                      outputs="text",
+                      title="Image Classification",
+                      description="Classify images using Hugging Face models")
+
+ iface.launch()
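+
+ # A shorter route to the same demo (a sketch, assuming the transformers
+ # pipeline API):
+ # from transformers import pipeline
+ # classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
+ # classifier("monkey.jpg")  # -> list of {"label": ..., "score": ...} dicts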