ludjan committed
Commit
38783f2
•
1 Parent(s): 5056b33

Fixed local notebook setup

.ipynb_checkpoints/Untitled-checkpoint.ipynb DELETED
@@ -1,6 +0,0 @@
- {
- "cells": [],
- "metadata": {},
- "nbformat": 4,
- "nbformat_minor": 5
- }
.ipynb_checkpoints/app-checkpoint.py DELETED
@@ -1,8 +0,0 @@
- import gradio as gr
-
- def greet(name):
-     return "Hello " + name + "!!"
-
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
-
.ipynb_checkpoints/notebook-checkpoint.ipynb ADDED
@@ -0,0 +1,319 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "4b40cb3a-544a-4b23-8c00-431cb7133130",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Python 3.11.5\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "python --version"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "93e1afb8-f78c-4862-9d56-a06a3559b4d1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|default_exp app"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "2adf2fa8-199b-48e4-a91c-9a093032480c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|export\n",
+ "from fastai.vision.all import *\n",
+ "import gradio as gr\n",
+ "import timm"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "f0218bf1-1836-4d7a-8d47-33584471f28b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|export\n",
+ "learn = load_learner('model.pkl')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "168ac2e4-f83b-4ce0-8f23-00999eb5d556",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|export\n",
+ "categories = learn.dls.vocab\n",
+ "\n",
+ "def classify_image(img):\n",
+ " pred,idx,probs = learn.predict(img)\n",
+ " return dict(zip(categories, map(float,probs)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "d343a0d3-40fd-4502-a86b-cb3bac9fdf7f",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<style>\n",
+ " /* Turns off some styling */\n",
+ " progress {\n",
+ " /* gets rid of default border in Firefox and Opera. */\n",
+ " border: none;\n",
+ " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
+ " background-size: auto;\n",
+ " }\n",
+ " progress:not([value]), progress:not([value])::-webkit-progress-bar {\n",
+ " background: repeating-linear-gradient(45deg, #7e7e7e, #7e7e7e 10px, #5c5c5c 10px, #5c5c5c 20px);\n",
+ " }\n",
+ " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
+ " background: #F44336;\n",
+ " }\n",
+ "</style>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "images/unicycle.jpeg is a tricycle\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<style>\n",
+ " /* Turns off some styling */\n",
+ " progress {\n",
+ " /* gets rid of default border in Firefox and Opera. */\n",
+ " border: none;\n",
+ " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
+ " background-size: auto;\n",
+ " }\n",
+ " progress:not([value]), progress:not([value])::-webkit-progress-bar {\n",
+ " background: repeating-linear-gradient(45deg, #7e7e7e, #7e7e7e 10px, #5c5c5c 10px, #5c5c5c 20px);\n",
+ " }\n",
+ " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
+ " background: #F44336;\n",
+ " }\n",
+ "</style>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "images/bicycle.jpeg is a bicycle\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<style>\n",
+ " /* Turns off some styling */\n",
+ " progress {\n",
+ " /* gets rid of default border in Firefox and Opera. */\n",
+ " border: none;\n",
+ " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
+ " background-size: auto;\n",
+ " }\n",
+ " progress:not([value]), progress:not([value])::-webkit-progress-bar {\n",
+ " background: repeating-linear-gradient(45deg, #7e7e7e, #7e7e7e 10px, #5c5c5c 10px, #5c5c5c 20px);\n",
+ " }\n",
+ " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
+ " background: #F44336;\n",
+ " }\n",
+ "</style>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "images/tricycle.png is a unicycle\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Upload your own images and link them\n",
+ "examples = ['images/unicycle.jpeg', 'images/bicycle.jpeg', 'images/tricycle.png']\n",
+ "\n",
+ "for example in examples:\n",
+ " image = PILImage.create(example)\n",
+ " res_dict = classify_image(image)\n",
+ " top = max(res_dict, key=res_dict.get)\n",
+ "\n",
+ " print(example + ' is a '+ top)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "156a1fa0-e124-4a18-b411-367e7926afa4",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Running on local URL: http://127.0.0.1:7860\n",
+ "\n",
+ "To create a public link, set `share=True` in `launch()`.\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": []
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#|export\n",
+ "\n",
+ "image = gr.Image()\n",
+ "label = gr.Label()\n",
+ "\n",
+ "intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)\n",
+ "intf.launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "2894f2be-e453-4795-8a16-2aa4770aa16d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import nbdev"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "10568397-2167-4c39-8120-436e577b452d",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Export successful\n"
+ ]
+ }
+ ],
+ "source": [
+ "nbdev.export.nb_export('test.ipynb', 'app')\n",
+ "print('Export successful')"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
README.md CHANGED
@@ -10,4 +10,4 @@ pinned: false
license: apache-2.0
---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py → app/app.py RENAMED
@@ -1,23 +1,26 @@
- __all__ = ['learn', 'classify_image', 'categories', 'image', 'label', 'examples', 'intf']
+ # AUTOGENERATED! DO NOT EDIT! File to edit: ../notebook.ipynb.

+ # %% auto 0
+ __all__ = ['learn', 'categories', 'image', 'label', 'intf', 'classify_image']
+
+ # %% ../notebook.ipynb 2
from fastai.vision.all import *
import gradio as gr
import timm

- # Upload your model
- learn = load_learner('cycle-model.pkl')
+ # %% ../notebook.ipynb 3
+ learn = load_learner('model.pkl')

+ # %% ../notebook.ipynb 4
categories = learn.dls.vocab

def classify_image(img):
    pred,idx,probs = learn.predict(img)
    return dict(zip(categories, map(float,probs)))

+ # %% ../notebook.ipynb 6
image = gr.Image()
label = gr.Label()

- # Upload your own images and link them
- examples = ['unicycle.jpeg', 'bicycle.jpeg', 'tricycle.png']
-
intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)
- intf.launch()
+ intf.launch()
bicycle.jpeg → images/bicycle.jpeg RENAMED
File without changes
tricycle.png → images/tricycle.png RENAMED
File without changes
unicycle.jpeg → images/unicycle.jpeg RENAMED
File without changes
notebook.ipynb ADDED
@@ -0,0 +1,319 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "4b40cb3a-544a-4b23-8c00-431cb7133130",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Python 3.11.5\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "python --version"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "93e1afb8-f78c-4862-9d56-a06a3559b4d1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|default_exp app"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "2adf2fa8-199b-48e4-a91c-9a093032480c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|export\n",
+ "from fastai.vision.all import *\n",
+ "import gradio as gr\n",
+ "import timm"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "f0218bf1-1836-4d7a-8d47-33584471f28b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|export\n",
+ "learn = load_learner('model.pkl')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "168ac2e4-f83b-4ce0-8f23-00999eb5d556",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|export\n",
+ "categories = learn.dls.vocab\n",
+ "\n",
+ "def classify_image(img):\n",
+ " pred,idx,probs = learn.predict(img)\n",
+ " return dict(zip(categories, map(float,probs)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "d343a0d3-40fd-4502-a86b-cb3bac9fdf7f",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<style>\n",
+ " /* Turns off some styling */\n",
+ " progress {\n",
+ " /* gets rid of default border in Firefox and Opera. */\n",
+ " border: none;\n",
+ " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
+ " background-size: auto;\n",
+ " }\n",
+ " progress:not([value]), progress:not([value])::-webkit-progress-bar {\n",
+ " background: repeating-linear-gradient(45deg, #7e7e7e, #7e7e7e 10px, #5c5c5c 10px, #5c5c5c 20px);\n",
+ " }\n",
+ " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
+ " background: #F44336;\n",
+ " }\n",
+ "</style>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "images/unicycle.jpeg is a tricycle\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<style>\n",
+ " /* Turns off some styling */\n",
+ " progress {\n",
+ " /* gets rid of default border in Firefox and Opera. */\n",
+ " border: none;\n",
+ " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
+ " background-size: auto;\n",
+ " }\n",
+ " progress:not([value]), progress:not([value])::-webkit-progress-bar {\n",
+ " background: repeating-linear-gradient(45deg, #7e7e7e, #7e7e7e 10px, #5c5c5c 10px, #5c5c5c 20px);\n",
+ " }\n",
+ " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
+ " background: #F44336;\n",
+ " }\n",
+ "</style>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "images/bicycle.jpeg is a bicycle\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<style>\n",
+ " /* Turns off some styling */\n",
+ " progress {\n",
+ " /* gets rid of default border in Firefox and Opera. */\n",
+ " border: none;\n",
+ " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
+ " background-size: auto;\n",
+ " }\n",
+ " progress:not([value]), progress:not([value])::-webkit-progress-bar {\n",
+ " background: repeating-linear-gradient(45deg, #7e7e7e, #7e7e7e 10px, #5c5c5c 10px, #5c5c5c 20px);\n",
+ " }\n",
+ " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
+ " background: #F44336;\n",
+ " }\n",
+ "</style>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "images/tricycle.png is a unicycle\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Upload your own images and link them\n",
+ "examples = ['images/unicycle.jpeg', 'images/bicycle.jpeg', 'images/tricycle.png']\n",
+ "\n",
+ "for example in examples:\n",
+ " image = PILImage.create(example)\n",
+ " res_dict = classify_image(image)\n",
+ " top = max(res_dict, key=res_dict.get)\n",
+ "\n",
+ " print(example + ' is a '+ top)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "156a1fa0-e124-4a18-b411-367e7926afa4",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Running on local URL: http://127.0.0.1:7861\n",
+ "\n",
+ "To create a public link, set `share=True` in `launch()`.\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "<div><iframe src=\"http://127.0.0.1:7861/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": []
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#|export\n",
+ "\n",
+ "image = gr.Image()\n",
+ "label = gr.Label()\n",
+ "\n",
+ "intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)\n",
+ "intf.launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "2894f2be-e453-4795-8a16-2aa4770aa16d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import nbdev"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "10568397-2167-4c39-8120-436e577b452d",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Export successful\n"
+ ]
+ }
+ ],
+ "source": [
+ "nbdev.export.nb_export('notebook.ipynb', 'app')\n",
+ "print('Export successful')"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
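
For reference, a minimal sketch (not part of this commit) of the export-and-check loop the notebook above relies on, assuming the layout in this commit with notebook.ipynb, model.pkl and images/ at the repository root: regenerate app/app.py from the cells tagged #|export, then confirm the pickled learner still loads and classifies one of the bundled example images.

# Sketch only: regenerate the Space app from the notebook and smoke-test the model locally.
# Assumes notebook.ipynb, model.pkl and images/ sit at the repository root, as in this commit.
from pathlib import Path

import nbdev.export
from fastai.vision.all import PILImage, load_learner

# Re-export the #|export cells into app/app.py (the target module is set by #|default_exp app).
nbdev.export.nb_export('notebook.ipynb', 'app')
assert Path('app/app.py').exists()

# Quick check that the learner loads and classifies one of the bundled example images.
learn = load_learner('model.pkl')
pred, idx, probs = learn.predict(PILImage.create('images/bicycle.jpeg'))
print(pred, float(probs[idx]))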