Commit 4977ce5 · jervinjosh68 committed · 1 Parent(s): 5e09f7f
debugged app.py
Files changed:
- app.py +6 -10
- gradio_queue.db +0 -0
- stock_baby.webp +0 -0
- stock_guy.jpg +0 -0
- stock_old_woman.jpg +0 -0
- stock_teen.webp +0 -0
app.py
CHANGED
@@ -8,7 +8,7 @@ model = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
 transforms = ViTFeatureExtractor.from_pretrained('nateraw/vit-age-classifier')
 
 def predict(im):
-    labels = {0:"0-2", 1: "3-9" , 2: "10-19", 3: "20-29", 4: "30-39", 5: "40-49", 6: "50-59"}
+    labels = {0:"0-2", 1: "3-9" , 2: "10-19", 3: "20-29", 4: "30-39", 5: "40-49", 6: "50-59", 7:"60-69",8:"more than 70"}
     # Transform our image and pass it through the model
     inputs = transforms(im, return_tensors='pt')
     output = model(**inputs)
@@ -17,27 +17,23 @@ def predict(im):
     proba = output.logits.softmax(1)
 
     # Predicted Classes
-    preds = proba.argmax(1)
+    #preds = proba.argmax(1)
     values, indices = torch.topk(proba, k=5)
 
 
 
     return {labels[i.item()]: v.item() for i, v in zip(indices.numpy()[0], values.detach().numpy()[0])}
 
-
-print(predict(Image.open("baby.jpg")))
-
-
 inputs = [
-    gr.inputs.Image(type="pil", label="
+    gr.inputs.Image(type="pil", label="Input Image")
 ]
 
 
 
-title = "
+title = "ViT-Age-Classification"
 description = "ViT-Age-Classification is used to categorize an individual's age using images"
 article = " <a href='https://huggingface.co/nateraw/vit-age-classifier'>Model Repo on Hugging Face Model Hub</a>"
-examples = ["
+examples = ["stock_baby.webp","stock_teen.webp","stock_guy.jpg","stock_old_woman.jpg"]
 
 gr.Interface(
     predict,
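For reference, below is a sketch of the complete app.py as it should read after this commit, stitched together from the diff's context and added lines. The import block, the Interface outputs argument, and the launch() call fall outside the visible hunks, so they are assumptions, filled in with the same Gradio 2.x-era API that gr.inputs.Image implies. The comments mark the two problems the commit addresses: the labels dict previously covered only classes 0-6 while the model emits nine logits, and a module-level debug print ran on every Space startup.

import gradio as gr
import torch
from transformers import ViTFeatureExtractor, ViTForImageClassification

model = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
transforms = ViTFeatureExtractor.from_pretrained('nateraw/vit-age-classifier')

def predict(im):
    # All nine age buckets the model can output. The commit adds classes 7
    # and 8; without them, labels[i.item()] raised a KeyError whenever the
    # top-5 indices included 7 ("60-69") or 8 ("more than 70").
    labels = {0: "0-2", 1: "3-9", 2: "10-19", 3: "20-29", 4: "30-39",
              5: "40-49", 6: "50-59", 7: "60-69", 8: "more than 70"}
    # Transform our image and pass it through the model
    inputs = transforms(im, return_tensors='pt')
    output = model(**inputs)
    proba = output.logits.softmax(1)
    # Top-5 classes and their probabilities
    values, indices = torch.topk(proba, k=5)
    return {labels[i.item()]: v.item()
            for i, v in zip(indices.numpy()[0], values.detach().numpy()[0])}

# The commit also drops a module-level debug call,
# print(predict(Image.open("baby.jpg"))), which executed on every startup.

inputs = [
    gr.inputs.Image(type="pil", label="Input Image")
]

title = "ViT-Age-Classification"
description = "ViT-Age-Classification is used to categorize an individual's age using images"
article = " <a href='https://huggingface.co/nateraw/vit-age-classifier'>Model Repo on Hugging Face Model Hub</a>"
examples = ["stock_baby.webp", "stock_teen.webp", "stock_guy.jpg", "stock_old_woman.jpg"]

# Assumption: the output spec and launch call are not shown in the diff;
# gr.outputs.Label matches the top-5 dict that predict returns.
gr.Interface(
    predict,
    inputs,
    outputs=gr.outputs.Label(num_top_classes=5),
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch()

With the old seven-entry dict, torch.topk(proba, k=5) over nine logits raises a KeyError any time class 7 or 8 lands in the top five, which is a plausible source of the runtime error this commit debugs.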
gradio_queue.db
CHANGED
Binary files a/gradio_queue.db and b/gradio_queue.db differ
stock_baby.webp
ADDED
stock_guy.jpg
ADDED
stock_old_woman.jpg
ADDED
stock_teen.webp
ADDED