Datasets:

Modalities:
Tabular
Text
Formats:
arrow
Languages:
English
Libraries:
Datasets
License:
brettrenfer committed on
Commit
99383cb
·
verified ·
1 Parent(s): 49a1de6

Added text example with sentence_transformers

Browse files
Files changed (1) hide show
  1. example_text.py +31 -0
example_text.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# based on https://huggingface.co/blog/getting-started-with-embeddings
#
# Example: semantic text search over the Met Museum open-access collection
# using precomputed CLIP embeddings and sentence_transformers.
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, util

# Precomputed CLIP embeddings for the Met's open-access objects
# (column 'Embedding', plus metadata columns such as 'Object ID').
embeddings = load_dataset('metmuseum/openaccess_embeddings')

# Expose the 'Embedding' column as torch tensors so util.semantic_search
# can consume it directly; keep the other columns as plain Python values.
# NOTE(review): set_format itself performs no autograd ops — the no_grad()
# wrapper is presumably defensive; confirm it is actually needed.
with torch.no_grad():
    embeddings.set_format("torch", columns=['Embedding'], output_all_columns=True)

# Load the matching CLIP model so query embeddings live in the same
# vector space as the precomputed dataset embeddings.
model = SentenceTransformer('clip-ViT-B-32')


def search(query, k=3):
    """Print the top-k Met collection objects most similar to *query*.

    Args:
        query: A text string (CLIP also accepts images) to embed and search with.
        k: Number of top-ranked results to print (default 3).

    Side effects:
        Prints one collection URL and its cosine-similarity score per hit.
    """
    # Encode the query into the shared CLIP embedding space.
    query_emb = model.encode([query], convert_to_tensor=True, show_progress_bar=False)

    # util.semantic_search computes cosine similarity between the query
    # embedding and every dataset embedding, returning the top_k hits
    # (one result list per query; we issued a single query, hence [0]).
    hits = util.semantic_search(query_emb, embeddings["train"]["Embedding"], top_k=k)[0]

    print(f"Results for '{query}'")
    for hit in hits:
        # 'corpus_id' indexes back into the train split; 'Object ID' maps
        # to the Met's public collection URL.
        object_id = embeddings["train"][hit['corpus_id']]["Object ID"]
        print(f"https://www.metmuseum.org/art/collection/search/{object_id}")
        print(f"score: {hit['score']}")


search("Painting of a sunset")
print("\n")
search("Angry cat")