# based on https://huggingface.co./blog/getting-started-with-embeddings
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, util

# Load the precomputed CLIP embeddings for the Met Open Access collection
embeddings = load_dataset('metmuseum/openaccess_embeddings')
embeddings.set_format("torch", columns=['Embedding'], output_all_columns=True)
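# 'Embedding' now comes back as torch tensors; output_all_columns keeps the
# remaining columns (including 'Object ID', used below) as plain Python values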
# Load the CLIP model used to encode queries
model = SentenceTransformer('clip-ViT-B-32')
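
# Because CLIP embeds images and text into one shared vector space, a text
# (or image) query can be compared directly against the stored image embeddings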
def search(query, k=3):
    # First, we encode the query (which can be an image or a text string)
    query_emb = model.encode([query], convert_to_tensor=True, show_progress_bar=False)
    # Then, util.semantic_search computes the cosine similarity between the
    # query embedding and all image embeddings, and returns the top_k hits
    hits = util.semantic_search(query_emb, embeddings["train"]["Embedding"], top_k=k)[0]
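    # Each hit is a dict with 'corpus_id' (the row index in the dataset)
    # and 'score' (the cosine similarity to the query)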
print("Results for '"+query+"'") | |
for hit in hits: | |
# print(hit) | |
print("https://www.metmuseum.org/art/collection/search/"+str(embeddings["train"][hit['corpus_id']]["Object ID"])) | |
print("score: "+str(hit["score"])) | |
search("Painting of a sunset") | |
print("\n") | |
search("Angry cat") |