Datasets:
Modalities: Tabular, Text
Formats: arrow
Languages: English
Libraries: Datasets
License:
brettrenfer committed (verified)
Commit 49a1de6 · 1 Parent(s): f8bd9a4

Upload example.py


Added an example of use with the larger Open Access dataset

Files changed (1): example.py (+138, -0)
example.py ADDED
import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPModel
import numpy as np
import os
from datasets import load_dataset, Dataset

# Work around "OMP: Error #15" (duplicate OpenMP runtimes), which can occur
# when PyTorch and FAISS each load their own copy of libomp
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# Drop the raw image bytes? Do this unless you have a lot of memory!
remove_images = True
verbose = True

# Save locally for faster use next time
cache_on_disk = True

# Load the datasets and strip out the images (or not)
def load_and_prepare_data():
    if verbose:
        print("Loading")
    # Load from the Hugging Face Hub
    embeddings_data = load_dataset("metmuseum/openaccess_embeddings", split='train')
    collection_data = load_dataset("metmuseum/openaccess", split='train')

    # Strip out the binary image data (or not)
    if remove_images:
        cd_cleaned = collection_data.remove_columns(['jpg'])
        # Convert the collection to a pandas dataframe
        collection_df = cd_cleaned.to_pandas()
    else:
        # Convert the collection to a pandas dataframe
        collection_df = collection_data.to_pandas()

    # Convert the embeddings to a pandas dataframe
    embedding_df = embeddings_data.to_pandas()

    # Merge the datasets on "Object ID"
    if verbose:
        print("Merging")

    merged_df = collection_df.merge(embedding_df, on="Object ID", how="left")

    if verbose:
        print("Merged")

    # Convert back to a Hugging Face dataset
    first_dataset = Dataset.from_pandas(merged_df)

    # Remove rows with empty embeddings. Note: this tosses roughly half of the
    # samples, but makes our lives easier when passing the data to FAISS etc.
    merged_dataset = first_dataset.filter(lambda example: example['Embedding'] is not None)

    if cache_on_disk:
        merged_dataset.save_to_disk('metmuseum_merged')

    return merged_dataset

# Function to build the FAISS index over the "Embedding" column & (optionally) save it
def build_faiss_index(dataset, index_file):
    dataset.add_faiss_index('Embedding')
    if cache_on_disk:
        dataset.save_faiss_index('Embedding', index_file)

# Function to load the FAISS on-disk index
def load_faiss_index(dataset, index_file):
    dataset.load_faiss_index('Embedding', index_file)

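# Note (an option beyond the original example, not verified against these
# datasets): add_faiss_index builds an exact L2-distance index by default,
# while CLIP is trained for cosine similarity. If retrieval quality is poor,
# one sketch worth trying is to L2-normalize the stored embeddings and search
# an inner-product index instead (queries must then be normalized the same way):
#
#   import faiss
#   dataset = dataset.map(lambda ex: {'Embedding':
#       np.asarray(ex['Embedding']) / np.linalg.norm(ex['Embedding'])})
#   dataset.add_faiss_index('Embedding', metric_type=faiss.METRIC_INNER_PRODUCT)
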
def search_embeddings(dataset, query_embedding, k=5):
    """Search for the top k closest embeddings in the index."""
    scores, samples = dataset.get_nearest_examples(
        "Embedding", query_embedding, k
    )
    return scores, samples

def query_text(processor, model, text):
    """Convert a text query into an embedding."""
    inputs = processor(text=text, return_tensors="pt")
    with torch.no_grad():
        text_embedding = model.get_text_features(**inputs).numpy()
    return text_embedding

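# Note: get_text_features returns a (1, dim) batch; the datasets FAISS search
# accepts either a 1D vector or a (1, dim) array, so this works as-is, though
# returning text_embedding[0] would match query_image below
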
def query_image(processor, model, image_path):
    """Convert an image query into an embedding."""
    image = Image.open(image_path)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        image_embedding = model.get_image_features(**inputs).numpy()
    if verbose:
        print(image_embedding.shape)
    # Return a 1D vector rather than the (1, dim) batch
    return image_embedding[0]

if __name__ == "__main__":
    index_file = "faiss_index_file.index"
    dataset_path = "metmuseum_merged"

    # Try to load the cached data & cached FAISS index
    if os.path.exists(dataset_path):
        dataset = Dataset.load_from_disk(dataset_path)
    else:
        dataset = load_and_prepare_data()

    if not os.path.exists(index_file):
        if verbose:
            print("Building index")
        build_faiss_index(dataset, index_file)
    else:
        load_faiss_index(dataset, index_file)

    # Load CLIP to embed the text / images we want to search with
    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    # Example usage for a text query
    # This doesn't really seem to work right now...
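    # (Possible cause, untested: the default index measures L2 distance on
    # unnormalized embeddings, which is a poor fit for cross-modal CLIP
    # comparisons; see the inner-product note above build_faiss_index)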
    text_query = "A painting of a sunflower"
    text_embedding = query_text(processor, model, text_query)

    # k = how many results to fetch
    scores, samples = search_embeddings(dataset, text_embedding, k=5)

    print("\nText Query Results:")
    print(scores)
    # The results are dataset columns -- you could loop through all the
    # fields, or just build a URL like below
    for result in samples["Object ID"]:
        print("https://metmuseum.org/art/collection/search/" + str(result))

    # Example usage for an image query
    image_path = "DP355692.jpg"  # Replace with the path to your image file
    image_embedding = query_image(processor, model, image_path)

    # k = how many results to fetch
    scores, samples = search_embeddings(dataset, image_embedding, k=5)

    print("\nImage Query Results:")
    print(scores)
    for result in samples["Object ID"]:
        print("https://metmuseum.org/art/collection/search/" + str(result))