# metmuseum/openaccess_embeddings / example.py
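"""Example: semantic search over the Met Open Access collection.

Merges the metmuseum/openaccess collection records with the precomputed CLIP
embeddings in metmuseum/openaccess_embeddings, builds a FAISS index over the
embeddings, and runs example text and image similarity queries with
openai/clip-vit-base-patch32.
"""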
import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPModel
import numpy as np
import os
from datasets import load_dataset, Dataset
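# Assumed environment (not pinned by the original): torch, transformers,
# datasets, pandas, pillow, and numpy, plus faiss-cpu (or faiss-gpu) for the
# FAISS index calls below, e.g.:
#   pip install torch transformers datasets pandas pillow faiss-cpu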
# Work around "duplicate OpenMP runtime" errors (common when torch and faiss
# are loaded together).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# Strip image binary data? Do this unless you have a lot of memory!
remove_images = True
verbose = True
# Save the merged dataset locally for faster use next time
cache_on_disk = True

# Load the datasets and remove images (or not)
def load_and_prepare_data():
    if verbose:
        print("Loading")
    # Load both datasets from the Hugging Face Hub
    embeddings_data = load_dataset("metmuseum/openaccess_embeddings", split='train')
    collection_data = load_dataset("metmuseum/openaccess", split='train')
    # Strip out the image binary data (or not)
    if remove_images:
        cd_cleaned = collection_data.remove_columns(['jpg'])
        # Convert the collection to a pandas dataframe
        collection_df = cd_cleaned.to_pandas()
    else:
        # Convert the collection to a pandas dataframe
        collection_df = collection_data.to_pandas()
    # Convert the embeddings to a pandas dataframe
    embedding_df = embeddings_data.to_pandas()
    # Merge the two datasets on "Object ID"
    if verbose:
        print("Merging")
    merged_df = collection_df.merge(embedding_df, on="Object ID", how="left")
    if verbose:
        print("Merged")
    # Convert back to a Hugging Face dataset
    first_dataset = Dataset.from_pandas(merged_df)
    # Remove empty embeddings. Note: this tosses roughly half of the samples,
    # but it makes our lives easier when passing data to FAISS etc.
    merged_dataset = first_dataset.filter(lambda example: example['Embedding'] is not None)
    if cache_on_disk:
        merged_dataset.save_to_disk('metmuseum_merged')
    return merged_dataset
# Build the FAISS index on the "Embedding" column & (optionally) save it
def build_faiss_index(dataset, index_file):
    dataset.add_faiss_index('Embedding')
    if cache_on_disk:
        dataset.save_faiss_index('Embedding', index_file)
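# Note: add_faiss_index builds an exact (flat) index by default. For larger
# collections, it also accepts a string_factory argument (a FAISS index
# factory string, e.g. an IVF variant) to trade exactness for speed; the
# default is assumed to be fine at this dataset's scale.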
# Load the FAISS index from disk
def load_faiss_index(dataset, index_file):
    dataset.load_faiss_index('Embedding', index_file)
def search_embeddings(dataset, query_embedding, k=5):
    """Search for the top k closest embeddings in the index."""
    scores, samples = dataset.get_nearest_examples(
        "Embedding", query_embedding, k
    )
    return scores, samples
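# Note: with the flat index that add_faiss_index builds by default, the scores
# returned above should be L2 distances, i.e. lower means a closer match (an
# assumption -- verify against how the index was actually built).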
def query_text(processor, model, text):
    """Convert a text query into an embedding."""
    inputs = processor(text=text, return_tensors="pt")
    with torch.no_grad():
        text_embedding = model.get_text_features(**inputs).numpy()
    # get_text_features returns shape (1, dim); return a 1-D vector to match
    # query_image below
    return text_embedding[0]
def query_image(processor, model, image_path):
    """Convert an image query into an embedding."""
    image = Image.open(image_path)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        image_embedding = model.get_image_features(**inputs).numpy()
    if verbose:
        print(image_embedding.shape)
    # get_image_features returns shape (1, dim); return a 1-D vector
    return image_embedding[0]
if __name__ == "__main__":
    index_file = "faiss_index_file.index"
    dataset_path = "metmuseum_merged"
    # Try to load the cached data & cached FAISS index
    if os.path.exists(dataset_path):
        dataset = Dataset.load_from_disk(dataset_path)
    else:
        dataset = load_and_prepare_data()
    if not os.path.exists(index_file):
        if verbose:
            print("Building index")
        build_faiss_index(dataset, index_file)
    else:
        load_faiss_index(dataset, index_file)
    # Load CLIP to embed the text / images we search with
    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    # Example usage for a text query
    # (This doesn't really seem to work well right now...)
    text_query = "A painting of a sunflower"
    text_embedding = query_text(processor, model, text_query)
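    # Untested hunch about the poor text results: if the stored embeddings were
    # indexed unnormalized under FAISS's default L2 metric, text and image
    # vectors can differ in magnitude and distances get skewed. L2-normalizing
    # the query (and, ideally, the indexed embeddings) to rank by cosine
    # similarity is one thing worth trying:
    #   text_embedding = text_embedding / np.linalg.norm(text_embedding)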
    # k = how many results to get
    scores, samples = search_embeddings(dataset, text_embedding, k=5)
    print("\nText Query Results:")
    print(scores)
    # The results are dataset columns -- you could loop through all the fields,
    # or just build a URL like below
    for result in samples["Object ID"]:
        print("https://metmuseum.org/art/collection/search/" + str(result))
    # Example usage for an image query
    image_path = "DP355692.jpg"  # Replace with the path to your image file
    image_embedding = query_image(processor, model, image_path)
    # k = how many results to get
    scores, samples = search_embeddings(dataset, image_embedding, k=5)
    print("\nImage Query Results:")
    print(scores)
    for result in samples["Object ID"]:
        print("https://metmuseum.org/art/collection/search/" + str(result))