import os

import numpy as np
import torch
from datasets import Dataset, load_dataset
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Work around "duplicate OpenMP runtime" aborts that can occur when PyTorch
# and FAISS each load their own copy of libomp.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# Drop the raw 'jpg' image bytes from the collection data to keep the merged
# dataset small; the search only needs the precomputed embeddings.
remove_images = True
verbose = True

# Save the merged dataset (and FAISS index) to disk so later runs can skip the
# download/merge step.
cache_on_disk = True
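

# Pipeline overview: merge the Met open-access metadata with the precomputed
# CLIP embeddings (joined on "Object ID"), index the embeddings with FAISS,
# then embed text or image queries with CLIP and print the closest objects.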
def load_and_prepare_data():
    """Download the Met collection metadata and its embeddings, merge them on
    "Object ID", drop objects without an embedding, and optionally cache the
    result to disk."""
    if verbose:
        print("Loading")

    embeddings_data = load_dataset("metmuseum/openaccess_embeddings", split='train')
    collection_data = load_dataset("metmuseum/openaccess", split='train')

    # Work in pandas for the merge, optionally dropping the raw image bytes first.
    if remove_images:
        cd_cleaned = collection_data.remove_columns(['jpg'])
        collection_df = cd_cleaned.to_pandas()
    else:
        collection_df = collection_data.to_pandas()

    embedding_df = embeddings_data.to_pandas()

    if verbose:
        print("Merging")
    merged_df = collection_df.merge(embedding_df, on="Object ID", how="left")
    if verbose:
        print("Merged")

    first_dataset = Dataset.from_pandas(merged_df)

    # The left merge leaves objects with no embedding as None; they cannot be
    # indexed, so filter them out.
    merged_dataset = first_dataset.filter(lambda example: example['Embedding'] is not None)

    if cache_on_disk:
        merged_dataset.save_to_disk('metmuseum_merged')

    return merged_dataset


def build_faiss_index(dataset, index_file):
    """Build a FAISS index over the 'Embedding' column and optionally save it."""
    dataset.add_faiss_index('Embedding')
    if cache_on_disk:
        dataset.save_faiss_index('Embedding', index_file)
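
# Note: with no extra arguments, add_faiss_index builds a flat (exact-search)
# FAISS index, so the scores returned by searches are distances where smaller
# values mean closer matches.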


def load_faiss_index(dataset, index_file):
    """Attach a previously saved FAISS index to the dataset."""
    dataset.load_faiss_index('Embedding', index_file)


def search_embeddings(dataset, query_embedding, k=5):
    """Return the scores and examples of the k nearest neighbours to the query."""
    scores, samples = dataset.get_nearest_examples(
        "Embedding", query_embedding, k
    )
    return scores, samples
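
# Note: get_nearest_examples expects the query as a NumPy array matching the
# dimensionality of the stored embeddings (a 1-D vector or a single-row 2-D
# array both work) and returns the scores alongside a dict of columns for the
# k retrieved rows.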


def query_text(processor, model, text):
    """Convert a text query into an embedding."""
    inputs = processor(text=text, return_tensors="pt")
    with torch.no_grad():
        text_embedding = model.get_text_features(**inputs).numpy()
    return text_embedding


def query_image(processor, model, image_path):
    """Convert an image query into an embedding."""
    image = Image.open(image_path)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        image_embedding = model.get_image_features(**inputs).numpy()
    if verbose:
        print(image_embedding.shape)
    # get_image_features returns a (1, dim) array; return the single row.
    return image_embedding[0]


if __name__ == "__main__":
    index_file = "faiss_index_file.index"
    dataset_path = "metmuseum_merged"

    # Reuse the cached merged dataset if it exists; otherwise rebuild it.
    if os.path.exists(dataset_path):
        dataset = Dataset.load_from_disk(dataset_path)
    else:
        dataset = load_and_prepare_data()

    # Likewise, only build the FAISS index if it has not been saved already.
    if not os.path.exists(index_file):
        if verbose:
            print("Building index")
        build_faiss_index(dataset, index_file)
    else:
        load_faiss_index(dataset, index_file)
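
    # Load CLIP to embed the queries. The query embeddings are only comparable
    # to the precomputed dataset embeddings if this checkpoint matches the one
    # used to generate them, so keep it in sync with the embeddings dataset.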
    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    # Text query: embed a prompt and look up the closest objects.
    text_query = "A painting of a sunflower"
    text_embedding = query_text(processor, model, text_query)

    scores, samples = search_embeddings(dataset, text_embedding, k=5)

    print("\nText Query Results:")
    print(scores)

    for result in samples["Object ID"]:
        print("https://metmuseum.org/art/collection/search/" + str(result))

    # Image query: embed a local image file and look up visually similar objects.
    image_path = "DP355692.jpg"
    image_embedding = query_image(processor, model, image_path)

    scores, samples = search_embeddings(dataset, image_embedding, k=5)

    print("\nImage Query Results:")
    print(scores)
    for result in samples["Object ID"]:
        print("https://metmuseum.org/art/collection/search/" + str(result))