import datasets
from transformers import CLIPProcessor, CLIPModel
import torch
from PIL import Image

# Load the dataset
dataset = datasets.load_dataset("metmuseum/openaccess")

# Initialize the CLIP model and processor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Use Apple's MPS backend if available, otherwise fall back to the CPU
# Change this if you have a fancier computer (:
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
model.to(device)

# Map to store embeddings with Object ID as key
embeddings_map = {}

# Create an embedding for a single PIL image
def create_embedding(image_pil):
    try:
        inputs = processor(images=image_pil, return_tensors="pt", padding=True).to(device)
        with torch.no_grad():
            embeddings = model.get_image_features(**inputs)
        return embeddings
    except Exception as e:
        print(f"Error processing image: {e}")
        return None

# Loop through the dataset, embed each image, and add the result to the map
# Optionally, you could store more keys here,
# or just add the embeddings as a column on the full dataset
for item in dataset['train']:
    object_id = item['Object ID']
    image_pil = item['jpg']
    if image_pil:
        embedding = create_embedding(image_pil)
        if embedding is not None:
            embeddings_map[object_id] = embedding.cpu().numpy()

# Convert the embeddings map to a new dataset
# Note: each stored embedding has shape (1, embedding_dim), so take row [0]
# of each one to store a flat [embedding] per object instead of a nested [[embedding]]
embedding_dataset = datasets.Dataset.from_dict({
    'Object ID': list(embeddings_map.keys()),
    'Embedding': [embedding[0].tolist() for embedding in embeddings_map.values()]
})

# Save the new dataset to disk
embedding_dataset.save_to_disk('metmuseum_embeddings')
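
# Optional sanity check: reload the saved dataset and inspect one row.
# This is a minimal sketch, assuming the script above has already run and
# written 'metmuseum_embeddings' to the current working directory.
reloaded = datasets.load_from_disk('metmuseum_embeddings')
print(reloaded)  # column names and row count
first = reloaded[0]
print(first['Object ID'], len(first['Embedding']))  # one object ID and its embedding length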