Datasets:

Modalities:
Tabular
Text
Formats:
arrow
Languages:
English
Libraries:
Datasets
License:
brettrenfer committed on
Commit
b323980
·
verified ·
1 Parent(s): b852440

Upload embeddings.py

Browse files
Files changed (1) hide show
  1. embeddings.py +53 -0
embeddings.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import datasets
from transformers import CLIPProcessor, CLIPModel
import torch
from PIL import Image

# Pull the Met Museum Open Access dataset from the Hugging Face Hub.
dataset = datasets.load_dataset("metmuseum/openaccess")

# Load CLIP and its paired preprocessor (same checkpoint for both).
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Prefer Apple's MPS backend when present, otherwise run on CPU.
# Change this if you have a fancier computer (:
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
model.to(device)

# Object ID -> embedding array; filled in by the loop below.
embeddings_map = {}
20
+
def create_embedding(image_pil):
    """Return a CLIP image-feature tensor for one PIL image, or None on failure.

    Failures are printed rather than raised so that a single bad image
    does not abort a long pass over the whole dataset.
    """
    try:
        inputs = processor(images=image_pil, return_tensors="pt", padding=True).to(device)
        with torch.no_grad():
            features = model.get_image_features(**inputs)
        return features
    except Exception as err:
        print(f"Error processing image: {err}")
        return None
31
+
# Walk the training split, embed every image, and file each embedding under
# its Object ID.  More keys could be carried over here, or the embeddings
# could instead be added as a column on the full dataset.
for record in dataset['train']:
    object_id = record['Object ID']
    pil_image = record['jpg']
    if not pil_image:
        continue
    embedding = create_embedding(pil_image)
    if embedding is not None:
        embeddings_map[object_id] = embedding.cpu().numpy()
42
+
# Convert the embeddings map into a new dataset.
#
# get_image_features returns a batch of shape (1, dim), so each stored array
# is nested one level deep; indexing with [0] *per embedding* unwraps that
# batch axis, giving every row a flat feature vector.
#
# NOTE(fix): the original applied [0] to the whole list comprehension, which
# kept only the first object's embedding and left the 'Embedding' column a
# different length than 'Object ID'.
embedding_dataset = datasets.Dataset.from_dict({
    'Object ID': list(embeddings_map.keys()),
    'Embedding': [embedding.tolist()[0] for embedding in embeddings_map.values()],
})

# Save the new dataset to disk
embedding_dataset.save_to_disk('metmuseum_embeddings')