openaccess / add_images.py
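"""Augment the Met Open Access metadata CSV with a primaryImageSmall column.

Reads metadata.csv.gz, queries the Met Collection API
(https://collectionapi.metmuseum.org) for each object's small image URL,
and writes metadata_images.csv.gz with the new column appended to every
valid row.
"""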
import csv
import gzip
import urllib.parse

import requests
import urllib3
from requests.adapters import HTTPAdapter, Retry
# NOTE: disabling TLS verification is not a good idea; this is solely a fix for Met networks
do_verify = False
if not do_verify:
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Set up an HTTPAdapter & requests session to add a retry pattern
s = requests.Session()
retries = Retry(total=3,
                backoff_factor=0.1,
                status_forcelist=[500, 502, 503, 504])
s.mount('https://', HTTPAdapter(max_retries=retries))
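# Illustrative note on the retry settings above: with total=3 and
# backoff_factor=0.1, a request that keeps failing with one of the listed
# 5xx statuses is attempted up to four times in all, with short exponential
# sleeps between attempts (on the order of 0.1 s, 0.2 s, 0.4 s; exact timing
# varies by urllib3 version) before the final error propagates. Only
# https:// URLs go through this adapter.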
# Function to load and clean the CSV data
def load_and_clean_csv(file_path):
    valid_rows = []
    invalid_rows = []
    index = 0
    # Read the gzip file line by line
    with gzip.open(file_path, 'rt') as f:
        reader = csv.reader(f)
        header = next(reader)  # Read the header separately
        header.append("primaryImageSmall")  # Add the new column to the header
        valid_rows.append(header)
        expected_columns = len(header) - 1  # Exclude the new column
        for line in f:
            try:
                # Try to parse the line as a single CSV record
                row = next(csv.reader([line]))
                index += 1
                # print(f"{len(row)}:{expected_columns}")
                if len(row) == expected_columns:
                    # Fetch primaryImageSmall from the API ("Object ID" is the fifth column)
                    object_id = row[4]
                    image_url = fetch_primary_image_small(object_id)
                    # Percent-encode the characters that break raw URLs here
                    # (spaces and en dashes are the ones handled for Met image URLs)
                    image_url = image_url.replace(" ", "%20")
                    image_url = image_url.replace(u'\u2013', urllib.parse.quote(u'\u2013'))
                    row.append(image_url)
                    valid_rows.append(row)
                    if index % 100 == 0:
                        print("Fetched " + str(index) + " image URLs")
                else:
                    invalid_rows.append(line)
            except Exception as e:
                print(e)
                invalid_rows.append(line)
    print(f"Found {len(invalid_rows)} invalid rows")
    return valid_rows, invalid_rows
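# Caveat on the parser above: because the file is read one physical line at a
# time, CSV records containing quoted embedded newlines span several lines and
# end up in invalid_rows rather than being parsed as single records.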
# Function to fetch the primaryImageSmall URL from the Met Museum API
def fetch_primary_image_small(object_id):
    url = f"https://collectionapi.metmuseum.org/public/collection/v1/objects/{object_id}"
    try:
        response = s.get(url, verify=do_verify)
        response.raise_for_status()  # Raise an error for bad status codes
        data = response.json()
        # print(data.get("primaryImageSmall", ""))
        return data.get("primaryImageSmall", "")
    except Exception as e:
        print(f"Error fetching image for object ID {object_id}: {e}")
        return ""
# Function to save the cleaned data to a new gzip CSV file
def save_cleaned_csv(valid_rows, output_path):
    with gzip.open(output_path, 'wt', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(valid_rows)
    print(f"Cleaned data saved to {output_path}")
def main():
    input_file = 'metadata.csv.gz'
    output_file = 'metadata_images.csv.gz'
    # Load and clean the data
    valid_rows, invalid_rows = load_and_clean_csv(input_file)
    # Save the cleaned data
    save_cleaned_csv(valid_rows, output_file)


if __name__ == "__main__":
    main()
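# Usage (assumes metadata.csv.gz sits in the working directory; the augmented
# file is written next to it as metadata_images.csv.gz):
#   python add_images.py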