import gzip
import csv
import requests
from requests.adapters import HTTPAdapter, Retry
import urllib3
import urllib.parse
# NOTE: this is not a good idea; this is solely a fix for Met networks
do_verify = False
if not do_verify:
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Set up an HTTPAdapter and requests session to add a retry pattern
s = requests.Session()
retries = Retry(total=3,
                backoff_factor=0.1,
                status_forcelist=[500, 502, 503, 504])
s.mount('https://', HTTPAdapter(max_retries=retries))
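# With backoff_factor=0.1, urllib3 sleeps roughly 0.1s, 0.2s, then 0.4s
# between successive retries (exact timing varies by urllib3 version), so
# transient 5xx responses from the API are retried without long stalls.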
# Function to load and clean the CSV data
def load_and_clean_csv(file_path):
    """Read a gzipped CSV, append a primaryImageSmall column to each
    well-formed row, and collect malformed lines separately."""
    valid_rows = []
    invalid_rows = []
    index = 0
    # Read the gzip file line by line
    with gzip.open(file_path, 'rt') as f:
        reader = csv.reader(f)
        header = next(reader)  # Read the header separately
        header.append("primaryImageSmall")  # Add the new column to the header
        valid_rows.append(header)
        expected_columns = len(header) - 1  # Exclude the new column
        for line in f:
            try:
                # Parse each line individually so one malformed row
                # cannot abort the whole file
                row = next(csv.reader([line]))
                if len(row) == expected_columns:
                    # Fetch primaryImageSmall from the API
                    # (column index 4 holds the object ID in this export)
                    object_id = row[4]
                    image_url = fetch_primary_image_small(object_id)
                    # Percent-encode spaces and other unsafe characters
                    # (e.g. en dashes) so the URL survives later parsing
                    image_url = urllib.parse.quote(image_url, safe=':/')
                    row.append(image_url)
                    valid_rows.append(row)
                    index += 1
                    if index % 100 == 0:
                        print(f"Fetched {index} image URLs")
                else:
                    invalid_rows.append(line)
            except Exception as e:
                print(e)
                invalid_rows.append(line)
    print(f"Found {len(invalid_rows)} invalid rows")
    return valid_rows, invalid_rows
# Function to fetch the primaryImageSmall URL from the Met Museum API
def fetch_primary_image_small(object_id):
    url = f"https://collectionapi.metmuseum.org/public/collection/v1/objects/{object_id}"
    try:
        response = s.get(url, verify=do_verify)
        response.raise_for_status()  # Raise an error for bad status codes
        data = response.json()
        return data.get("primaryImageSmall", "")
    except Exception as e:
        print(f"Error fetching image for object ID {object_id}: {e}")
        return ""
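# Illustrative shape of the objects endpoint response (fields trimmed;
# the object ID below is a placeholder, not a real record):
#   {"objectID": 12345,
#    "primaryImage": "https://images.metmuseum.org/CRDImages/...",
#    "primaryImageSmall": "https://images.metmuseum.org/CRDImages/...",
#    ...}
# Objects without photography return "" for both image fields, so the
# empty-string fallback above passes through cleanly.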
# Function to save the cleaned data to a new gzip CSV file
def save_cleaned_csv(valid_rows, output_path):
    with gzip.open(output_path, 'wt', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(valid_rows)
    print(f"Cleaned data saved to {output_path}")
def main():
    input_file = 'metadata.csv.gz'
    output_file = 'metadata_images.csv.gz'
    # Load and clean the data
    valid_rows, invalid_rows = load_and_clean_csv(input_file)
    # Save the cleaned data
    save_cleaned_csv(valid_rows, output_file)

if __name__ == "__main__":
    main()
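# To run (assuming the Met bulk metadata export has been saved as
# metadata.csv.gz next to this script; the script name is illustrative):
#   python add_met_images.py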