|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import requests |
|
import shutil |
|
import json |
|
|
|
import pandas as pd |
|
from checksum import get_checksums |
|
|
|
from tqdm import tqdm |
|
import os |
|
import sys |
|
import time |
|
import argparse |
|
|
|
|
|
# Columns the input CSV must contain; validated in main() before any
# downloads begin.
EXPECTED_COLS = ["CAMID",

"X",

"Image_name",

"file_url",

"Taxonomic_Name",

"record_number",

"Dataset"

]

# HTTP status codes worth retrying: 429 (rate limit) and transient
# server-side errors (500/502/503/504).
REDO_CODE_LIST = [429, 500, 502, 503, 504]

# Row index threshold above which an already-existing file is reported as a
# duplicate (rows at or below this index are assumed to be from a resumed run).
STARTING_INDEX = 0
|
|
|
|
|
def parse_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with:
            csv: path to the CSV file listing image URLs.
            output: main directory to download images into.
    """
    parser = argparse.ArgumentParser()
    # Bug fix: the original passed nargs="?" alongside required=True, which
    # let "--csv" / "--output" be given with no value, silently producing
    # None and crashing much later. Without nargs, argparse fails fast with
    # a clear usage error; all valid invocations behave the same.
    parser.add_argument("--csv", required=True, help="Path to CSV file with urls.")
    parser.add_argument("--output", required=True, help="Main directory to download images into.")

    return parser.parse_args()
|
|
|
|
|
def log_response(log_data, index, image, url, record_number, dataset, cam_id, response_code):
    """Store the details of one download attempt under `index` in `log_data`.

    `record_number` and `response_code` are stringified so entries serialize
    uniformly to JSON. Returns the updated `log_data` dict.
    """
    log_data[index] = {
        "Image": image,
        "file_url": url,
        "record_number": str(record_number),
        "dataset": dataset,
        "CAMID": cam_id,
        "Response_status": str(response_code),
    }

    return log_data
|
|
|
|
|
def update_log(log, index, filepath):
    """Append the pretty-printed JSON entry `log[index]` to the file at `filepath`.

    The entry is serialized with a 4-space indent and followed by a newline;
    the file is opened in append mode so successive entries accumulate.
    """
    entry_text = json.dumps(log[index], indent=4)
    with open(filepath, "a") as log_file:
        log_file.write(entry_text)
        log_file.write("\n")
|
|
|
|
|
def download_images(jiggins_data, image_folder, log_filepath, error_log_filepath):
    """Download every image listed in `jiggins_data` into `image_folder`.

    Each image is saved to `<image_folder>/<Taxonomic_Name>/<X>_<Image_name>`.
    Successful downloads are appended to `log_filepath`; failures (non-retryable
    HTTP statuses, or connection/retryable errors after retries are exhausted)
    are appended to `error_log_filepath`. Files already on disk are skipped.

    Parameters:
        jiggins_data: pandas DataFrame containing the EXPECTED_COLS columns.
        image_folder: root directory that images are downloaded into.
        log_filepath: path of the JSON success log.
        error_log_filepath: path of the JSON error log.
    """
    log_data = {}
    log_errors = {}

    for i in tqdm(range(len(jiggins_data))):
        species = jiggins_data["Taxonomic_Name"][i]
        image_name = str(jiggins_data["X"][i]) + "_" + jiggins_data["Image_name"][i]
        record_number = jiggins_data["record_number"][i]
        image_path = f"{image_folder}/{species}/{image_name}"

        if os.path.exists(image_path):
            # Already downloaded (e.g. a resumed run) -- report and skip.
            if i > STARTING_INDEX:
                # Bug fix: the original printed the entire "X" and "Image_name"
                # columns here (jiggins_data['X']) instead of the i-th values.
                print(f"duplicate image: {jiggins_data['X'][i]}, {jiggins_data['Image_name'][i]}, from record {record_number}")
            continue

        url = jiggins_data["file_url"][i]
        dataset = jiggins_data["Dataset"][i]
        cam_id = jiggins_data["CAMID"][i]

        redo = True
        max_redos = 2
        while redo and max_redos > 0:
            try:
                response = requests.get(url, stream=True)
            except Exception as e:
                # Connection-level failure: retry, logging once retries run out.
                max_redos -= 1
                if max_redos <= 0:
                    log_errors = log_response(log_errors,
                                              index=i,
                                              image=species + "/" + image_name,
                                              url=url,
                                              record_number=record_number,
                                              dataset=dataset,
                                              cam_id=cam_id,
                                              response_code=str(e))
                    update_log(log=log_errors, index=i, filepath=error_log_filepath)
                # Bug fix: the original fell through to `response.status_code`
                # here, raising NameError because `response` was never bound.
                continue

            if response.status_code == 200:
                redo = False

                log_data = log_response(log_data,
                                        index=i,
                                        image=species + "/" + image_name,
                                        url=url,
                                        record_number=record_number,
                                        dataset=dataset,
                                        cam_id=cam_id,
                                        response_code=response.status_code)
                update_log(log=log_data, index=i, filepath=log_filepath)

                # exist_ok=True: don't crash if the species folder was created
                # by a prior iteration or a concurrent process.
                os.makedirs(f"{image_folder}/{species}", exist_ok=True)

                with open(image_path, "wb") as out_file:
                    shutil.copyfileobj(response.raw, out_file)

            elif response.status_code in REDO_CODE_LIST:
                # Transient HTTP error: back off and retry, logging only on
                # the final failure.
                max_redos -= 1
                if max_redos <= 0:
                    log_errors = log_response(log_errors,
                                              index=i,
                                              image=species + "/" + image_name,
                                              url=url,
                                              record_number=record_number,
                                              dataset=dataset,
                                              cam_id=cam_id,
                                              response_code=response.status_code)
                    update_log(log=log_errors, index=i, filepath=error_log_filepath)
                else:
                    time.sleep(1)

            else:
                # Non-retryable HTTP error: log it and give up on this image.
                redo = False
                log_errors = log_response(log_errors,
                                          index=i,
                                          image=species + "/" + image_name,
                                          url=url,
                                          record_number=record_number,
                                          dataset=dataset,
                                          cam_id=cam_id,
                                          response_code=response.status_code)
                update_log(log=log_errors, index=i, filepath=error_log_filepath)

            # Release the streamed connection back to the pool (stream=True
            # keeps it open until read to completion or closed).
            response.close()

    return
|
|
|
def main():
    """Download images from a CSV of URLs, then record checksums and logs.

    Reads the CSV given by --csv, validates that it has all EXPECTED_COLS,
    downloads every listed image into the --output directory, and writes
    success/error logs plus a checksum CSV alongside the input CSV.
    Exits with an error message if required columns are missing.
    """
    args = parse_args()
    csv_path = args.csv
    image_folder = args.output

    # Bug fix: the original used csv_path.split(".")[0], which breaks for
    # paths containing dots (e.g. "./data/file.csv" -> ""). splitext only
    # strips the final extension.
    base_path = os.path.splitext(csv_path)[0]
    log_filepath = base_path + "_log.json"
    error_log_filepath = base_path + "_error_log.json"

    jiggins_data = pd.read_csv(csv_path, low_memory=False)

    # Fail fast with a clear message if the CSV lacks any required column.
    missing_cols = [col for col in EXPECTED_COLS if col not in jiggins_data.columns]
    if missing_cols:
        sys.exit(f"The CSV is missing column(s): {missing_cols}")

    download_images(jiggins_data, image_folder, log_filepath, error_log_filepath)

    # Record checksums of everything downloaded so the dataset can be verified.
    checksum_path = base_path + "_checksums.csv"
    get_checksums(image_folder, checksum_path)

    print(f"Images downloaded from {csv_path} to {image_folder}.")
    print(f"Checksums recorded in {checksum_path} and download logs are in {log_filepath} and {error_log_filepath}.")

    return
|
|
|
# Script entry point: run the full download-and-checksum pipeline.
if __name__ == "__main__":

    main()
|
|