import concurrent.futures
import glob
import os
import zipfile
from hashlib import md5

import requests

base_url = "https://huggingface.co./datasets/imageomics/KABR/resolve/main/KABR"

# To extend the dataset, add additional animals and part ranges to the list
# and dictionary below (see the hypothetical example after the dictionary).
animals = ["giraffes", "zebras_grevys", "zebras_plains"]

animal_parts_range = {
    "giraffes": ("aa", "ad"),
    "zebras_grevys": ("aa", "am"),
    "zebras_plains": ("aa", "al"),
}
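
# A hypothetical example of extending the dataset (this animal and range are
# made up for illustration and do not exist in the KABR repository):
#
#     animals.append("elephants")
#     animal_parts_range["elephants"] = ("aa", "ac")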

dataset_prefix = "dataset/image/"

static_files = [
    "README.txt",
    "annotation/classes.json",
    "annotation/distribution.xlsx",
    "annotation/train.csv",
    "annotation/val.csv",
    "configs/I3D.yaml",
    "configs/SLOWFAST.yaml",
    "configs/X3D.yaml",
    "dataset/image2video.py",
    "dataset/image2visual.py",
]

def generate_part_files(animal, start, end):
    """Build the list of split-archive paths for one animal.

    Assumes the two-letter part suffixes form a dense grid between
    ``start`` and ``end`` (true for the current ranges, which all share
    the first letter "a").
    """
    start_a, start_b = ord(start[0]), ord(start[1])
    end_a, end_b = ord(end[0]), ord(end[1])
    return [
        f"{dataset_prefix}{animal}_part_{chr(a)}{chr(b)}"
        for a in range(start_a, end_a + 1)
        for b in range(start_b, end_b + 1)
    ]
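
# For example, generate_part_files("giraffes", "aa", "ad") returns
# ["dataset/image/giraffes_part_aa", "dataset/image/giraffes_part_ab",
#  "dataset/image/giraffes_part_ac", "dataset/image/giraffes_part_ad"].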

part_files = [
    part
    for animal, (start, end) in animal_parts_range.items()
    for part in generate_part_files(animal, start, end)
]

archive_md5_files = [f"{dataset_prefix}{animal}_md5.txt" for animal in animals]

files = static_files + archive_md5_files + part_files

def progress_bar(iteration, total, message, bar_length=50):
    """Render a one-line progress bar that overwrites itself via carriage return."""
    progress = iteration / total
    bar = '=' * int(round(progress * bar_length))
    spaces = ' ' * (bar_length - len(bar))
    message = f'{message:<100}'  # pad so shorter messages fully overwrite longer ones
    print(f'[{bar + spaces}] {int(progress * 100)}% {message}', end='\r', flush=True)
    if iteration == total:
        print()
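
# For example, progress_bar(3, 10, "downloaded: x") redraws the current line
# with a 30%-filled bar (15 "=" characters followed by 35 spaces).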

save_dir = "KABR_files"

print("Downloading the Kenyan Animal Behavior Recognition (KABR) dataset ...")

total = len(files)
for i, file_path in enumerate(files):
    save_path = os.path.join(save_dir, file_path)

    if os.path.exists(save_path):
        print(f"File {save_path} already exists. Skipping download.")
        continue

    full_url = f"{base_url}/{file_path}"
    os.makedirs(os.path.join(save_dir, os.path.dirname(file_path)), exist_ok=True)

    response = requests.get(full_url)
    response.raise_for_status()  # fail loudly instead of saving an HTML error page
    with open(save_path, 'wb') as file:
        file.write(response.content)

    progress_bar(i + 1, total, f"downloaded: {save_path}")

print("Download of repository contents completed.")

print(f"Concatenating split files into a full archive for {animals} ...")

def concatenate_files(animal):
    """Reassemble an animal's split archive parts into a single zip."""
    print(f"Concatenating files for {animal} ...")
    pattern = f"{save_dir}/dataset/image/{animal}_part_*"
    parts = sorted(glob.glob(pattern))  # lexicographic order matches the aa, ab, ... naming
    if parts:
        with open(f"{save_dir}/dataset/image/{animal}.zip", 'wb') as f_out:
            for f_name in parts:
                with open(f_name, 'rb') as f_in:
                    f_out.write(f_in.read())
        print(f"Archive for {animal} concatenated.")
    else:
        print(f"No part files found for {animal}.")

with concurrent.futures.ThreadPoolExecutor() as executor:
    # Consume the iterator so any exception raised in a worker is re-raised here.
    list(executor.map(concatenate_files, animals))

def verify_and_extract(animal):
    """Check the reassembled zip against its published MD5, then unpack it."""
    print(f"Confirming data integrity for {animal}.zip ...")
    zip_path = f"{save_dir}/dataset/image/{animal}.zip"

    # Hash in chunks so multi-gigabyte archives are not read into memory at once.
    hasher = md5()
    with open(zip_path, 'rb') as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            hasher.update(chunk)
    zip_md5 = hasher.hexdigest()

    with open(f"{save_dir}/dataset/image/{animal}_md5.txt", 'r') as file:
        expected_md5 = file.read().strip().split()[0]

    if zip_md5 == expected_md5:
        print(f"MD5 sum for {animal}.zip is correct.")
        print(f"Extracting {animal}.zip ...")
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(f"{save_dir}/dataset/image/")
        print(f"{animal}.zip extracted.")
        print(f"Cleaning up for {animal} ...")
        os.remove(zip_path)
        os.remove(f"{save_dir}/dataset/image/{animal}_md5.txt")
    else:
        print(f"MD5 sum for {animal}.zip is incorrect. Expected: {expected_md5}, but got: {zip_md5}.")
        print("There may be data corruption. Please try to download and reconstruct the data again, or reach out to the corresponding authors for assistance.")

with concurrent.futures.ThreadPoolExecutor() as executor:
    # As above, consume the iterator so worker exceptions are not silently dropped.
    list(executor.map(verify_and_extract, animals))

print("Download and setup complete.")