import requests
from bs4 import BeautifulSoup
import re
import json
import pandas as pd
import os
import threading
import concurrent.futures
from datasets import load_dataset


def download_genbank(genbank_file):
    """Download the GenBank file at *genbank_file* (a URL).

    Returns the response body as text on HTTP 200, otherwise None
    (including when *genbank_file* is falsy).
    """
    if genbank_file:
        response = requests.get(genbank_file)
        if response.status_code == 200:
            return response.text
    return None


# Clean text by removing excessive spaces and unwanted content
def clean_text(text):
    """Collapse all whitespace runs to single spaces and strip the
    '(How to cite)' boilerplate that Addgene appends to some fields."""
    return ' '.join(text.split()).strip().replace('(How to cite)', '')


# Fetch and parse the webpage content
def fetch_page_content(plasmid_id):
    """Fetch the Addgene page for *plasmid_id* and return it as a parsed
    BeautifulSoup tree (html.parser backend)."""
    url = f"https://www.addgene.org/{plasmid_id}/"
    response = requests.get(url)
    return BeautifulSoup(response.content, 'html.parser')


# Generic function to extract data based on element class or id
def extract_text(soup, identifier, tag='div', attr='class', fallback=None):
    """Return the cleaned text of the first *tag* element whose *attr*
    matches *identifier*, or *fallback* when no such element exists."""
    element = soup.find(tag, {attr: identifier})
    return clean_text(element.get_text(strip=True)) if element else fallback


# Extract the flame status (high, medium, or low) based on class
def extract_flame_status(soup):
    """Return the plasmid popularity flame as 'High', 'Medium' or 'Low',
    or None when the flame widget is absent or has no recognized class."""
    flame_container = soup.find('div', id='plasmid-flame-container')
    if flame_container:
        flame_tag = flame_container.find('span', class_='addgene-flame-with-popover')
        if flame_tag:
            if 'addgene-flame-high' in flame_tag['class']:
                return 'High'
            elif 'addgene-flame-medium' in flame_tag['class']:
                return 'Medium'
            elif 'addgene-flame-low' in flame_tag['class']:
                return 'Low'
    return None


# Extract key-value pairs like Depositing Lab and Publication
def extract_field_label_content(soup, label_text):
    """Return the cleaned text of the 'field-content' div that follows the
    'field-label' div whose text equals *label_text*, or None if missing."""
    label = soup.find('div', class_='field-label', string=label_text)
    if label:
        content = label.find_next('div', class_='field-content')
        return clean_text(content.get_text(strip=True)) if content else None
    return None


def get_sequence(plasmid_id):
    """Locate the best available GenBank download link on the plasmid's
    /sequences/ page.

    Sections are tried in priority order (Addgene full, Depositor full,
    Addgene partial, Depositor partial); the first one containing a
    'genbank-file-download' anchor wins.

    Returns:
        (genbank_link, sequence_type): the href and 'full'/'partial', or
        (None, None) when the page cannot be fetched or holds no link.
        BUG FIX: the original failure path returned a 4-tuple
        (None, None, None, None), which raised ValueError in the caller's
        two-value unpack; it could also return a stale 'partial' type with
        a None link when no section matched.
    """
    sequence_url = f"https://www.addgene.org/{plasmid_id}/sequences/"
    response = requests.get(sequence_url)
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, 'html.parser')
        # Priority order: Addgene full, Depositor full, Addgene partial, Depositor partial
        for section_id in ['addgene-full', 'depositor-full', 'addgene-partial', 'depositor-partial']:
            sequence_type = 'full' if 'full' in section_id else 'partial'
            section = soup.find('section', {'id': section_id})
            if section:
                genbank_tag = section.find('a', {'class': 'genbank-file-download'})
                if genbank_tag:
                    # Exit as soon as the highest-priority sequence is found.
                    return genbank_tag['href'], sequence_type
    return None, None


# Extract GenBank and SnapGene file links
def extract_file_links(soup, plasmid_id):
    """Return {'GenBank File': href_or_None, 'Sequence Type': 'full'|'partial'|None}.

    A download link directly on the main page is assumed to be a full
    sequence; otherwise the /sequences/ page is consulted via
    get_sequence().
    """
    files = {'GenBank File': None}
    genbank_link = soup.find('a', class_='genbank-file-download')
    if genbank_link:
        files['GenBank File'] = genbank_link['href']
        files['Sequence Type'] = 'full'
    else:
        genbank_link, sequence_type = get_sequence(plasmid_id)
        files['GenBank File'] = genbank_link
        files['Sequence Type'] = sequence_type
    # NOTE(review): SnapGene extraction disabled; class name 'snpagene-file-download'
    # looks like a typo for 'snapgene-file-download' — verify before re-enabling.
    # snapgene_link = soup.find('a', class_='snpagene-file-download')
    # if snapgene_link:
    #     files['SnapGene File'] = snapgene_link['href']
    return files


# Extract content from sections based on

headers def extract_sections(soup): sections = {} for section_header in soup.find_all('h2'): section_title = clean_text(section_header.get_text(strip=True)) if section_title.startswith('Information for'): continue section_data = {} # Find the next