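# Scrape Ho Xuan Huong's poems (Nom text and translation) from the Nom
# Foundation site with Selenium and BeautifulSoup, then save one CSV row
# per poem line.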
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup
import pandas as pd
import time

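# Run Chrome headless, with flags that keep it stable in containers/CI.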
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")

driver = webdriver.Chrome(options=chrome_options)
driver.set_page_load_timeout(30)

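# Open the paginated poem index (Vietnamese UI).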
driver.get("https://www.nomfoundation.org/nom-project/Ho-Xuan-Huong/Ho-Xuan-Huong-of-poems?uiLang=vn")

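# Column-wise accumulator: one row per poem line.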
data = {"Poem_Title": [], "Line_Number": [], "Nom_Text": [], "Translation": []}

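# Number of index pages to walk.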
num_pages = 4

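# For each index page: wait until the pagination widget is present (a sign
# the page has rendered), then parse the page and visit every poem link.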
for page in range(num_pages):
    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "pagination")))
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    
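    # Poem links live in the index table; the link text is split on " - "
    # and the leading part is taken as the title.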
    for poem_link in soup.select('table a'):
        title = poem_link.text.split(" - ")[0].strip()
        link = "https://www.nomfoundation.org" + poem_link['href']

        try:
            driver.get(link)
        except TimeoutException:
            # Skip poems whose pages fail to load within the 30 s timeout.
            print(f"Timeout while accessing: {link}")
            continue

        poem_soup = BeautifulSoup(driver.page_source, 'html.parser')

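        # Site-specific markup: the Nom text sits in td.hnTextBodyGray and
        # the translation in td.alt2.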
        nom_text_element = poem_soup.select_one('td.hnTextBodyGray')
        translation_text_element = poem_soup.select_one('td.alt2')

        if nom_text_element and translation_text_element:
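            # Lines are separated by <br>; normalize both <br> and <br/>
            # spellings, split on them, and strip any residual markup.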
            nom_lines = nom_text_element.decode_contents().replace('<br>', '<br/>').split('<br/>')
            nom_lines = [BeautifulSoup(line, 'html.parser').get_text(strip=True) for line in nom_lines if line.strip()]

            translation_lines = translation_text_element.decode_contents().replace('<br>', '<br/>').split('<br/>')
            translation_lines = [BeautifulSoup(line, 'html.parser').get_text(strip=True) for line in translation_lines if line.strip()]

            # zip() pairs Nom and translation lines and silently drops any
            # trailing lines when the two counts differ. The lines are
            # already plain stripped text, so no further parsing is needed.
            for line_number, (nom_line, translation_line) in enumerate(zip(nom_lines, translation_lines), start=1):
                data["Poem_Title"].append(title)
                data["Line_Number"].append(line_number)
                data["Nom_Text"].append(nom_line)
                data["Translation"].append(translation_line)
                print(f"Recorded: {title} - line {line_number}")

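        # Return to the index page before visiting the next poem.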
        driver.back()
        time.sleep(1)

    # Advance to the next index page. This assumes the link at index
    # page + 1 in the pagination widget points at the next page.
    pagination = driver.find_element(By.CLASS_NAME, "pagination")
    page_links = pagination.find_elements(By.TAG_NAME, "a")

    if page + 1 < len(page_links):
        next_page_button = page_links[page + 1]
        next_page_button.click()
        time.sleep(2)
    else:
        print("No next page.")
        break

driver.quit()

df = pd.DataFrame(data)
df_cleaned = df.dropna()
# utf-8-sig adds a BOM so the Vietnamese text opens correctly in Excel.
df_cleaned.to_csv("ho_xuan_huong_poems_full.csv", index=False, encoding="utf-8-sig")
print("Data saved successfully to ho_xuan_huong_poems_full.csv!")