re-add indexer
- .gitignore +0 -1
- index.py +94 -0
.gitignore
CHANGED
@@ -5,5 +5,4 @@ all
 dictionary
 *.txt
 trash
-*.py
 urls
index.py
ADDED
@@ -0,0 +1,94 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Simple script to generate metadata about corpus.
"""

__author__ = "Amittai Siavava"
__version__ = "0.0.1"

from os import mkdir
from collections import Counter
import csv
import pandas as pd  # only used by the commented-out load step at the bottom

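# NOTE: corpus layout assumed by both functions below (inferred from the
# reads; the commit itself does not document it): for each docID,
# ../log/{docID} holds three metadata lines -- title, year, url -- and
# ../log/{docID}.txt holds the page text. For example (hypothetical values):
#
#   ../log/0      three lines: "Some Title", "2015", "https://example.com/a"
#   ../log/0.txt  the full extracted text of that page
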
def index_pages():
    """
    Generate a friendly index of the pages.

    We create a csv and a tsv (in case one proves more convenient than the other).
    """
    docID = 0

    with open("raw.csv", "w") as csv_file, open("urls", "w") as urls:
        writer = csv.writer(csv_file)
        writer.writerow(["id", "year", "title", "url", "text"])
        while True:
            try:
                with open(f"../log/{docID}", "r") as meta, open(f"../log/{docID}.txt", "r") as data:
                    title = meta.readline().strip()
                    year = meta.readline().strip()
                    url = meta.readline().strip()
                    text = data.read()
                    # the with block closes both files; no explicit close() needed

                # isdigit() guards the int() conversion so a malformed year
                # is skipped rather than aborting the whole loop
                if year.isdigit() and 2000 <= int(year) <= 2023:
                    print(f"Indexing: {docID}")
                    # csv.writer already quotes fields; wrapping values in
                    # extra quotes would embed literal quote characters
                    writer.writerow([docID, year, title, url, text])
                    # urls.write(f"{url}\n")
                docID += 1
            except FileNotFoundError as e:
                # the first missing docID marks the end of the corpus
                print(e)
                break

    print("Done.")
    # save to file

    # df = pd.read_csv("raw.csv")

def categorize():
    """
    Categorize the pages by year.
    """
    docID = 0
    years = Counter()
    years_index = {str(year) for year in range(2000, 2024)}
    while True:
        try:
            with open(f"../log/{docID}.txt", "r") as doc, open(f"../log/{docID}", "r") as meta:
                title = meta.readline().strip()
                year = meta.readline().strip()
                url = meta.readline().strip()
                text = doc.read()

            if year in years_index:
                try:
                    mkdir(f"../categorized/{year}")
                except FileExistsError:
                    pass  # directory already exists

                # per-year sequence number (renamed from `id`, which shadows the builtin)
                doc_num = years[year]
                with open(f"../categorized/{year}/{doc_num}.txt", "w") as f:
                    f.write(f"{title}\n{year}\n{url}\n\n{text}")
                years[year] += 1

            docID += 1

        except FileNotFoundError:
            # stop at the first missing docID
            break


if __name__ == "__main__":
    index_pages()
    # categorize()
    # load_data()
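
The commit imports pandas and leaves both the read-back step and a load_data() call commented out. A minimal sketch of what that step might look like, assuming the raw.csv layout written by index_pages() above (load_data is only referenced in a comment and is not part of this commit):

import pandas as pd

def load_data():
    # read the index written by index_pages(); the column names match the
    # header row it writes: id, year, title, url, text
    df = pd.read_csv("raw.csv")
    print(df.groupby("year").size())  # quick sanity check: documents per year
    return df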