update load script

Files changed:
- README.md (+4 -4)
- analytics.ipynb (+0 -0)
- index.py (+7 -5)
README.md CHANGED
@@ -28,10 +28,10 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes:
-    num_examples:
-  download_size:
-  dataset_size:
+    num_bytes: 180820047
+    num_examples: 17092
+  download_size: 81702923
+  dataset_size: 180820047
 ---
 
 # AI/Tech Dataset
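The filled-in `dataset_info` block declares a single `train` split of 17,092 examples (about 181 MB of data, 82 MB to download). A minimal loading sketch with the Hugging Face `datasets` library follows; the repository id is a placeholder, since the actual dataset name is not visible in this commit view, and the exact column names are not shown in the diff:

```python
# Sketch only: "user/ai-tech-dataset" is a placeholder repository id,
# not the real dataset name (which this page does not show).
from datasets import load_dataset

ds = load_dataset("user/ai-tech-dataset", split="train")

print(ds.num_rows)   # should match the new metadata: num_examples: 17092
print(ds.features)   # string-typed columns, as declared in the README front matter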
analytics.ipynb CHANGED
The diff for this file is too large to render; see the raw diff.
index.py CHANGED
@@ -21,14 +21,18 @@ def index_pages():
     """
     docID = 0
 
-    with open("
+    with open("raw2.csv", "w") as csv_file:
         writer = csv.writer(csv_file)
         writer.writerow(["id", "year", "title", "url", "text"])
         while True:
             try:
-                with open(f"../log/{docID}", "r") as meta, open(f"../log/{docID}.txt", "r") as data:
+                with open(f"../data/log/{docID}", "r") as meta, open(f"../data/log/{docID}.txt", "r") as data:
                     title = meta.readline().strip()
                     year = meta.readline().strip()
+                    # if year starts with '|', append to title and read next line
+                    if year.startswith("|"):
+                        title += " " + year
+                        year = meta.readline().strip()
                     url = meta.readline().strip()
                     text = data.read()
 
@@ -36,13 +40,12 @@ def index_pages():
                     data.close()
 
                     if year and 2000 <= int(year) <= 2023:
-
                         print(f"Indexing: {docID}")
                         writer.writerow([docID, year, f'"{title}"', f'"{url}"', f'"{text}"'])
-                        # urls.write(f"{url}\n")
                     docID += 1
             except Exception as e:
                 print(e)
+                print(f"{docID = }")
                 break
 
     print("Done.")
@@ -81,7 +84,6 @@ def categorize():
             f.write(f"{title}\n{year}\n{url}\n\n{text}")
            f.close()
            years[year] += 1
-
            docID += 1
 
        except:
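The main behavioral change in `index_pages()` is the title-continuation handling: when a scraped title wraps onto a second line that begins with `|`, that line used to be misread as the year. Below is a minimal, self-contained sketch of the same parsing rule, using `io.StringIO` in place of a `../data/log/{docID}` metadata file; the sample title, year, and URL are invented purely for illustration:

```python
import io

# Stand-in for a ../data/log/{docID} metadata file whose title wraps onto
# a second line beginning with '|' (contents invented for illustration).
meta = io.StringIO(
    "Some Long Article Title\n"
    "| The Wrapped Part\n"
    "2021\n"
    "https://example.com/article\n"
)

title = meta.readline().strip()
year = meta.readline().strip()
# Same rule as the patched script: a '|' line belongs to the title,
# so append it and read the real year from the next line.
if year.startswith("|"):
    title += " " + year
    year = meta.readline().strip()
url = meta.readline().strip()

print(title)  # Some Long Article Title | The Wrapped Part
print(year)   # 2021
print(url)    # https://example.com/article
```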
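For completeness, a consumer-side sketch of reading the generated `raw2.csv` with the standard `csv` module. Because the script wraps `title`, `url`, and `text` in explicit quote characters before `csv.writer` applies its own quoting, the values come back with literal surrounding quotes, which a reader may want to strip; this is an illustrative sketch, not part of the committed script:

```python
import csv

with open("raw2.csv", newline="") as f:
    reader = csv.reader(f)
    header = next(reader)  # ["id", "year", "title", "url", "text"]
    for doc_id, year, title, url, text in reader:
        # The indexer pre-wraps these fields in quote characters,
        # so strip them when consuming the file.
        title = title.strip('"')
        url = url.strip('"')
        text = text.strip('"')
        print(doc_id, year, title, url)
        break  # show only the first row
```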