Upload Scripts/RedditThreader.py with huggingface_hub
- Scripts/RedditThreader.py +516 -0
Scripts/RedditThreader.py
ADDED
@@ -0,0 +1,516 @@
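# RedditThreader rebuilds per-subreddit Reddit submission/comment JSONL dumps into threaded
# conversations, writing one JSONL file of threads per subreddit. Comments are staged in a
# throwaway SQLite database, pruned (low score, excessive depth, purged ancestors), re-nested
# under their parents, and finally flattened into a "namedconversation" message list.
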
import datetime
import gc
import itertools
import multiprocessing
import pathlib
import random
from typing import Generator, Optional
from urllib.parse import urlparse

import natural.number
import orjson
import peewee
import tqdm
import typer
from loguru import logger
from loguru._logger import Logger
from playhouse.sqlite_ext import JSONField, SqliteExtDatabase

app = typer.Typer()

GB = 2**30

logger.add("RedditThreader_{time}.log", rotation="10 MB", enqueue=True)


def read_lines_jsonl(file_name, chunk_size=GB // 2):
    with open(file_name, "rb") as file_handle:
        buffer = b""
        while True:
            chunk = file_handle.read(chunk_size)

            if not chunk:
                break
            lines = (buffer + chunk).split(b"\n")

            for line in lines[:-1]:
                yield line.strip()

            buffer = lines[-1]


def grouper(n, iterable: Generator):
    """
    >>> list(grouper(3, iter('ABCDEFG')))
    [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
    """
    return iter(lambda: list(itertools.islice(iterable, n)), [])


def base36encode(number):
    if not isinstance(number, int):
        raise TypeError("number must be an integer")
    is_negative = number < 0
    number = abs(number)

    alphabet, base36 = ["0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ", ""]

    while number:
        number, i = divmod(number, 36)
        base36 = alphabet[i] + base36
    if is_negative:
        base36 = "-" + base36

    return base36 or alphabet[0]


# Sometimes pushshift might send int ids. Fix for those.
def transform_ids(post_id: str | int):
    if isinstance(post_id, int):
        return base36encode(post_id).lower()
    return post_id


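# Reddit ids are base36 strings; rows in the staging table below are keyed with Reddit's
# fullname prefixes, "t3_" for submissions and "t1_" for comments, so a comment's parent_id
# (e.g. "t3_abc123") can be matched directly against another row's id.
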
SHN_NETLOC_REWRITE = {
    "x.com": "twitter.com",
    "v.redd.it": "video.reddit.com",
    "i.redd.it": "image.reddit.com",
}

# Default Reddit filter for comment threads with < -4 upvotes
RDT_SCORE = -4
# RES: Custom Comment Depth
# 50 Comments: Minimum to activate this feature.
# 6: Any comment more than depth 6 is purged
RES_DEPTH = [50, 6]
# Minimum No. of Comments to consider adding a thread:
# - We have at least 5 comments OR
# - The root submission's text is more than 2500 characters. (Probably worth fetching)
SHN_MIN_REPLIES = 5
SINGLE_COMMENT_MIN = 2500
# Fuzzy selection to prune subreddits around this range.
FUZZY_SUBREDDIT = (5, 20)


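# Example of the thresholds above: in a thread with more than 50 comments (RES_DEPTH[0]),
# every reply nested deeper than 6 levels (RES_DEPTH[1]) is purged, and any comment scored
# at or below RDT_SCORE (-4) is purged regardless of depth.
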
def flatten_thread(reply_thread: dict, working_list: list[dict]):
    # Add the current reply to the list
    working_list.append({k: v for k, v in reply_thread.items() if k != "children"})
    if reply_thread["children"]:
        for sub_reply in reply_thread["children"]:
            working_list = flatten_thread(sub_reply, working_list)
    return working_list


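# flatten_thread walks a nested reply dict depth-first. Toy input:
#   flatten_thread({"id": "a", "children": [{"id": "b", "children": []}]}, [])
# returns [{"id": "a"}, {"id": "b"}], a pre-order flattening with the "children" key dropped.
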
def try_get_netloc(url: str):
    try:
        return urlparse(url).netloc
    except Exception:
        return url


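# Input records come from per-subreddit <Subreddit>_Submission / <Subreddit>_Comments JSONL
# dumps. Judging by the field accesses below, each record is expected to carry at least:
# id, sub.name, author.name, score, created (unix seconds), text and flags, plus thread_id
# and parent_id on comments and title and url on submissions.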
def rethread_subreddit(
    db_path: pathlib.Path,
    submissions: pathlib.Path,
    comments: pathlib.Path,
    subreddit_file: pathlib.Path,
    global_logger: Optional[Logger] = None,
    hide_pbars: bool = False,
    wipe_db_afterdone: bool = True,
):
    if global_logger:
        globals()["logger"] = global_logger
    if db_path.is_file():
        db_path.unlink()
    db_sqlite = SqliteExtDatabase(
        str(db_path.resolve()),
        pragmas={"journal_mode": "off", "locking_mode": "exclusive", "synchronous": 0},
    )

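    # The per-subreddit SQLite file is only a scratch staging area: it is recreated on every
    # run and, when wipe_db_afterdone is set, deleted at the end, so journaling and syncing
    # are disabled purely for speed.
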
    class BaseModel(peewee.Model):
        class Meta:
            database = db_sqlite

    class SubComment(BaseModel):
        id = peewee.CharField(unique=True)
        thread_id = peewee.CharField(index=True)
        parent_id = peewee.CharField(
            index=True,
        )
        subreddit = peewee.CharField()
        is_sub = peewee.BooleanField()
        data = JSONField()

    SubComment.create_table()

    def jsonl_generator(file: pathlib.Path):
        for line in read_lines_jsonl(file, chunk_size=GB):
            yield orjson.loads(line)

    for batch in tqdm.tqdm(
        grouper(30_000, jsonl_generator(submissions)),
        desc="Submission Batches",
        disable=hide_pbars,
    ):
        # fixup for ids
        for sub in batch:
            sub["id"] = transform_ids(sub["id"])
        batch = [
            dict(
                id=f't3_{sub["id"]}',
                thread_id=f't3_{sub["id"]}',
                parent_id="",
                subreddit=sub["sub"]["name"],
                data=sub,
                is_sub=True,
            )
            for sub in batch
        ]
        # print(len(batch))
        with db_sqlite.transaction():
            SubComment.insert_many(batch).execute()
        # print(r)
        del batch
        gc.collect()

    for batch in tqdm.tqdm(
        grouper(30_000, jsonl_generator(comments)),
        desc="Comment Batches",
        disable=hide_pbars,
    ):
        # fixup for ids
        for sub in batch:
            sub["id"] = transform_ids(sub["id"])
        batch = [
            dict(
                id=f't1_{sub["id"]}',
                thread_id=sub["thread_id"],
                parent_id=sub["parent_id"] if sub["parent_id"] else "",
                subreddit=sub["sub"]["name"],
                data=sub,
                is_sub=False,
            )
            for sub in batch
        ]
        # print(batch)
        SubComment.insert_many(batch).on_conflict_replace().execute()
        del batch
        gc.collect()

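    # Both dumps now share one table: submissions as t3_<id> rows with an empty parent_id,
    # comments as t1_<id> rows. The on_conflict_replace() above keeps the last copy of any
    # comment id that appears more than once in the dump.
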
    thread_query = (
        SubComment.select(SubComment.thread_id, SubComment.data, SubComment.subreddit)
        .where(SubComment.is_sub == True)
        .distinct()
    )

    # depth_data layout: [depth, accumulated score, own score, purge reason, parent id, comment data]
    depth_defaults = [0, 0, 0, "", "", {}]

    thread_count = thread_query.count()
    logger.debug(
        f"Making Threads for /r/{db_path.stem}: {thread_count} threads found. Initial pass for potential threads."
    )
    # Initial pass
    usable_threads = 0
    for _, prethread_row in enumerate(db_sqlite.execute(thread_query)):
        # Get comment counts
        comment_query = SubComment.select(
            SubComment.id, SubComment.parent_id, SubComment.data
        ).where(
            SubComment.thread_id == prethread_row[0],
            SubComment.is_sub == False,
            SubComment.parent_id != "",
        )
        # Count number of comments.
        pretotal_comments = comment_query.count()
        preroot_submission = orjson.loads(prethread_row[1])

        if pretotal_comments >= SHN_MIN_REPLIES or (
            preroot_submission["text"]
            and len(preroot_submission["text"]) > SINGLE_COMMENT_MIN
        ):
            usable_threads += 1

    # Check for subreddit inclusion
    fuzz_threads = random.randrange(FUZZY_SUBREDDIT[0], FUZZY_SUBREDDIT[1])
    if usable_threads <= fuzz_threads:
        logger.debug(
            f"/r/{db_path.stem} has {usable_threads} usable threads, at or below the fuzzy cutoff of {fuzz_threads} (range {FUZZY_SUBREDDIT}). Skipping subreddit entirely..."
        )
        db_sqlite.close()
        if db_path.is_file():
            db_path.unlink()
        return

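    # Two passes over the same thread query: the count above decides whether the whole
    # subreddit is worth keeping before any output file is created; the loop below does the
    # actual thread assembly and writes one JSON object per qualifying thread.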
    logger.debug(
        f"Init Search Done. Found {usable_threads} for /r/{db_path.stem}. Making threads..."
    )

    with open(subreddit_file, "wb") as subreddit_fp:
        for thread_idx, thread_row in enumerate(db_sqlite.execute(thread_query)):
            # Get comment counts
            comment_query = SubComment.select(
                SubComment.id, SubComment.parent_id, SubComment.data
            ).where(SubComment.thread_id == thread_row[0], SubComment.is_sub == False)
            # Count number of comments.
            total_comments = comment_query.count()
            root_submission = orjson.loads(thread_row[1])
            # logger.debug("Compute Depth Stats")
            depth_counter = {}

            if total_comments >= SHN_MIN_REPLIES or (
                root_submission["text"]
                and len(root_submission["text"]) > SINGLE_COMMENT_MIN
            ):
                pass
            else:
                continue

            # Compute depth mapping
            for comment_id, _, comment_data in db_sqlite.execute(comment_query):
                comment_data = orjson.loads(comment_data)

                parent_depth_data = depth_counter.get(
                    comment_data["parent_id"], depth_defaults
                )

                # There is probably a better way to do this, but whatever lol.
                depth_data = [
                    parent_depth_data[0] + 1,
                    parent_depth_data[1] + comment_data["score"],
                    comment_data["score"],
                    parent_depth_data[3],
                    comment_data["parent_id"],
                    comment_data,
                ]
                if not depth_data[3]:
                    if depth_data[2] <= RDT_SCORE:
                        depth_data[3] = f"[Rdt] <{RDT_SCORE} Votes"
                    elif total_comments > RES_DEPTH[0] and depth_data[0] > RES_DEPTH[1]:
                        depth_data[3] = "[RES] TComment Thr"
                    elif depth_data[1] < 0 and depth_data[2] != depth_data[3]:
                        depth_data[3] = "[Shn] Accumulated Score"
                else:
                    depth_data[3] = "Purged from Parent"

                depth_counter.setdefault(
                    comment_id,
                    depth_data,
                )

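            # Purge reasons are inherited: depth_data[3] starts as the parent's reason, so once a
            # comment is pruned (score at or below RDT_SCORE, nested too deep in a big thread, or a
            # negative accumulated score), every descendant is tagged "Purged from Parent" and is
            # skipped in the rewire pass below.
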
            # thread_file.write_bytes(orjson.dumps(depth_counter, option=orjson.OPT_INDENT_2))
            comments_lookup = {}
            all_comments_data = []

            for comment_id, parent_id, comment_data in tqdm.tqdm(
                db_sqlite.execute(comment_query),
                desc="Rewire query...",
                disable=hide_pbars,
            ):
                # Yes we do a 2nd json load but it's fast.
                comment_data = orjson.loads(comment_data)
                if depth_counter.get(comment_id, depth_defaults)[3]:
                    continue
                comments_lookup[comment_id] = comment_data
                all_comments_data.append(comment_data)

            # Mark as "purgable". We don't use it anymore here.
            del depth_counter
            gc.collect()

            # A bit of code was from chatgpt but I have to rewrite a bunch of it anyway

            # As every comment should be a reply to *something*, it's safe to sort them by creation time.
            comments_lookup = {
                k: v
                for k, v in sorted(
                    comments_lookup.items(), key=lambda item: int(item[1]["created"])
                )
            }

            for comment in all_comments_data:
                comment["children"] = []
            root_comments = []
            for post in tqdm.tqdm(
                all_comments_data, desc="Make sorted", disable=hide_pbars
            ):
                # parent_id or id can still be ints.
                # We drop any remaining ints since ids are resolved beforehand now.
                parent_id = post["parent_id"]
                if isinstance(parent_id, int) or isinstance(post["id"], int):
                    continue
                subdebug = f"<https://reddit.com/r/{post['sub']['name']}/comments/{post['thread_id'][3:]}/a/{post['id']}>"
                if not isinstance(parent_id, str):
                    # logger.warning(f"{parent_id} is not a valid string. {subdebug}")
                    continue
                if parent_id.startswith("t3_"):
                    root_comments.append(post)
                else:
                    if parent_id not in comments_lookup:
                        if len(comments_lookup) < 10:
                            logger.warning(comments_lookup)
                        # This *should* not happen, but when it does we just warn and skip it.
                        # In practice it does happen, though it's fairly uncommon.
                        logger.warning(
                            f"{parent_id} doesn't seem to exist for {subdebug}"
                        )
                        continue
                    parent_post = comments_lookup[parent_id]
                    # This works because comments_lookup and all_comments_data hold references to
                    # the same dict objects, so appending to the parent's "children" mutates the
                    # shared node and the tree grows in place.
                    parent_post["children"].append(post)
            # Again, we clear up 2 unused variables.
            del comments_lookup, all_comments_data
            gc.collect()

            # After depth sorting, we reflatten it into a list.

            # Sort roots by the root comment's own score, highest first.
            # This sorts it based on "Top".
            # Reddit stopped exposing downvotes to the public, so we can't replicate "Best";
            # otherwise I would have just used "Best".
            root_comments = sorted(
                root_comments, key=lambda comment: comment["score"], reverse=True
            )
            flatten_comments = []
            for root_comment in root_comments:
                flatten_comments.extend(flatten_thread(root_comment, []))
            flatten_comments.insert(0, root_submission)

            # Conversion to namedconversation.

            def to_namedconversation():
                conversation = []
                for comment in flatten_comments:
                    time = datetime.datetime.fromtimestamp(
                        int(comment["created"]), tz=datetime.UTC
                    ).strftime("%d %b %Y, %H:%M:%S")
                    comment_fmt = {
                        "sender": comment["author"]["name"]
                        if comment["author"]
                        else "[deleted]",
                        "message": "",
                    }
                    if "title" in comment:
                        text = f"[{time}] {comment['title']}\n\n"
                        if "M" in comment["flags"]:
                            text = "[R-18] " + text

                        if "url" in comment and comment["url"]:
                            netloc = try_get_netloc(comment["url"])

                            if not netloc.endswith(("www.reddit.com", "reddit.com")):
                                netloc = SHN_NETLOC_REWRITE.get(netloc.lower(), netloc)
                                text += f"Link: {netloc}\n\n"

                            text = text.rstrip("\n")
                    else:
                        text = f"[{time}] "
                        if "url" in comment and comment["url"]:
                            netloc = try_get_netloc(comment["url"])
                            text += f"Link: {netloc}\n\n"
                    added_text = False
                    if "text" in comment and comment["text"]:
                        text += f"{comment['text']}\n\n"
                        added_text = True
                    elif (
                        "text" in comment
                        and not comment["text"]
                        and comment_fmt["sender"].lower()
                        in ["[removed]", "[deleted]"]
                    ):
                        text += "[Deleted]\n\n"
                        added_text = True
                    else:
                        text += "[No Comment]"
                        logger.warning(f"Empty Text: {comment}")
                        added_text = True

                    if not added_text:
                        logger.warning(f"Invalid comment data? {comment}")

                    text = text.rstrip("\n")
                    comment_fmt["message"] = text
                    conversation.append(comment_fmt)
                return conversation

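            # namedconversation entries look like {"sender": "<author or [deleted]>",
            # "message": "[01 Jan 2020, 12:00:00] ..."}; the submission comes first, followed by
            # the surviving comments in flattened thread order. Each qualifying thread is written
            # as a single JSONL line built from the dict below.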
            thread_data = {
                "thread_id": thread_row[0],
                "subreddit": thread_row[2],
                "namedconversation": to_namedconversation(),
                "submission": root_submission,
                "comments": root_comments,
            }
            usable_threads += 1
            subreddit_fp.write(
                orjson.dumps(thread_data, option=orjson.OPT_APPEND_NEWLINE)
            )

            if thread_idx % 1000 == 0 and thread_idx > 0:
                logger.debug(
                    f"/r/{db_path.stem} Threading: {round((thread_idx / thread_count) * 100, ndigits=2)}% ({natural.number.number(thread_count - thread_idx)} to go...) done."
                )
    logger.debug(f"/r/{db_path.stem} Threads: 100% done.")
    if wipe_db_afterdone:
        try:
            db_sqlite.close()
            db_path.unlink()
        except Exception as e:
            logger.error(e)


@app.command()
def file(
    db_file: pathlib.Path,
    submission: pathlib.Path,
    comments: pathlib.Path,
    thread_output: pathlib.Path,
):
    rethread_subreddit(
        db_file, submission, comments, thread_output, wipe_db_afterdone=False
    )


def main_err_cb(err):
    logger.exception(err)


@app.command()
def folder(
    m700_folder: pathlib.Path, export_folder: pathlib.Path, subfilter_file: pathlib.Path
):
    reddit_db_tmp = pathlib.Path(".reddit_tmp")
    if not reddit_db_tmp.is_dir():
        reddit_db_tmp.mkdir(exist_ok=True, parents=True)
    with multiprocessing.Pool(processes=96) as pool:
        futures = []
        selected_subs = set()
        with open(subfilter_file, "rb") as f:
            for line in f:
                selected_subs.add("_".join(orjson.loads(line)["file"].split("_")[:-1]))

        for sub in [i for i in m700_folder.iterdir() if i.stem.endswith("_Submission")]:
            root_sub = sub.with_stem(sub.stem[: -len("_Submission")])
            comments = root_sub.with_stem(root_sub.stem + "_Comments")
            if sub.exists() and comments.exists():
                if root_sub.stem in selected_subs:
                    # logger.debug(f"Subreddit: /r/{root_sub} was selected.")
                    futures.append(
                        pool.apply_async(
                            rethread_subreddit,
                            args=(
                                reddit_db_tmp / f"{root_sub.stem}.sqlite.db",
                                sub,
                                comments,
                                export_folder / f"{root_sub.stem}.jsonl",
                                None,
                                True,
                                True,
                            ),
                            error_callback=main_err_cb,
                        )
                    )
            else:
                pass
                # logger.warning(f"Mismatched: {sub} {comments}")
                # sub.unlink() if sub.exists() else None
                # comments.unlink() if comments.exists() else None
        logger.debug(f"Waiting for {len(futures)}")
        for future in futures:
            future.wait()


if __name__ == "__main__":
    app()
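
# Example invocations (paths are illustrative):
#   python RedditThreader.py file askreddit.sqlite.db AskReddit_Submission.jsonl AskReddit_Comments.jsonl AskReddit.threads.jsonl
#   python RedditThreader.py folder ./m700_dump ./threads ./subreddit_filter.jsonl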