import datetime
import gc
import itertools
import multiprocessing
import pathlib
import random
from typing import Generator, Optional
from urllib.parse import urlparse

import natural.number
import orjson
import peewee
import tqdm
import typer
from loguru import logger
from loguru._logger import Logger
from playhouse.sqlite_ext import JSONField, SqliteExtDatabase

app = typer.Typer()

GB = 2**30

logger.add("RedditThreader_{time}.log", rotation="10 MB", enqueue=True)


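# Chunked JSONL reader: pulls the file in large binary chunks (default 512 MiB)
# and yields complete lines, keeping any partial trailing line in a buffer for
# the next chunk. This keeps memory bounded on multi-GB Reddit dumps.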
def read_lines_jsonl(file_name, chunk_size=GB // 2):
    with open(file_name, "rb") as file_handle:
        buffer = b""
        while True:
            chunk = file_handle.read(chunk_size)

            if not chunk:
                break
            lines = (buffer + chunk).split(b"\n")

            for line in lines[:-1]:
                yield line.strip()

            buffer = lines[-1]


def grouper(n, iterable: Generator):
    """
    >>> list(grouper(3, 'ABCDEFG'))
    [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
    """
    # islice only makes progress on a real iterator; iter() is a no-op for
    # generators but makes the doctest above also hold for plain sequences.
    iterable = iter(iterable)
    return iter(lambda: list(itertools.islice(iterable, n)), [])


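# Reddit exposes post/comment IDs as lowercase base-36 strings, while the dumps
# handled here sometimes carry them as raw integers (see transform_ids below).
# base36encode converts an int back to that form, e.g. base36encode(12345) == "9IX",
# so transform_ids(12345) -> "9ix".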
def base36encode(number):
    if not isinstance(number, int):
        raise TypeError("number must be an integer")
    is_negative = number < 0
    number = abs(number)

    alphabet, base36 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ", ""

    while number:
        number, i = divmod(number, 36)
        base36 = alphabet[i] + base36
    if is_negative:
        base36 = "-" + base36

    return base36 or alphabet[0]


def transform_ids(post_id: str | int):
    if isinstance(post_id, int):
        return base36encode(post_id).lower()
    return post_id


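# Filtering/pruning knobs. Names and values are from the original script; the
# descriptions below are inferred from how each constant is used further down.
#   SHN_NETLOC_REWRITE - cosmetic hostname rewrites applied to outbound links.
#   RDT_SCORE          - comments at or below this score are dropped.
#   RES_DEPTH          - [min comments, max depth]: in threads with more than 50
#                        comments, replies nested deeper than 6 levels are dropped.
#   SHN_MIN_REPLIES    - minimum replies for a thread to be kept, unless the
#                        submission text alone exceeds SINGLE_COMMENT_MIN characters.
#   FUZZY_SUBREDDIT    - range for the randomised "enough usable threads" cutoff
#                        applied per subreddit.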
SHN_NETLOC_REWRITE = {
    "x.com": "twitter.com",
    "v.redd.it": "video.reddit.com",
    "i.redd.it": "image.reddit.com",
}

RDT_SCORE = -4

RES_DEPTH = [50, 6]

SHN_MIN_REPLIES = 5
SINGLE_COMMENT_MIN = 2500

FUZZY_SUBREDDIT = (5, 20)


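# Depth-first flatten of a nested reply tree into a flat list, dropping the
# "children" key from each node as it is copied.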
def flatten_thread(reply_thread: dict, working_list: list[dict]):
    working_list.append({k: v for k, v in reply_thread.items() if k != "children"})
    if reply_thread["children"]:
        for sub_reply in reply_thread["children"]:
            working_list = flatten_thread(sub_reply, working_list)
    return working_list


def try_get_netloc(url: str):
    try:
        return urlparse(url).netloc
    except Exception:
        return url


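# Rebuilds one subreddit's threads from its submission and comment dumps:
#  1. load both JSONL files into a throwaway SQLite database (t3_/t1_ prefixed ids),
#  2. count "usable" threads and skip the subreddit if there are too few,
#  3. per thread: prune low-value comments, rewire replies into a tree, and write
#     each thread as one JSON line with a flattened "namedconversation".
# This summary is a reading of the code below, not original documentation.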
def rethread_subreddit(
    db_path: pathlib.Path,
    submissions: pathlib.Path,
    comments: pathlib.Path,
    subreddit_file: pathlib.Path,
    global_logger: Optional[Logger] = None,
    hide_pbars: bool = False,
    wipe_db_afterdone: bool = True,
):
    if global_logger:
        globals()["logger"] = global_logger
    if db_path.is_file():
        db_path.unlink()
    db_sqlite = SqliteExtDatabase(
        str(db_path.resolve()),
        pragmas={"journal_mode": "off", "locking_mode": "exclusive", "synchronous": 0},
    )

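    # One row per submission or comment. `id` carries the Reddit fullname
    # ("t3_..." for submissions, "t1_..." for comments) and `data` the raw JSON.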
    class BaseModel(peewee.Model):
        class Meta:
            database = db_sqlite

    class SubComment(BaseModel):
        id = peewee.CharField(unique=True)
        thread_id = peewee.CharField(index=True)
        parent_id = peewee.CharField(index=True)
        subreddit = peewee.CharField()
        is_sub = peewee.BooleanField()
        data = JSONField()

    SubComment.create_table()

    def jsonl_generator(file: pathlib.Path):
        for line in read_lines_jsonl(file, chunk_size=GB):
            yield orjson.loads(line)

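    # Ingest submissions, then comments, in batches of 30k rows. Submissions get
    # synthetic "t3_" ids and an empty parent_id; comments keep the thread_id and
    # parent_id from the dump and are inserted with on-conflict-replace.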
    for batch in tqdm.tqdm(
        grouper(30_000, jsonl_generator(submissions)),
        desc="Submission Batches",
        disable=hide_pbars,
    ):
        for sub in batch:
            sub["id"] = transform_ids(sub["id"])
        batch = [
            dict(
                id=f't3_{sub["id"]}',
                thread_id=f't3_{sub["id"]}',
                parent_id="",
                subreddit=sub["sub"]["name"],
                data=sub,
                is_sub=True,
            )
            for sub in batch
        ]

        with db_sqlite.transaction():
            SubComment.insert_many(batch).execute()

        del batch
        gc.collect()

    for batch in tqdm.tqdm(
        grouper(30_000, jsonl_generator(comments)),
        desc="Comment Batches",
        disable=hide_pbars,
    ):
        for sub in batch:
            sub["id"] = transform_ids(sub["id"])
        batch = [
            dict(
                id=f't1_{sub["id"]}',
                thread_id=sub["thread_id"],
                parent_id=sub["parent_id"] if sub["parent_id"] else "",
                subreddit=sub["sub"]["name"],
                data=sub,
                is_sub=False,
            )
            for sub in batch
        ]

        SubComment.insert_many(batch).on_conflict_replace().execute()
        del batch
        gc.collect()

    thread_query = (
        SubComment.select(SubComment.thread_id, SubComment.data, SubComment.subreddit)
        .where(SubComment.is_sub == True)
        .distinct()
    )

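    # Per-comment bookkeeping used by the pruning pass below, laid out as
    # [depth, accumulated score along the chain, own score, purge reason ("" = keep),
    #  parent_id, raw comment data]. (Layout inferred from how the list is indexed.)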
    depth_defaults = [0, 0, 0, "", "", {}]

    thread_count = thread_query.count()
    logger.debug(
        f"Making threads for /r/{db_path.stem}: {thread_count} threads found. Running init pass for potential threads."
    )

    usable_threads = 0
    for prethread_row in db_sqlite.execute(thread_query):
        comment_query = SubComment.select(
            SubComment.id, SubComment.parent_id, SubComment.data
        ).where(
            SubComment.thread_id == prethread_row[0],
            SubComment.is_sub == False,
            SubComment.parent_id != "",
        )

        pretotal_comments = comment_query.count()
        preroot_submission = orjson.loads(prethread_row[1])

        if pretotal_comments >= SHN_MIN_REPLIES or (
            preroot_submission["text"]
            and len(preroot_submission["text"]) > SINGLE_COMMENT_MIN
        ):
            usable_threads += 1

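    # Randomised cutoff: a subreddit only survives if it has more usable threads
    # than a value drawn from FUZZY_SUBREDDIT, so very small subreddits are
    # dropped behind a fuzzy boundary rather than a hard one.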
    fuzz_threads = random.randrange(FUZZY_SUBREDDIT[0], FUZZY_SUBREDDIT[1])
    if usable_threads <= fuzz_threads:
        logger.debug(
            f"/r/{db_path.stem} has {usable_threads} usable threads, at or below the cutoff of {fuzz_threads} (fuzzy {FUZZY_SUBREDDIT}). Skipping subreddit entirely..."
        )
        db_sqlite.close()
        if db_path.is_file():
            db_path.unlink()
        return

    logger.debug(
        f"Init search done. Found {usable_threads} usable threads for /r/{db_path.stem}. Making threads..."
    )

    with open(subreddit_file, "wb") as subreddit_fp:
        for thread_idx, thread_row in enumerate(db_sqlite.execute(thread_query)):
            comment_query = SubComment.select(
                SubComment.id, SubComment.parent_id, SubComment.data
            ).where(SubComment.thread_id == thread_row[0], SubComment.is_sub == False)

            total_comments = comment_query.count()
            root_submission = orjson.loads(thread_row[1])

            depth_counter = {}

            if not (
                total_comments >= SHN_MIN_REPLIES
                or (
                    root_submission["text"]
                    and len(root_submission["text"]) > SINGLE_COMMENT_MIN
                )
            ):
                continue

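            # Pruning pass: walk the comments once, inheriting the parent's depth,
            # accumulated score, and purge flag. A comment is flagged if its own
            # score is <= RDT_SCORE, it sits too deep in a big thread (RES_DEPTH),
            # its accumulated score along the chain is negative, or its parent was
            # already flagged ("Purged from Parent").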
            for comment_id, _, comment_data in db_sqlite.execute(comment_query):
                comment_data = orjson.loads(comment_data)

                parent_depth_data = depth_counter.get(
                    comment_data["parent_id"], depth_defaults
                )

                depth_data = [
                    parent_depth_data[0] + 1,
                    parent_depth_data[1] + comment_data["score"],
                    comment_data["score"],
                    parent_depth_data[3],
                    comment_data["parent_id"],
                    comment_data,
                ]
                if not depth_data[3]:
                    if depth_data[2] <= RDT_SCORE:
                        depth_data[3] = f"[Rdt] <{RDT_SCORE} Votes"
                    elif total_comments > RES_DEPTH[0] and depth_data[0] > RES_DEPTH[1]:
                        depth_data[3] = "[RES] TComment Thr"
                    elif depth_data[1] < 0 and depth_data[2] != depth_data[3]:
                        depth_data[3] = "[Shn] Accumulated Score"
                else:
                    depth_data[3] = "Purged from Parent"

                depth_counter.setdefault(comment_id, depth_data)

            comments_lookup = {}
            all_comments_data = []

            for comment_id, parent_id, comment_data in tqdm.tqdm(
                db_sqlite.execute(comment_query),
                desc="Rewire query...",
                disable=hide_pbars,
            ):
                comment_data = orjson.loads(comment_data)
                if depth_counter.get(comment_id, depth_defaults)[3]:
                    continue
                comments_lookup[comment_id] = comment_data
                all_comments_data.append(comment_data)

            del depth_counter
            gc.collect()

            comments_lookup = {
                k: v
                for k, v in sorted(
                    comments_lookup.items(), key=lambda item: int(item[1]["created"])
                )
            }

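            # Rebuild the reply tree: every surviving comment gets a "children"
            # list, replies whose parent is the submission ("t3_" prefix) become
            # roots, and everything else is attached to its parent comment when
            # that parent is still present after pruning.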
            for comment in all_comments_data:
                comment["children"] = []
            root_comments = []
            for post in tqdm.tqdm(
                all_comments_data, desc="Make sorted", disable=hide_pbars
            ):
                parent_id = post["parent_id"]
                if isinstance(parent_id, int) or isinstance(post["id"], int):
                    continue
                subdebug = f"<https://reddit.com/r/{post['sub']['name']}/comments/{post['thread_id'][3:]}/a/{post['id']}>"
                if not isinstance(parent_id, str):
                    continue
                if parent_id.startswith("t3_"):
                    root_comments.append(post)
                else:
                    if parent_id not in comments_lookup:
                        if len(comments_lookup) < 10:
                            logger.warning(comments_lookup)

                        logger.warning(
                            f"{parent_id} doesn't seem to exist for {subdebug}"
                        )
                        continue
                    parent_post = comments_lookup[parent_id]
                    parent_post["children"].append(post)

            del comments_lookup, all_comments_data
            gc.collect()

            root_comments = sorted(root_comments, key=lambda comment: comment["score"])
            flatten_comments = []
            for root_comment in root_comments:
                flatten_comments.extend(flatten_thread(root_comment, []))
            flatten_comments.insert(0, root_submission)

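            # Render the flattened thread as a list of {"sender": ..., "message": ...}
            # turns. Submissions get "[timestamp] title" (plus an "[R-18]" marker and
            # an optional rewritten link), comments get "[timestamp] text"; deleted or
            # removed bodies become "[Deleted]".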
            def to_namedconversation():
                conversation = []
                for comment in flatten_comments:
                    time = datetime.datetime.fromtimestamp(
                        int(comment["created"]), tz=datetime.UTC
                    ).strftime("%d %b %Y, %H:%M:%S")
                    comment_fmt = {
                        "sender": comment["author"]["name"]
                        if comment["author"]
                        else "[deleted]",
                        "message": "",
                    }
                    if "title" in comment:
                        text = f"[{time}] {comment['title']}\n\n"
                        if "M" in comment["flags"]:
                            text = "[R-18] " + text

                        if "url" in comment and comment["url"]:
                            netloc = try_get_netloc(comment["url"])

                            if not netloc.endswith(("www.reddit.com", "reddit.com")):
                                netloc = SHN_NETLOC_REWRITE.get(netloc.lower(), netloc)
                                text += f"Link: {netloc}\n\n"

                        text = text.rstrip("\n")
                    else:
                        text = f"[{time}] "
                        if "url" in comment and comment["url"]:
                            netloc = try_get_netloc(comment["url"])
                            text += f"Link: {netloc}\n\n"
                    added_text = False
                    if "text" in comment and comment["text"]:
                        text += f"{comment['text']}\n\n"
                        added_text = True
                    elif (
                        "text" in comment
                        and not comment["text"]
                        and comment_fmt["sender"].lower() in ["[removed]", "[deleted]"]
                    ):
                        text += "[Deleted]\n\n"
                        added_text = True
                    else:
                        text += "[No Comment]"
                        logger.warning(f"Empty Text: {comment}")
                        added_text = True

                    if not added_text:
                        logger.warning(f"Invalid comment data? {comment}")

                    text = text.rstrip("\n")
                    comment_fmt["message"] = text
                    conversation.append(comment_fmt)
                return conversation

            thread_data = {
                "thread_id": thread_row[0],
                "subreddit": thread_row[2],
                "namedconversation": to_namedconversation(),
                "submission": root_submission,
                "comments": root_comments,
            }
            usable_threads += 1
            subreddit_fp.write(
                orjson.dumps(thread_data, option=orjson.OPT_APPEND_NEWLINE)
            )

            if thread_idx % 1000 == 0 and thread_idx > 0:
                logger.debug(
                    f"/r/{db_path.stem} Threading: {round((thread_idx / thread_count) * 100, ndigits=2)}% ({natural.number.number(thread_count - thread_idx)} to go...) done."
                )
    logger.debug(f"/r/{db_path.stem} Threads: 100% done.")
    if wipe_db_afterdone:
        try:
            db_sqlite.close()
            db_path.unlink()
        except Exception as e:
            logger.error(e)


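# Typer CLI entry points. Illustrative invocations (the script/file names here
# are placeholders, not taken from the original source):
#   python this_script.py file ./Sub.sqlite.db ./Sub_Submission ./Sub_Comments ./Sub.jsonl
#   python this_script.py folder ./m700_dump ./threads ./subfilter.jsonl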
@app.command()
def file(
    db_file: pathlib.Path,
    submission: pathlib.Path,
    comments: pathlib.Path,
    thread_output: pathlib.Path,
):
    rethread_subreddit(
        db_file, submission, comments, thread_output, wipe_db_afterdone=False
    )


def main_err_cb(err):
    logger.exception(err)


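# Batch mode: expects the dump folder to contain "<Subreddit>_Submission" and
# "<Subreddit>_Comments" files, keeps only subreddits listed in the subfilter
# JSONL (by the "file" field, minus its trailing "_<suffix>" segment), and
# threads each one in a worker process. (Layout inferred from the path handling
# below.)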
@app.command()
def folder(
    m700_folder: pathlib.Path, export_folder: pathlib.Path, subfilter_file: pathlib.Path
):
    reddit_db_tmp = pathlib.Path(".reddit_tmp")
    if not reddit_db_tmp.is_dir():
        reddit_db_tmp.mkdir(exist_ok=True, parents=True)
    with multiprocessing.Pool(processes=96) as pool:
        futures = []
        selected_subs = set()
        with open(subfilter_file, "rb") as f:
            for line in f:
                selected_subs.add("_".join(orjson.loads(line)["file"].split("_")[:-1]))

        for sub in [i for i in m700_folder.iterdir() if i.stem.endswith("_Submission")]:
            root_sub = sub.with_stem(sub.stem[: -len("_Submission")])
            comments = root_sub.with_stem(root_sub.stem + "_Comments")
            if sub.exists() and comments.exists():
                if root_sub.stem in selected_subs:
                    futures.append(
                        pool.apply_async(
                            rethread_subreddit,
                            args=(
                                reddit_db_tmp / f"{root_sub.stem}.sqlite.db",
                                sub,
                                comments,
                                export_folder / f"{root_sub.stem}.jsonl",
                                None,
                                True,
                                True,
                            ),
                            error_callback=main_err_cb,
                        )
                    )

        logger.debug(f"Waiting for {len(futures)} subreddit jobs to finish")
        for future in futures:
            future.wait()


if __name__ == "__main__":
    app()