Commit: add tag frequency approximations, display and thresholding

Files changed:
- e6db/utils/__init__.py (+32 -7)
- e6db/utils/numpy.py (+17 -0)
- query_tags.py (+49 -26)
e6db/utils/__init__.py  CHANGED
@@ -3,6 +3,7 @@ from pathlib import Path
 import gzip
 import json
 import warnings
+import math
 from typing import Callable, Iterable

 tag_categories = [
@@ -69,7 +70,7 @@ def load_implications(data_dir):
     Load implication mappings. Returns a tuple `(implications, implications_rej)`

     * `implications`: dict mapping numerical ids to a list of implied numerical
-      ids
+      ids. Contains transitive implications.
     * `implications_rej`: dict mapping tag to a list of implied numerical ids
       keys in implications_rej are tag that have a very little usage (less than 2
       posts) and don't have numerical ids associated with them.
@@ -82,19 +83,35 @@ def load_implications(data_dir):
     return implications, implications_rej


+def tag_rank_to_freq(rank: int) -> float:
+    """Approximate the frequency of a tag given its rank"""
+    return math.exp(26.4284 * math.tanh(2.93505 * rank ** (-0.136501)) - 11.492)
+
+
+def tag_freq_to_rank(freq: int) -> float:
+    """Approximate the rank of a tag given its frequency"""
+    log_freq = math.log(freq)
+    return math.exp(
+        -7.57186
+        * (0.0465456 * log_freq - 1.24326)
+        * math.log(1.13045 - 0.0720383 * log_freq)
+        + 12.1903
+    )
+
+
 MapFun = Callable[[str, int | None], str | list[str]]


-#
-# WARNING: this API is goofy and will chang soon
-#
 class TagNormalizer:
     """
     Map tag strings to numerical ids, and vice versa.

-    Multiple strings can be mapped to a single id, while each id
-    string. As a result, the encode/decode process can be used to
-    tags.
+    Multiple strings can be mapped to a single id, while each id maps to a
+    single string. As a result, the encode/decode process can be used to
+    normalize tags to canonical spelling.
+
+    See `add_input_mappings` for adding aliases, and `rename_output` for setting
+    the canonical spelling of a tag.
     """

     def __init__(self, path_or_data: str | Path | tuple[dict, list, bytes]):
@@ -104,6 +121,14 @@ class TagNormalizer:
             data = path_or_data
         self.tag2idx, self.idx2tag, self.tag_categories = data

+    def get_category(self, tag: int | str, as_string=True) -> int:
+        if isinstance(tag, str):
+            tag = self.encode(tag)
+        cat = self.tag_categories[tag]
+        if as_string:
+            return tag_categories[cat]
+        return cat
+
     def encode(self, tag: str, default=None):
         "Convert tag string to numerical id"
         return self.tag2idx.get(tag, default)
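As a usage note (not part of the commit): the two new fits are approximate inverses of each other, and `tag_freq_to_rank` is what later lets `query_tags.py` turn a minimum post count into a rank cutoff. A minimal, self-contained sketch with the formulas copied from the hunk above; the sample rank and the threshold of 100 posts are illustrative only.

import math

def tag_rank_to_freq(rank: int) -> float:
    """Approximate a tag's post count from its popularity rank (fit from the hunk above)."""
    return math.exp(26.4284 * math.tanh(2.93505 * rank ** (-0.136501)) - 11.492)

def tag_freq_to_rank(freq: int) -> float:
    """Approximate a tag's popularity rank from its post count (fit from the hunk above)."""
    log_freq = math.log(freq)
    return math.exp(
        -7.57186
        * (0.0465456 * log_freq - 1.24326)
        * math.log(1.13045 - 0.0720383 * log_freq)
        + 12.1903
    )

# The two curves roughly invert each other: rank -> freq -> rank lands near,
# but not exactly on, the starting rank.
rank = 1000
est_freq = tag_rank_to_freq(rank)
print(est_freq)                      # estimated post count at rank 1000
print(tag_freq_to_rank(est_freq))    # roughly 1000 again

# Thresholding: ranks beyond this index correspond to tags estimated to have
# fewer than 100 posts.
rank_thresh = int(tag_freq_to_rank(100))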
e6db/utils/numpy.py  CHANGED
@@ -1,6 +1,7 @@
 import numpy as np
 from . import load_tags as py_load_tags

+
 def tags_to_scipy_csr(df_posts, column="stripped_tags", vocab_size=None):
     from .polars import tags_to_csr
     import scipy.sparse
@@ -19,3 +20,19 @@ def load_tags(data_dir):
     idx2tag = np.array(idx2tag)
     tag_categories = np.frombuffer(tag_categories, dtype=np.uint8)
     return tag2idx, idx2tag, tag_categories
+
+
+def tag_rank_to_freq(rank: np.ndarray) -> np.ndarray:
+    """Approximate the frequency of a tag given its rank"""
+    return np.exp(26.4284 * np.tanh(2.93505 * rank ** (-0.136501)) - 11.492)
+
+
+def tag_freq_to_rank(freq: np.ndarray) -> np.ndarray:
+    """Approximate the rank of a tag given its frequency"""
+    log_freq = np.log(freq)
+    return np.exp(
+        -7.57186
+        * (0.0465456 * log_freq - 1.24326)
+        * np.log(1.13045 - 0.0720383 * log_freq)
+        + 12.1903
+    )
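As a usage note (not part of the commit): these are elementwise mirrors of the scalar helpers, so a whole range of ranks can be converted in one call. A small sketch; treating ranks as 1-based here is an assumption for illustration, the library itself indexes rank-sorted arrays directly.

import numpy as np

def tag_rank_to_freq(rank: np.ndarray) -> np.ndarray:
    """Elementwise version of the fit above."""
    return np.exp(26.4284 * np.tanh(2.93505 * rank ** (-0.136501)) - 11.492)

# Estimated post counts for the 100,000 most-used tags.
ranks = np.arange(1, 100_001)
freqs = tag_rank_to_freq(ranks)

# Boolean mask of tags estimated to have at least 100 posts.
popular = freqs >= 100
print(int(popular.sum()), "of", len(ranks), "ranks clear the threshold")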
query_tags.py  CHANGED
@@ -4,11 +4,15 @@ import argparse

 import numpy as np
 import safetensors
-
-
 from sklearn.decomposition import PCA
+
 from e6db.utils.numpy import load_tags
-from e6db.utils import
+from e6db.utils import (
+    tag_category2id,
+    tag_categories_colors,
+    tag_freq_to_rank,
+    tag_rank_to_freq,
+)


 def dothething(args):
@@ -37,14 +41,16 @@ def dothething(args):
     print("Query tags:", " ".join(sel_tags))

     # Select neighboring tags similar to the input tags
-
+    global_topk = args.global_topk
     top_k = args.topk
     if top_k is None:
-        top_k = int(1.5 *
+        top_k = int(1.5 * global_topk / len(sel_idxs))
+    rank_tresh = int(tag_freq_to_rank(args.min_frequency))
+    print(f"{rank_tresh=}")

     # Score and filter
-    scores = Xt @ Xt[sel_idxs].T
-    scores[sel_idxs, :] = float("-inf")
+    scores = Xt[:rank_tresh] @ Xt[sel_idxs].T
+    scores[sel_idxs, :] = float("-inf")  # Mask self-matches
     if args.category:
         categories = [tag_category2id[cat] for cat in args.category]
         scores[~np.isin(tag_categories, categories), :] = float("-inf")
@@ -53,22 +59,23 @@ def dothething(args):
     neigh_idxs = np.argpartition(-scores, top_k, axis=0)[:top_k]

     for i, t in enumerate(sel_tags):
-
-
-        tag_list = " ".join(
-
-
-
-    neigh_idxs = np.unique(neigh_idxs)
-    scores = scores[neigh_idxs, :].sum(axis=1)
-    if len(neigh_idxs) > n_neighbors:
-        neigh_idxs = neigh_idxs[np.argpartition(-scores, n_neighbors)[:n_neighbors]]
+        order = np.argsort(scores[neigh_idxs[:, i], i])[::-1]
+        idxs = neigh_idxs[order[: args.display_topk], i]
+        tag_list = " ".join(
+            f"{idx2tag[i]} ({format_tagfreq(tag_rank_to_freq(i))})" for i in idxs
+        )
+        print(f"* {t} ({format_tagfreq(tag_rank_to_freq(sel_idxs[i]))}): {tag_list}")

     if not args.plot_out:
         return
-
     from matplotlib import pyplot as plt

+    # Deduplicate, global top-k
+    neigh_idxs = np.unique(neigh_idxs)
+    scores = scores[neigh_idxs, :].sum(axis=1)
+    if len(neigh_idxs) > global_topk:
+        neigh_idxs = neigh_idxs[np.argpartition(-scores, global_topk)[:global_topk]]
+
     idxs = np.concatenate([sel_idxs, neigh_idxs])
     query_slice = slice(None, len(sel_idxs))
     target_slice = slice(len(sel_idxs), len(sel_idxs) + args.display_topk)
@@ -81,7 +88,7 @@ def dothething(args):
     X2 /= np.linalg.norm(X2, axis=1)[:, None]
     X2t = PCA(2).fit_transform(X2)[:, ::-1]

-    f, ax = plt.subplots(figsize=(
+    f, ax = plt.subplots(figsize=(12, 12), facecolor="#152f56")
     ax.axis("off")

     dx = 0.01
@@ -118,7 +125,7 @@ def dothething(args):

 def parse_args():
     parser = argparse.ArgumentParser(
-        description="Query similar tags and plots a local PCA",
+        description="Query similar tags and plots a local PCA.\nUse `-o -` to get an interactive plot",
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
     )
     parser.add_argument(
@@ -135,6 +142,13 @@ def parse_args():
         action="append",
         help="restrict the output to the specified tag category",
     )
+    parser.add_argument(
+        "-f",
+        "--min_frequency",
+        type=int,
+        default=100,
+        help="minimal number of posts tagged for a tag to be considered",
+    )
     parser.add_argument(
         "-N",
         "--display-topk",
@@ -150,18 +164,18 @@ def parse_args():
         help="selects the global top-k neighbors for the local PCA",
     )
     parser.add_argument(
-        "-
-        "--
+        "-k",
+        "--topk",
         type=int,
         default=None,
-        help="
+        help="Number of neighbors to consider for each query tag. When not specified, is set to 1.5 * GLOBAL_TOPK / <number of query tags>",
     )
     parser.add_argument(
-        "-
-        "--
+        "-d",
+        "--first-pca",
         type=int,
         default=None,
-        help="
+        help="truncation rank for the global PCA meant to smooth all embeddings",
     )
     parser.add_argument(
         "-o",
@@ -180,6 +194,15 @@ def parse_args():
     return parser.parse_args()


+def format_tagfreq(count):
+    count = int(count)
+    if count < 1000:
+        return str(count)
+    elif count < 1000_000:
+        return f"{count*1e-3:.1f}k"
+    return f"{count*1e-6:.1f}m"
+
+
 if __name__ == "__main__":
     args = parse_args()
     dothething(args)
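As a usage note (not part of the commit): the new scoring path in `dothething` turns `--min_frequency` into a rank cutoff before the similarity search, so rarely used tags never enter the candidate set. A self-contained sketch of that logic, with a random matrix standing in for the rank-sorted tag embeddings and hypothetical query indices; the sizes and seeds are made up for illustration.

import numpy as np

def tag_freq_to_rank(freq):
    """Rank estimate, same fit as in e6db.utils above."""
    log_freq = np.log(freq)
    return np.exp(
        -7.57186
        * (0.0465456 * log_freq - 1.24326)
        * np.log(1.13045 - 0.0720383 * log_freq)
        + 12.1903
    )

def format_tagfreq(count):
    """Human-readable post counts, as added at the bottom of query_tags.py."""
    count = int(count)
    if count < 1000:
        return str(count)
    elif count < 1000_000:
        return f"{count*1e-3:.1f}k"
    return f"{count*1e-6:.1f}m"

rng = np.random.default_rng(0)
Xt = rng.normal(size=(60_000, 64))               # stand-in for rank-sorted tag embeddings
Xt /= np.linalg.norm(Xt, axis=1, keepdims=True)
sel_idxs = np.array([3, 17])                     # hypothetical query tag ids (must rank above the cutoff)

# --min_frequency 100 becomes a rank threshold: rows past it are never scored.
rank_tresh = int(tag_freq_to_rank(100))
scores = Xt[:rank_tresh] @ Xt[sel_idxs].T        # shape (rank_tresh, n_queries)
scores[sel_idxs, :] = float("-inf")              # mask self-matches

top_k = 5
neigh_idxs = np.argpartition(-scores, top_k, axis=0)[:top_k]
print(neigh_idxs.shape)          # (5, 2): top-k candidate rows per query tag
print(format_tagfreq(24_300))    # "24.3k"

In the real script the candidates are then re-ordered by score, printed alongside `format_tagfreq(tag_rank_to_freq(...))` estimates, and optionally reduced for the local PCA plot.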