Update README.md
README.md
```diff
@@ -39,12 +39,14 @@ import dask_ml.feature_extraction.text
 import dask.bag as db
 
 from transformers import AutoTokenizer, AutoModel
-from huggingface_hub import notebook_login, HfApi, hf_hub_download, Repository
 from datasets import load_dataset
 from datasets.utils.py_utils import convert_file_size_to_int
 
+
 def batch_tokenize(batch):
-    return {'tokenized': [' '.join(e.tokens) for e in tokenizer(batch['text']).encodings]}
+    return {'tokenized': [' '.join(e.tokens) for e in tokenizer(batch['text']).encodings]} # "text" column hard encoded
+
+dset = load_dataset(..., split="train")
 
 dset = dset.map(batch_tokenize, batched=True, batch_size=64, num_proc=28)
 
```
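For context, the snippet in this hunk assumes a `tokenizer` built elsewhere in the README, and `load_dataset(...)` elides the actual dataset. A minimal self-contained sketch of the same step, with a placeholder checkpoint and dataset (`bert-base-uncased` and `ag_news` are illustrative stand-ins, not the repo's actual choices):

```python
from transformers import AutoTokenizer
from datasets import load_dataset

# Placeholder checkpoint and dataset; the README elides the real ones with "...".
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
dset = load_dataset("ag_news", split="train")

def batch_tokenize(batch):
    # Join each example's subword tokens back into a space-separated string.
    # The "text" column name is hard-coded, as the diff comment notes.
    return {"tokenized": [" ".join(e.tokens) for e in tokenizer(batch["text"]).encodings]}

dset = dset.map(batch_tokenize, batched=True, batch_size=64, num_proc=28)
```

Mapping with `batched=True` lets the fast tokenizer process 64 examples per call, and `num_proc=28` fans the work out across processes.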
```diff
@@ -78,7 +80,7 @@ svd = TruncatedSVD(n_components=160)
 X_svd = svd.fit_transform(tfidf)
 
 tsne = TSNE(
-    perplexity=30,
+    perplexity=30, # not sure what param setting resulted in the plot
     n_jobs=28,
     random_state=42,
     verbose=True,
```
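This second hunk sits inside a TF-IDF → TruncatedSVD → t-SNE projection step (the hunk context shows `svd = TruncatedSVD(n_components=160)`). A runnable sketch of that step under stated assumptions: plain scikit-learn (`TfidfVectorizer`, `sklearn.manifold.TSNE`) in place of whatever dask_ml vectorizer the README actually imports, and a synthetic corpus standing in for the tokenized column:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE

# Synthetic corpus standing in for dset["tokenized"]; sized so that
# n_components=160 < n_features and perplexity=30 < n_samples both hold.
texts = [f"sample document number {i} with some shared filler tokens" for i in range(200)]

tfidf = TfidfVectorizer().fit_transform(texts)

# Reduce the sparse TF-IDF matrix to 160 dense dimensions before t-SNE.
svd = TruncatedSVD(n_components=160)
X_svd = svd.fit_transform(tfidf)

tsne = TSNE(
    perplexity=30,  # the diff comment flags that the plot's actual setting is unknown
    n_jobs=28,
    random_state=42,
    verbose=True,
)
X_tsne = tsne.fit_transform(X_svd)  # 2-D embedding for plotting
```

Running SVD first is the usual design choice here: t-SNE scales poorly with input dimensionality, so collapsing the sparse TF-IDF matrix to a modest dense basis keeps the embedding step tractable.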