---
dataset_info:
  features:
  - name: x
    dtype: float64
  - name: y
    dtype: float64
  - name: language
    dtype: string
  - name: corpus
    dtype: string
  splits:
  - name: train
    num_bytes: 247037602
    num_examples: 5785741
  download_size: 112131877
  dataset_size: 247037602
---

# Dataset Card for "roots-viz-data"
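
The `train` split stores one row per point in the visualization: its 2-D embedding coordinates (`x`, `y`) and the `language` and `corpus` it comes from. A minimal way to load it (the namespace below is a placeholder; use the repository id of this dataset):

```python
from datasets import load_dataset

# "user-or-org" is a placeholder namespace; substitute the actual repository id.
dset = load_dataset("user-or-org/roots-viz-data", split="train")
print(dset)
# Dataset({
#     features: ['x', 'y', 'language', 'corpus'],
#     num_rows: 5785741
# })
```

The coordinates were produced with the pipeline below: documents are tokenized, turned into token-count vectors with Dask, TF-IDF weighted, reduced to 160 dimensions with truncated SVD, and embedded in 2-D with openTSNE; datashader renders the result.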
```python
import os

import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import TruncatedSVD
from tqdm.notebook import tqdm
from openTSNE import TSNE
import datashader as ds
import colorcet as cc

import vectorizers
from vectorizers.transformers import CountFeatureCompressionTransformer, InformationWeightTransformer

from dask.distributed import Client, LocalCluster
import dask.dataframe as dd
import dask_ml.feature_extraction.text
import dask.bag as db

from transformers import AutoTokenizer, AutoModel
from huggingface_hub import notebook_login, HfApi, hf_hub_download, Repository
from datasets import load_dataset
from datasets.utils.py_utils import convert_file_size_to_int

# Tokenize every document with the model tokenizer. `tokenizer` and `dset` are
# assumed to have been created earlier (e.g. with AutoTokenizer.from_pretrained
# and load_dataset), and `dset_name` names the output directory.
def batch_tokenize(batch):
    return {'tokenized': [' '.join(e.tokens) for e in tokenizer(batch['text']).encodings]}

dset = dset.map(batch_tokenize, batched=True, batch_size=64, num_proc=28)

# Write the tokenized dataset out as ~300MB parquet shards so Dask can read it back.
max_shard_size = convert_file_size_to_int('300MB')
dataset_nbytes = dset.data.nbytes
num_shards = int(dataset_nbytes / max_shard_size) + 1
num_shards = max(num_shards, 1)
print(f"Sharding into {num_shards} files.")
os.makedirs(f"{dset_name}/tokenized", exist_ok=True)
for shard_index in tqdm(range(num_shards)):
    shard = dset.shard(num_shards=num_shards, index=shard_index, contiguous=True)
    shard.to_parquet(f"{dset_name}/tokenized/tokenized-{shard_index:03d}.parquet")

# Spin up a local Dask cluster and build a sparse document-term count matrix.
# `vocab` (the token vocabulary to count over) is assumed to have been built earlier.
client = Client()
client  # displays the cluster dashboard link in a notebook

df = dd.read_parquet(f'{dset_name}/tokenized/')
vect = dask_ml.feature_extraction.text.CountVectorizer(tokenizer=str.split,
                                                       token_pattern=None,
                                                       vocabulary=vocab)
tokenized_bag = df['tokenized'].to_bag()
X = vect.transform(tokenized_bag)

# Materialize the counts and release the cluster.
counts = X.compute()
client.shutdown()

# TF-IDF weighting of the counts, then reduction to 160 dimensions with truncated SVD.
tfidf_transformer = TfidfTransformer(sublinear_tf=True, norm="l2")
tfidf = tfidf_transformer.fit_transform(counts)

svd = TruncatedSVD(n_components=160)
X_svd = svd.fit_transform(tfidf)

# 2-D embedding with openTSNE, fit on the SVD-reduced matrix.
tsne = TSNE(
    perplexity=30,
    n_jobs=28,
    random_state=42,
    verbose=True,
)

tsne_embedding = tsne.fit(X_svd)

# Render the embedding with datashader.
df = pd.DataFrame(data=tsne_embedding, columns=['x', 'y'])
agg = ds.Canvas(plot_height=600, plot_width=600).points(df, 'x', 'y')
img = ds.tf.shade(agg, cmap=cc.fire, how='eq_hist')
ds.tf.set_background(img, "black")
```
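
The released `x`/`y` columns make it possible to reproduce a similar rendering directly from the dataset. A sketch, assuming `dset` is loaded as shown above; `count_cat` colors points by `language` and needs a categorical column:

```python
import colorcet as cc
import datashader as ds

# Convert the split to pandas; datashader's count_cat reduction needs a categorical dtype.
df = dset.to_pandas()
df["language"] = df["language"].astype("category")

canvas = ds.Canvas(plot_height=600, plot_width=600)
agg = canvas.points(df, "x", "y", agg=ds.count_cat("language"))
# Glasbey gives enough distinct colors for the full set of language categories.
img = ds.tf.shade(agg, color_key=cc.glasbey, how="eq_hist")
ds.tf.set_background(img, "black")
```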