orionweller committed
Commit 1e35b2a · verified · 1 Parent(s): 04dc961

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +31 -0
  2. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20105-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  3. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20105-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  4. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_250-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  5. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25419-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  6. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25419-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  7. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27603-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  8. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27603-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  9. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_52206-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  10. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72478-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  11. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72478-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  12. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_75736-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  13. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86897-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  14. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86897-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  15. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9339-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  16. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15753-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  17. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15753-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  18. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25297-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  19. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25297-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  20. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25539-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25539-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25912-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28098-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28098-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28645-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28645-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30185-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30185-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31903-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31903-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33450-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33450-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35306-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35306-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35994-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35994-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37242-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37242-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37444-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37444-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39918-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39918-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44290-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44290-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_45958-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_45958-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49086-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49086-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49842-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49842-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes CHANGED
@@ -27042,3 +27042,34 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68959-tokenized-c
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_66596-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_57239-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_16299-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27603-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9339-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72478-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72478-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25419-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86897-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25419-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_250-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27603-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20105-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86897-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20105-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_52206-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_75736-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_37217-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3382-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9524-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_14282-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3382-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_33758-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_89895-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_14150-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_14282-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_42298-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_58514-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_89895-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_30060-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_32273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_58514-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_14150-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_32273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20105-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33b91cb6340ae9dcf92bfc0c10846aa7fa2b2c969303eac571e217760442c7b7
+ size 67108148
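Each `.mds` shard in this commit is checked in as a Git LFS pointer, so the diff above shows only the pointer's three fields (spec version, sha256 oid, byte size) rather than the binary shard itself. A minimal sketch of reading those fields in Python; the helper name and path are illustrative, not part of this repo:

```python
# Sketch: parse a Git LFS pointer file into its three fields.
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Return {'version': ..., 'oid': ..., 'size': ...} from a Git LFS pointer file."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["size"] = int(fields["size"])  # byte size of the real shard once fetched
    return fields

# Example (hypothetical path; after `git lfs pull` the file holds the real MDS data instead):
# print(read_lfs_pointer("shard.00000.mds"))
```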
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20105-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f3911c57f28cea374309dd6f9d53a7e9b85dd8753273c9278a5836281b2172f
+ size 16120871
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_250-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:febff6e15e0352973043bde6c5325e722e25952f51b7523d17b69fdc2cce69a2
+ size 67108605
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25419-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:140eb1ce73a7967e19f32eb617f8e72ac5b15d981c4db198b63fe0251fa3e81b
+ size 67107355
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25419-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af4a2a7b98ac97ac8930b62c033399b4ff76f70f04aa3307d90eb9c8ea0c52ae
+ size 10753279
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27603-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e8e72bd41a3a136b19740c9ee15ca94db65827a0ac0c6331ebda90d7ad58ad0
+ size 67107700
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27603-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4e6b47a467be3b90af6c12a49014eaafa49ebb1701153ac5eadf24406ad819e
+ size 13666922
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_52206-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d00442ad790e480ec8ca90bbe394c2932f98779ab204fe83592d6fafa1c180d4
+ size 11523695
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72478-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a24d42a67c7d3f5b1750f24baae5416c3c490a9d2157dea652108bedeac7a4e5
+ size 67107728
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72478-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5c2fb7e3bd429b7dc31c181138f466c17e209e698e6a2af984b5ced29ec23ce
+ size 10431471
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_75736-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a728dd28463d63faa8f727768a5333ce2b506f5a09569b05b9bd8c46d2800259
+ size 12021794
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86897-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0714641d378638f7934ecfa89d625ffd773c56f8f62d6613add08442715cde8
+ size 67106964
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86897-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d05bef0b38bbde1db3d975d2090dd8d8d26e92088acb432b9842a7091e6103a7
+ size 16113019
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9339-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1c7f3f0db871c46f144a654f9d3d83784a89cef034b64600508e8009e6a4f80
+ size 67107177
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15753-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108489, "hashes": {}}, "samples": 44032, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47776641, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12198672, "hashes": {}}, "samples": 8005, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8709497, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15753-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38043114,
+ "num_truncated_tokens": 38013728
+ }
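Each `num_tokens.json` records the split's token count alongside its count after truncation (presumably under the 1024/512/128 chunking scheme named in the directory, with `num_truncated_tokens` as the post-truncation total). A quick sketch of reporting the loss; the file path is illustrative:

```python
# Sketch: report how many tokens a split loses to truncation, from its num_tokens.json.
import json

with open("num_tokens.json") as f:  # illustrative path
    counts = json.load(f)

dropped = counts["num_tokens"] - counts["num_truncated_tokens"]
pct = 100 * dropped / counts["num_tokens"]
print(f"{dropped:,} of {counts['num_tokens']:,} tokens dropped ({pct:.3f}%)")
```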
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25297-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108306, "hashes": {}}, "samples": 44590, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47755482, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10050037, "hashes": {}}, "samples": 6839, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7194058, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25297-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36987751,
+ "num_truncated_tokens": 36958385
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25539-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108406, "hashes": {}}, "samples": 44315, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47567182, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10536923, "hashes": {}}, "samples": 6709, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7470569, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25539-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37243482,
+ "num_truncated_tokens": 37215233
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25912-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107972, "hashes": {}}, "samples": 44959, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47951936, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7242576, "hashes": {}}, "samples": 4956, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5194170, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28098-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108293, "hashes": {}}, "samples": 43282, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47713963, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17338327, "hashes": {}}, "samples": 11108, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12250566, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28098-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40539784,
+ "num_truncated_tokens": 40506265
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28645-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107587, "hashes": {}}, "samples": 44476, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47866575, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9711598, "hashes": {}}, "samples": 6437, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6940113, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28645-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36834044,
+ "num_truncated_tokens": 36805501
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30185-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108204, "hashes": {}}, "samples": 44131, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47824576, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11363697, "hashes": {}}, "samples": 7542, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8105342, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30185-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37636800,
+ "num_truncated_tokens": 37607501
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31903-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107988, "hashes": {}}, "samples": 43293, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47926921, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15988467, "hashes": {}}, "samples": 10367, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11307714, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31903-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39887259,
+ "num_truncated_tokens": 39854114
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33450-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108644, "hashes": {}}, "samples": 44430, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47851720, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9595844, "hashes": {}}, "samples": 6398, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6840269, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33450-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36779319,
+ "num_truncated_tokens": 36750431
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35306-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107876, "hashes": {}}, "samples": 43061, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47535629, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18481804, "hashes": {}}, "samples": 11838, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13193346, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35306-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41095511,
+ "num_truncated_tokens": 41060766
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35994-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108696, "hashes": {}}, "samples": 43015, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48007345, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17506947, "hashes": {}}, "samples": 11326, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12465893, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35994-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40625736,
+ "num_truncated_tokens": 40591062
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37242-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108417, "hashes": {}}, "samples": 43756, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47669608, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16162299, "hashes": {}}, "samples": 10306, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11362820, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37242-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39962175,
+ "num_truncated_tokens": 39928828
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37444-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107297, "hashes": {}}, "samples": 44283, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48106079, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9767549, "hashes": {}}, "samples": 6492, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7001511, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37444-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36865905,
+ "num_truncated_tokens": 36837621
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39918-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107490, "hashes": {}}, "samples": 43990, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47523262, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10995079, "hashes": {}}, "samples": 7267, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7818364, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39918-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37464812,
+ "num_truncated_tokens": 37436319
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44290-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108123, "hashes": {}}, "samples": 42737, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47675944, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21534629, "hashes": {}}, "samples": 13548, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15197896, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44290-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42579121,
+ "num_truncated_tokens": 42541999
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_45958-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108174, "hashes": {}}, "samples": 42625, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47481498, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20452951, "hashes": {}}, "samples": 13112, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14461894, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_45958-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42055231,
+ "num_truncated_tokens": 42019296
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49086-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107523, "hashes": {}}, "samples": 43048, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47637060, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19036247, "hashes": {}}, "samples": 12180, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13507287, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49086-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41362484,
+ "num_truncated_tokens": 41327391
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49842-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108593, "hashes": {}}, "samples": 43839, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47719227, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12451992, "hashes": {}}, "samples": 8200, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8851682, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49842-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38169741,
+ "num_truncated_tokens": 38139712
+ }