import gc
from datasets import load_dataset


def batch_iterator():
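    """Yield raw training text from a mix of code, natural-language, math,
    instruction, and emoji datasets.

    Blocks marked `##` are disabled alternatives. Each active block deletes
    its dataset and forces garbage collection before the next one loads, to
    keep peak memory low. Intended consumer (an assumption, not stated in
    this file): an iterator-based tokenizer trainer.
    """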
    ## code
    # dataset = load_dataset('bigcode/programming-languages-keywords', split='train')
    #
    # for row in dataset:
    #     for n in row['keywords']:
    #         yield n
    #
    # del dataset
    # gc.collect()

    # code
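    # A generator expression defers each load_dataset call until iteration,
    # so only one language config is resident in memory at a time.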
    dataset = (
        load_dataset('bigcode/the-stack-smol-xs', lang, split='train', trust_remote_code=True)
        for lang in [
            'ada', 'agda', 'alloy', 'antlr', 'applescript', 'assembly', 'augeas', 'awk', 'batchfile', 'bison', 'bluespec', 'c',
            'c++', 'c-sharp', 'clojure', 'cmake', 'coffeescript', 'common-lisp', 'css', 'cuda', 'dart', 'dockerfile', 'elixir',
            'elm', 'emacs-lisp', 'erlang', 'f-sharp', 'fortran', 'glsl', 'go', 'groovy', 'haskell', 'html', 'idris', 'isabelle', 'java',
            'java-server-pages', 'javascript', 'julia', 'kotlin', 'lean', 'literate-agda', 'literate-coffeescript', 'literate-haskell',
            'lua', 'makefile', 'maple', 'markdown', 'mathematica', 'matlab', 'ocaml', 'pascal', 'perl', 'php', 'powershell', 'prolog',
            'protocol-buffer', 'python', 'r', 'racket', 'restructuredtext', 'rmarkdown', 'ruby', 'rust', 'sas', 'scala', 'scheme', 
            'shell', 'smalltalk', 'solidity', 'sparql', 'sql', 'stan', 'standard-ml', 'stata', 'systemverilog', 'tcl', 'tcsh', 'tex', 
            'thrift', 'typescript', 'verilog', 'vhdl', 'visual-basic', 'xslt', 'yacc', 'zig'
        ]
    )

    for d in dataset:
        for row in d:
            yield row['content']

    del dataset
    gc.collect()

    # text
    dataset = load_dataset('nampdn-ai/tiny-textbooks', split='train')

    for row in dataset:
        yield row['text']

    del dataset
    gc.collect()

    ## text
    # dataset = (
    #     load_dataset('wikimedia/wikisource', lang, split='train')
    #     for lang in ['20231201.ar', '20231201.as', '20231201.az', '20231201.ban', '20231201.be', '20231201.bg', '20231201.bn', '20231201.br', '20231201.bs', '20231201.ca', '20231201.cs', '20231201.cy', '20231201.da', '20231201.de', '20231201.el', '20231201.en', '20231201.eo', '20231201.es', '20231201.et', '20231201.eu', '20231201.fa', '20231201.fi', '20231201.fo', '20231201.fr', '20231201.gl', '20231201.gu', '20231201.he', '20231201.hi', '20231201.hr', '20231201.hu', '20231201.hy', '20231201.id', '20231201.is', '20231201.it', '20231201.ja', '20231201.jv', '20231201.kn', '20231201.ko', '20231201.la', '20231201.li', '20231201.lij', '20231201.lt', '20231201.mk', '20231201.ml', '20231201.mr', '20231201.nap', '20231201.nl', '20231201.no', '20231201.or', '20231201.pa', '20231201.pl', '20231201.pms', '20231201.pt', '20231201.ro', '20231201.ru', '20231201.sa', '20231201.sah', '20231201.sk', '20231201.sl', '20231201.sr', '20231201.su', '20231201.sv', '20231201.ta', '20231201.te', '20231201.th', '20231201.tr', '20231201.uk', '20231201.vec', '20231201.vi', '20231201.wa', '20231201.yi', '20231201.zh', '20231201.zh-min-nan']
    # )
    #
    # for d in dataset:
    #     for row in d['text']:
    #         yield row
    #
    # del dataset
    # gc.collect()

    # text
    dataset = (
        load_dataset('xu-song/cc100-samples', lang, split='train')
        for lang in ['am', 'ar', 'as', 'az', 'be', 'bg', 'bn', 'bn_rom', 'br', 'bs', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gn', 'gu', 'ha', 'he', 'hi', 'hi_rom', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'ku', 'ky', 'la', 'lg', 'li', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'my_zaw', 'ne', 'nl', 'no', 'ns', 'om', 'or', 'pa', 'pl', 'ps', 'pt', 'qu', 'rm', 'ro', 'ru', 'sa', 'si', 'sc', 'sd', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'ta_rom', 'te', 'te_rom', 'th', 'tl', 'tn', 'tr', 'ug', 'uk', 'ur', 'ur_rom', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh-Hans', 'zh-Hant', 'zu']
    )

    for d in dataset:
        for row in d['text']:
            yield row

    del dataset
    gc.collect()

    ## text
    # dataset = (
    #     load_dataset('csebuetnlp/xlsum', lang, split='train')
    #     for lang in ['amharic', 'arabic', 'azerbaijani', 'bengali', 'burmese', 'chinese_simplified', 'chinese_traditional', 'english', 'french', 'gujarati', 'hausa', 'hindi', 'igbo', 'indonesian', 'japanese', 'kirundi', 'korean', 'kyrgyz', 'marathi', 'nepali', 'oromo', 'pashto', 'persian', 'pidgin', 'portuguese', 'punjabi', 'russian', 'scottish_gaelic', 'serbian_cyrillic', 'serbian_latin', 'sinhala', 'somali', 'spanish', 'swahili', 'tamil', 'telugu', 'thai', 'tigrinya', 'turkish', 'ukrainian', 'urdu', 'uzbek', 'vietnamese', 'welsh', 'yoruba']
    # )
    #
    # for d in dataset:
    #     for row in d['text']:
    #         yield row
    #
    # del dataset
    # gc.collect()

    ## text
    # dataset = load_dataset('recursal/SuperWikiNEXT-32B', split='train')
    #
    # for row in dataset['text']:
    #     yield row
    #
    # del dataset
    # gc.collect()

    # code
    dataset = load_dataset('m-a-p/CodeFeedback-Filtered-Instruction', split='train')

    for row in dataset:
        yield row['query'] + '\n' + row['answer']

    del dataset
    gc.collect()

    # code
    dataset = load_dataset('nampdn-ai/tiny-codes', split='train')
    
    for row in dataset:
        yield row['prompt'] + '\n' + row['response']
    
    del dataset
    gc.collect()

    # math
    dataset = load_dataset('ajibawa-2023/Maths-College', split='train')
    
    for row in dataset:
        yield row['instruction'] + '\n' + row['output']
    
    del dataset
    gc.collect()

    # math
    dataset = load_dataset('microsoft/orca-math-word-problems-200k', split='train')

    for row in dataset:
        yield row['question'] + '\n' + row['answer']

    del dataset
    gc.collect()

    # text
    dataset = load_dataset('mlabonne/FineTome-100k', split='train')

    for row in dataset['conversations']:
        yield '\n'.join(n['value'] for n in row)

    del dataset
    gc.collect()

    # instruction
    dataset = load_dataset('arcee-ai/agent-data', split='train')
    
    for row in dataset['conversations']:
        yield '\n'.join(n['value'] for n in row)
    
    del dataset
    gc.collect()

    # instruction
    dataset = (
        load_dataset('cognitivecomputations/SystemChat-2.0', data_files='SystemChat_filtered.jsonl', split='train'),
        load_dataset('cognitivecomputations/SystemChat-2.0', data_files='SystemChat_multilingual.jsonl', split='train'),
    )
    
    for d in dataset:
        for row in d['messages']:
            yield '\n'.join(n['content'] for n in row)
    
    del dataset
    gc.collect()

    # emoji
    dataset = load_dataset('badrex/llm-emoji-dataset', split='train')
    
    for row in dataset:
        yield f'{row["character"]}\n{row["unicode"]}\n{row["short description"]}\n{row["tags"]}\n{row["LLM description"]}'
    
    del dataset
    gc.collect()
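

# A minimal usage sketch, not part of the original file: `batch_iterator`
# matches the generator pattern accepted by the Hugging Face `tokenizers`
# library. The vocab size, special tokens, and output path below are
# hypothetical placeholders, not values taken from this project.
if __name__ == '__main__':
    from tokenizers import Tokenizer, models, pre_tokenizers, trainers

    # Byte-level BPE tokenizer trained directly from the streaming iterator.
    tokenizer = Tokenizer(models.BPE(unk_token='[UNK]'))
    tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()

    trainer = trainers.BpeTrainer(vocab_size=32000, special_tokens=['[UNK]'])
    tokenizer.train_from_iterator(batch_iterator(), trainer=trainer)

    tokenizer.save('tokenizer.json')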