ehristoforu committed
Commit 37b856c · verified · 1 Parent(s): 9aa0a43

Update requirements.txt

Files changed (1)
  1. requirements.txt +5 -305
requirements.txt CHANGED
@@ -1,305 +1,5 @@
- # This file was autogenerated by uv via the following command:
- #    uv pip compile pyproject.toml -o requirements.txt
- accelerate==1.3.0
-     # via
-     #   qwen2-5-bakeneko-32b-instruct-awq (pyproject.toml)
-     #   autoawq
- aiofiles==23.2.1
-     # via gradio
- aiohappyeyeballs==2.4.6
-     # via aiohttp
- aiohttp==3.11.12
-     # via
-     #   datasets
-     #   fsspec
- aiosignal==1.3.2
-     # via aiohttp
- annotated-types==0.7.0
-     # via pydantic
- anyio==4.8.0
-     # via
-     #   gradio
-     #   httpx
-     #   starlette
- async-timeout==5.0.1
-     # via aiohttp
- attrs==25.1.0
-     # via aiohttp
- autoawq==0.2.7.post3
-     # via qwen2-5-bakeneko-32b-instruct-awq (pyproject.toml)
- certifi==2025.1.31
-     # via
-     #   httpcore
-     #   httpx
-     #   requests
- charset-normalizer==3.4.1
-     # via requests
- click==8.1.8
-     # via
-     #   typer
-     #   uvicorn
- datasets==3.3.0
-     # via autoawq
- dill==0.3.8
-     # via
-     #   datasets
-     #   multiprocess
- exceptiongroup==1.2.2
-     # via anyio
- fastapi==0.115.8
-     # via gradio
- ffmpy==0.5.0
-     # via gradio
- filelock==3.17.0
-     # via
-     #   datasets
-     #   huggingface-hub
-     #   torch
-     #   transformers
-     #   triton
- frozenlist==1.5.0
-     # via
-     #   aiohttp
-     #   aiosignal
- fsspec==2024.12.0
-     # via
-     #   datasets
-     #   gradio-client
-     #   huggingface-hub
-     #   torch
- gradio==5.16.0
-     # via
-     #   qwen2-5-bakeneko-32b-instruct-awq (pyproject.toml)
-     #   spaces
- gradio-client==1.7.0
-     # via gradio
- h11==0.14.0
-     # via
-     #   httpcore
-     #   uvicorn
- hf-transfer==0.1.9
-     # via qwen2-5-bakeneko-32b-instruct-awq (pyproject.toml)
- httpcore==1.0.7
-     # via httpx
- httpx==0.28.1
-     # via
-     #   gradio
-     #   gradio-client
-     #   safehttpx
-     #   spaces
- huggingface-hub==0.28.1
-     # via
-     #   accelerate
-     #   autoawq
-     #   datasets
-     #   gradio
-     #   gradio-client
-     #   tokenizers
-     #   transformers
- idna==3.10
-     # via
-     #   anyio
-     #   httpx
-     #   requests
-     #   yarl
- jinja2==3.1.5
-     # via
-     #   gradio
-     #   torch
- markdown-it-py==3.0.0
-     # via rich
- markupsafe==2.1.5
-     # via
-     #   gradio
-     #   jinja2
- mdurl==0.1.2
-     # via markdown-it-py
- mpmath==1.3.0
-     # via sympy
- multidict==6.1.0
-     # via
-     #   aiohttp
-     #   yarl
- multiprocess==0.70.16
-     # via datasets
- networkx==3.4.2
-     # via torch
- numpy==2.2.3
-     # via
-     #   accelerate
-     #   datasets
-     #   gradio
-     #   pandas
-     #   transformers
- nvidia-cublas-cu12==12.1.3.1
-     # via
-     #   nvidia-cudnn-cu12
-     #   nvidia-cusolver-cu12
-     #   torch
- nvidia-cuda-cupti-cu12==12.1.105
-     # via torch
- nvidia-cuda-nvrtc-cu12==12.1.105
-     # via torch
- nvidia-cuda-runtime-cu12==12.1.105
-     # via torch
- nvidia-cudnn-cu12==9.1.0.70
-     # via torch
- nvidia-cufft-cu12==11.0.2.54
-     # via torch
- nvidia-curand-cu12==10.3.2.106
-     # via torch
- nvidia-cusolver-cu12==11.4.5.107
-     # via torch
- nvidia-cusparse-cu12==12.1.0.106
-     # via
-     #   nvidia-cusolver-cu12
-     #   torch
- nvidia-nccl-cu12==2.20.5
-     # via torch
- nvidia-nvjitlink-cu12==12.8.61
-     # via
-     #   nvidia-cusolver-cu12
-     #   nvidia-cusparse-cu12
- nvidia-nvtx-cu12==12.1.105
-     # via torch
- orjson==3.10.15
-     # via gradio
- packaging==24.2
-     # via
-     #   accelerate
-     #   datasets
-     #   gradio
-     #   gradio-client
-     #   huggingface-hub
-     #   spaces
-     #   transformers
- pandas==2.2.3
-     # via
-     #   datasets
-     #   gradio
- pillow==11.1.0
-     # via gradio
- propcache==0.2.1
-     # via
-     #   aiohttp
-     #   yarl
- psutil==5.9.8
-     # via
-     #   accelerate
-     #   spaces
- pyarrow==19.0.0
-     # via datasets
- pydantic==2.10.6
-     # via
-     #   fastapi
-     #   gradio
-     #   spaces
- pydantic-core==2.27.2
-     # via pydantic
- pydub==0.25.1
-     # via gradio
- pygments==2.19.1
-     # via rich
- python-dateutil==2.9.0.post0
-     # via pandas
- python-multipart==0.0.20
-     # via gradio
- pytz==2025.1
-     # via pandas
- pyyaml==6.0.2
-     # via
-     #   accelerate
-     #   datasets
-     #   gradio
-     #   huggingface-hub
-     #   transformers
- regex==2024.11.6
-     # via transformers
- requests==2.32.3
-     # via
-     #   datasets
-     #   huggingface-hub
-     #   spaces
-     #   transformers
- rich==13.9.4
-     # via typer
- ruff==0.9.6
-     # via gradio
- safehttpx==0.1.6
-     # via gradio
- safetensors==0.5.2
-     # via
-     #   accelerate
-     #   transformers
- semantic-version==2.10.0
-     # via gradio
- shellingham==1.5.4
-     # via typer
- six==1.17.0
-     # via python-dateutil
- sniffio==1.3.1
-     # via anyio
- spaces==0.32.0
-     # via qwen2-5-bakeneko-32b-instruct-awq (pyproject.toml)
- starlette==0.45.3
-     # via
-     #   fastapi
-     #   gradio
- sympy==1.13.3
-     # via torch
- tokenizers==0.21.0
-     # via
-     #   autoawq
-     #   transformers
- tomlkit==0.13.2
-     # via gradio
- torch==2.4.0
-     # via
-     #   qwen2-5-bakeneko-32b-instruct-awq (pyproject.toml)
-     #   accelerate
-     #   autoawq
- tqdm==4.67.1
-     # via
-     #   datasets
-     #   huggingface-hub
-     #   transformers
- transformers==4.48.3
-     # via
-     #   qwen2-5-bakeneko-32b-instruct-awq (pyproject.toml)
-     #   autoawq
- triton==3.0.0
-     # via
-     #   autoawq
-     #   torch
- typer==0.15.1
-     # via gradio
- typing-extensions==4.12.2
-     # via
-     #   anyio
-     #   autoawq
-     #   fastapi
-     #   gradio
-     #   gradio-client
-     #   huggingface-hub
-     #   multidict
-     #   pydantic
-     #   pydantic-core
-     #   rich
-     #   spaces
-     #   torch
-     #   typer
-     #   uvicorn
- tzdata==2025.1
-     # via pandas
- urllib3==2.3.0
-     # via requests
- uvicorn==0.34.0
-     # via gradio
- websockets==14.2
-     # via gradio-client
- xxhash==3.5.0
-     # via datasets
- yarl==1.18.3
-     # via aiohttp
- zstandard==0.23.0
-     # via autoawq
+ https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.82-cu122/llama_cpp_python-0.2.82-cp310-cp310-linux_x86_64.whl
+ torch
+ huggingface_hub>=0.22.2
+ scikit-build-core
+ llama-cpp-agent>=0.2.18
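
The new dependency set replaces the transformers/AutoAWQ stack with a llama.cpp-based one: a prebuilt llama-cpp-python wheel (a CUDA 12.2 build for CPython 3.10, so nothing has to be compiled on the Space), huggingface_hub for fetching weights, scikit-build-core as the build backend only if pip ever falls back to a source build, and llama-cpp-agent for higher-level chat handling. A minimal sketch of how these packages could be wired together follows; it is an assumption about usage, not code from this commit, and the repo_id and filename values are hypothetical placeholders.

# Minimal sketch using the new dependencies; repo_id/filename below are
# hypothetical placeholders, not values taken from this commit.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Fetch a GGUF weight file from the Hugging Face Hub.
model_path = hf_hub_download(
    repo_id="example-org/example-model-GGUF",  # placeholder repo
    filename="example-model.Q4_K_M.gguf",      # placeholder file
)

# Load it with llama-cpp-python; n_gpu_layers=-1 offloads all layers to the GPU,
# which the CUDA build of the pinned wheel supports.
llm = Llama(model_path=model_path, n_gpu_layers=-1, n_ctx=4096)

# Run a simple chat completion and print the reply.
result = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=128,
)
print(result["choices"][0]["message"]["content"])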