PetraAI committed
Commit 2c2e361
1 parent: 96f0fa9

Upload 21 files

Files changed (21):
  1. .gitattributes +12 -0
  2. .gitignore +160 -0
  3. BERTopic +3 -0
  4. Dockerfile +16 -0
  5. LICENSE +21 -0
  6. LICENSE.txt +126 -0
  7. MANIFEST.in +3 -0
  8. Notice +1 -0
  9. Pipfile +14 -0
  10. Pipfile.lock +864 -0
  11. README.md +323 -18
  12. README_zh.md +330 -0
  13. USE_POLICY.md +50 -0
  14. batch_throttle.py +23 -0
  15. config.json +3 -0
  16. convert.py +208 -0
  17. docker-compose.yml +9 -0
  18. example.py +61 -0
  19. requirements.txt +6 -0
  20. setup.py +139 -0
  21. test_inference.py +219 -0
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ codellama-7b-instruct.Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,160 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
BERTopic ADDED
@@ -0,0 +1,3 @@
+ from bertopic import BERTopic
+
+ model = BERTopic.load("PetraAI/Nashmi")
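
For orientation, a minimal usage sketch of the loaded model, assuming the standard bertopic API (the documents below are placeholders, not data from this commit):

# Sketch only: assign new documents to topics the model already learned.
from bertopic import BERTopic

model = BERTopic.load("PetraAI/Nashmi")
docs = ["a document about sports", "a document about markets"]
topics, probs = model.transform(docs)  # nearest topic id per document
print(model.get_topic(topics[0]))      # top words of the first document's topic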
Dockerfile ADDED
@@ -0,0 +1,16 @@
+ FROM python:3-alpine
+
+ WORKDIR /app
+ COPY ./requirements.txt /app
+ COPY ./src/* /app
+
+ RUN pip3 install -r requirements.txt
+
+ ENV PORT=5500
+ EXPOSE $PORT/tcp
+
+ # Shell form would only be needed to chain processes, e.g.:
+ # ENTRYPOINT nginx && uwsgi --ini /app/params.ini -w FreeGPT4_Server
+ # (A SHELL instruction pointing at the server script is incorrect and was
+ # dropped; the exec-form ENTRYPOINT/CMD pair below is sufficient.)
+ ENTRYPOINT ["python3","/app/FreeGPT4_Server.py"]
+ CMD ["--cookie-file","/cookies.json"]
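
The exec-form ENTRYPOINT plus CMD split means --cookie-file /cookies.json is only a default that docker run arguments can override. A hedged Python sketch of how the entrypoint script would typically consume that flag (the real FreeGPT4_Server.py is not shown in this section; everything below is an assumption for illustration):

# Hypothetical argument handling for the container entrypoint; the actual
# FreeGPT4_Server.py may differ.
import argparse

parser = argparse.ArgumentParser(description="FreeGPT4 server (sketch)")
parser.add_argument("--cookie-file", default="/cookies.json",
                    help="cookies JSON mounted into the container")
parser.add_argument("--port", type=int, default=5500,
                    help="matches ENV PORT in the Dockerfile")
args = parser.parse_args()
print(f"would read {args.cookie_file} and serve on port {args.port}")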
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 潘其威(William)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
LICENSE.txt ADDED
@@ -0,0 +1,126 @@
+ LLAMA 2 COMMUNITY LICENSE AGREEMENT
+ Llama 2 Version Release Date: July 18, 2023
+
+ "Agreement" means the terms and conditions for use, reproduction, distribution and
+ modification of the Llama Materials set forth herein.
+
+ "Documentation" means the specifications, manuals and documentation
+ accompanying Llama 2 distributed by Meta at ai.meta.com/resources/models-and-
+ libraries/llama-downloads/.
+
+ "Licensee" or "you" means you, or your employer or any other person or entity (if
+ you are entering into this Agreement on such person or entity's behalf), of the age
+ required under applicable laws, rules or regulations to provide legal consent and that
+ has legal authority to bind your employer or such other person or entity if you are
+ entering in this Agreement on their behalf.
+
+ "Llama 2" means the foundational large language models and software and
+ algorithms, including machine-learning model code, trained model weights,
+ inference-enabling code, training-enabling code, fine-tuning enabling code and other
+ elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-
+ libraries/llama-downloads/.
+
+ "Llama Materials" means, collectively, Meta's proprietary Llama 2 and
+ Documentation (and any portion thereof) made available under this Agreement.
+
+ "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you
+ are an entity, your principal place of business is in the EEA or Switzerland) and Meta
+ Platforms, Inc. (if you are located outside of the EEA or Switzerland).
+
+ By clicking "I Accept" below or by using or distributing any portion or element of the
+ Llama Materials, you agree to be bound by this Agreement.
+
+ 1. License Rights and Redistribution.
+
+ a. Grant of Rights. You are granted a non-exclusive, worldwide, non-
+ transferable and royalty-free limited license under Meta's intellectual property or
+ other rights owned by Meta embodied in the Llama Materials to use, reproduce,
+ distribute, copy, create derivative works of, and make modifications to the Llama
+ Materials.
+
+ b. Redistribution and Use.
+
+ i. If you distribute or make the Llama Materials, or any derivative works
+ thereof, available to a third party, you shall provide a copy of this Agreement to such
+ third party.
+ ii. If you receive Llama Materials, or any derivative works thereof, from
+ a Licensee as part of an integrated end user product, then Section 2 of this
+ Agreement will not apply to you.
+
+ iii. You must retain in all copies of the Llama Materials that you
+ distribute the following attribution notice within a "Notice" text file distributed as a
+ part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License,
+ Copyright (c) Meta Platforms, Inc. All Rights Reserved."
+
+ iv. Your use of the Llama Materials must comply with applicable laws
+ and regulations (including trade compliance laws and regulations) and adhere to the
+ Acceptable Use Policy for the Llama Materials (available at
+ https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into
+ this Agreement.
+
+ v. You will not use the Llama Materials or any output or results of the
+ Llama Materials to improve any other large language model (excluding Llama 2 or
+ derivative works thereof).
+
+ 2. Additional Commercial Terms. If, on the Llama 2 version release date, the
+ monthly active users of the products or services made available by or for Licensee,
+ or Licensee's affiliates, is greater than 700 million monthly active users in the
+ preceding calendar month, you must request a license from Meta, which Meta may
+ grant to you in its sole discretion, and you are not authorized to exercise any of the
+ rights under this Agreement unless or until Meta otherwise expressly grants you
+ such rights.
+
+ 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE
+ LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE
+ PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY
+ WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR
+ FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE
+ FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING
+ THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR
+ USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.
+
+ 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE
+ LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT,
+ NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS
+ AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL,
+ CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN
+ IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF
+ ANY OF THE FOREGOING.
+
+ 5. Intellectual Property.
+
+ a. No trademark licenses are granted under this Agreement, and in
+ connection with the Llama Materials, neither Meta nor Licensee may use any name
+ or mark owned by or associated with the other or any of its affiliates, except as
+ required for reasonable and customary use in describing and redistributing the
+ Llama Materials.
+
+ b. Subject to Meta's ownership of Llama Materials and derivatives made by or
+ for Meta, with respect to any derivative works and modifications of the Llama
+ Materials that are made by you, as between you and Meta, you are and will be the
+ owner of such derivative works and modifications.
+
+ c. If you institute litigation or other proceedings against Meta or any entity
+ (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama
+ Materials or Llama 2 outputs or results, or any portion of any of the foregoing,
+ constitutes infringement of intellectual property or other rights owned or licensable
+ by you, then any licenses granted to you under this Agreement shall terminate as of
+ the date such litigation or claim is filed or instituted. You will indemnify and hold
+ harmless Meta from and against any claim by any third party arising out of or related
+ to your use or distribution of the Llama Materials.
+
+ 6. Term and Termination. The term of this Agreement will commence upon your
+ acceptance of this Agreement or access to the Llama Materials and will continue in
+ full force and effect until terminated in accordance with the terms and conditions
+ herein. Meta may terminate this Agreement if you are in breach of any term or
+ condition of this Agreement. Upon termination of this Agreement, you shall delete
+ and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the
+ termination of this Agreement.
+
+ 7. Governing Law and Jurisdiction. This Agreement will be governed and
+ construed under the laws of the State of California without regard to choice of law
+ principles, and the UN Convention on Contracts for the International Sale of Goods
+ does not apply to this Agreement. The courts of California shall have exclusive
+ jurisdiction of any dispute arising out of this Agreement.
+
MANIFEST.in ADDED
@@ -0,0 +1,3 @@
+ recursive-include exllamav2 *
+ global-exclude *.pyc
+ global-exclude dni_*
Notice ADDED
@@ -0,0 +1 @@
+ Llama 2 is licensed under the LLAMA 2 Community License, Copyright © Meta Platforms, Inc. All Rights Reserved.
Pipfile ADDED
@@ -0,0 +1,14 @@
+ [[source]]
+ url = "https://pypi.org/simple"
+ verify_ssl = true
+ name = "pypi"
+
+ [packages]
+ edgegpt = "*"
+ flask = {extras = ["async"], version = "*"}
+ aiohttp = "*"
+
+ [dev-packages]
+
+ [requires]
+ python_version = "3.11"
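
The flask = {extras = ["async"]} pin matters because plain Flask rejects coroutine views unless the flask[async] extra is installed. A minimal sketch using only the packages pinned above (the route and fetched URL are illustrative, not from this repo):

# Sketch: an async Flask view backed by aiohttp, as enabled by flask[async].
import aiohttp
from flask import Flask

app = Flask(__name__)

@app.route("/status")
async def status():
    # aiohttp (also pinned in the Pipfile) performs the async HTTP call.
    async with aiohttp.ClientSession() as session:
        async with session.get("https://pypi.org/simple") as resp:
            return {"upstream_status": resp.status}

if __name__ == "__main__":
    app.run(port=5500)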
Pipfile.lock ADDED
@@ -0,0 +1,864 @@
+ {
+ "_meta": {
+ "hash": {
+ "sha256": "df17fa7b0982d444252ef9397071979b8652d279db80bc14c6de62641ba71d8b"
+ },
+ "pipfile-spec": 6,
+ "requires": {
+ "python_version": "3.11"
+ },
+ "sources": [
+ {
+ "name": "pypi",
+ "url": "https://pypi.org/simple",
+ "verify_ssl": true
+ }
+ ]
+ },
+ "default": {
+ "aiohttp": {
+ "hashes": [
+ "sha256:00ad4b6f185ec67f3e6562e8a1d2b69660be43070bd0ef6fcec5211154c7df67",
+ "sha256:0175d745d9e85c40dcc51c8f88c74bfbaef9e7afeeeb9d03c37977270303064c",
+ "sha256:01d4c0c874aa4ddfb8098e85d10b5e875a70adc63db91f1ae65a4b04d3344cda",
+ "sha256:043d2299f6dfdc92f0ac5e995dfc56668e1587cea7f9aa9d8a78a1b6554e5755",
+ "sha256:0c413c633d0512df4dc7fd2373ec06cc6a815b7b6d6c2f208ada7e9e93a5061d",
+ "sha256:0d21c684808288a98914e5aaf2a7c6a3179d4df11d249799c32d1808e79503b5",
+ "sha256:0e584a10f204a617d71d359fe383406305a4b595b333721fa50b867b4a0a1548",
+ "sha256:1274477e4c71ce8cfe6c1ec2f806d57c015ebf84d83373676036e256bc55d690",
+ "sha256:13bf85afc99ce6f9ee3567b04501f18f9f8dbbb2ea11ed1a2e079670403a7c84",
+ "sha256:153c2549f6c004d2754cc60603d4668899c9895b8a89397444a9c4efa282aaf4",
+ "sha256:1f7372f7341fcc16f57b2caded43e81ddd18df53320b6f9f042acad41f8e049a",
+ "sha256:23fb25a9f0a1ca1f24c0a371523546366bb642397c94ab45ad3aedf2941cec6a",
+ "sha256:28c543e54710d6158fc6f439296c7865b29e0b616629767e685a7185fab4a6b9",
+ "sha256:2a482e6da906d5e6e653be079b29bc173a48e381600161c9932d89dfae5942ef",
+ "sha256:2ad5c3c4590bb3cc28b4382f031f3783f25ec223557124c68754a2231d989e2b",
+ "sha256:2ce2ac5708501afc4847221a521f7e4b245abf5178cf5ddae9d5b3856ddb2f3a",
+ "sha256:2cf57fb50be5f52bda004b8893e63b48530ed9f0d6c96c84620dc92fe3cd9b9d",
+ "sha256:2e1b1e51b0774408f091d268648e3d57f7260c1682e7d3a63cb00d22d71bb945",
+ "sha256:2e2e9839e14dd5308ee773c97115f1e0a1cb1d75cbeeee9f33824fa5144c7634",
+ "sha256:2e460be6978fc24e3df83193dc0cc4de46c9909ed92dd47d349a452ef49325b7",
+ "sha256:312fcfbacc7880a8da0ae8b6abc6cc7d752e9caa0051a53d217a650b25e9a691",
+ "sha256:33279701c04351a2914e1100b62b2a7fdb9a25995c4a104259f9a5ead7ed4802",
+ "sha256:33776e945d89b29251b33a7e7d006ce86447b2cfd66db5e5ded4e5cd0340585c",
+ "sha256:34dd0c107799dcbbf7d48b53be761a013c0adf5571bf50c4ecad5643fe9cfcd0",
+ "sha256:3562b06567c06439d8b447037bb655ef69786c590b1de86c7ab81efe1c9c15d8",
+ "sha256:368a42363c4d70ab52c2c6420a57f190ed3dfaca6a1b19afda8165ee16416a82",
+ "sha256:4149d34c32f9638f38f544b3977a4c24052042affa895352d3636fa8bffd030a",
+ "sha256:461908b2578955045efde733719d62f2b649c404189a09a632d245b445c9c975",
+ "sha256:4a01951fabc4ce26ab791da5f3f24dca6d9a6f24121746eb19756416ff2d881b",
+ "sha256:4e874cbf8caf8959d2adf572a78bba17cb0e9d7e51bb83d86a3697b686a0ab4d",
+ "sha256:4f21e83f355643c345177a5d1d8079f9f28b5133bcd154193b799d380331d5d3",
+ "sha256:5443910d662db951b2e58eb70b0fbe6b6e2ae613477129a5805d0b66c54b6cb7",
+ "sha256:5798a9aad1879f626589f3df0f8b79b3608a92e9beab10e5fda02c8a2c60db2e",
+ "sha256:5d20003b635fc6ae3f96d7260281dfaf1894fc3aa24d1888a9b2628e97c241e5",
+ "sha256:5db3a5b833764280ed7618393832e0853e40f3d3e9aa128ac0ba0f8278d08649",
+ "sha256:5ed1c46fb119f1b59304b5ec89f834f07124cd23ae5b74288e364477641060ff",
+ "sha256:62360cb771707cb70a6fd114b9871d20d7dd2163a0feafe43fd115cfe4fe845e",
+ "sha256:6809a00deaf3810e38c628e9a33271892f815b853605a936e2e9e5129762356c",
+ "sha256:68c5a82c8779bdfc6367c967a4a1b2aa52cd3595388bf5961a62158ee8a59e22",
+ "sha256:6e4a280e4b975a2e7745573e3fc9c9ba0d1194a3738ce1cbaa80626cc9b4f4df",
+ "sha256:6e6783bcc45f397fdebc118d772103d751b54cddf5b60fbcc958382d7dd64f3e",
+ "sha256:72a860c215e26192379f57cae5ab12b168b75db8271f111019509a1196dfc780",
+ "sha256:7607ec3ce4993464368505888af5beb446845a014bc676d349efec0e05085905",
+ "sha256:773dd01706d4db536335fcfae6ea2440a70ceb03dd3e7378f3e815b03c97ab51",
+ "sha256:78d847e4cde6ecc19125ccbc9bfac4a7ab37c234dd88fbb3c5c524e8e14da543",
+ "sha256:7dde0009408969a43b04c16cbbe252c4f5ef4574ac226bc8815cd7342d2028b6",
+ "sha256:80bd372b8d0715c66c974cf57fe363621a02f359f1ec81cba97366948c7fc873",
+ "sha256:841cd8233cbd2111a0ef0a522ce016357c5e3aff8a8ce92bcfa14cef890d698f",
+ "sha256:84de26ddf621d7ac4c975dbea4c945860e08cccde492269db4e1538a6a6f3c35",
+ "sha256:84f8ae3e09a34f35c18fa57f015cc394bd1389bce02503fb30c394d04ee6b938",
+ "sha256:8af740fc2711ad85f1a5c034a435782fbd5b5f8314c9a3ef071424a8158d7f6b",
+ "sha256:8b929b9bd7cd7c3939f8bcfffa92fae7480bd1aa425279d51a89327d600c704d",
+ "sha256:910bec0c49637d213f5d9877105d26e0c4a4de2f8b1b29405ff37e9fc0ad52b8",
+ "sha256:96943e5dcc37a6529d18766597c491798b7eb7a61d48878611298afc1fca946c",
+ "sha256:a0215ce6041d501f3155dc219712bc41252d0ab76474615b9700d63d4d9292af",
+ "sha256:a3cf433f127efa43fee6b90ea4c6edf6c4a17109d1d037d1a52abec84d8f2e42",
+ "sha256:a6ce61195c6a19c785df04e71a4537e29eaa2c50fe745b732aa937c0c77169f3",
+ "sha256:a7a75ef35f2df54ad55dbf4b73fe1da96f370e51b10c91f08b19603c64004acc",
+ "sha256:a94159871304770da4dd371f4291b20cac04e8c94f11bdea1c3478e557fbe0d8",
+ "sha256:aa1990247f02a54185dc0dff92a6904521172a22664c863a03ff64c42f9b5410",
+ "sha256:ab88bafedc57dd0aab55fa728ea10c1911f7e4d8b43e1d838a1739f33712921c",
+ "sha256:ad093e823df03bb3fd37e7dec9d4670c34f9e24aeace76808fc20a507cace825",
+ "sha256:ae871a964e1987a943d83d6709d20ec6103ca1eaf52f7e0d36ee1b5bebb8b9b9",
+ "sha256:b0ba0d15164eae3d878260d4c4df859bbdc6466e9e6689c344a13334f988bb53",
+ "sha256:b5411d82cddd212644cf9360879eb5080f0d5f7d809d03262c50dad02f01421a",
+ "sha256:b9552ec52cc147dbf1944ac7ac98af7602e51ea2dcd076ed194ca3c0d1c7d0bc",
+ "sha256:bfb9162dcf01f615462b995a516ba03e769de0789de1cadc0f916265c257e5d8",
+ "sha256:c0a9034379a37ae42dea7ac1e048352d96286626251862e448933c0f59cbd79c",
+ "sha256:c1161b345c0a444ebcf46bf0a740ba5dcf50612fd3d0528883fdc0eff578006a",
+ "sha256:c11f5b099adafb18e65c2c997d57108b5bbeaa9eeee64a84302c0978b1ec948b",
+ "sha256:c44e65da1de4403d0576473e2344828ef9c4c6244d65cf4b75549bb46d40b8dd",
+ "sha256:c48c5c0271149cfe467c0ff8eb941279fd6e3f65c9a388c984e0e6cf57538e14",
+ "sha256:c7a815258e5895d8900aec4454f38dca9aed71085f227537208057853f9d13f2",
+ "sha256:cae533195e8122584ec87531d6df000ad07737eaa3c81209e85c928854d2195c",
+ "sha256:cc14be025665dba6202b6a71cfcdb53210cc498e50068bc088076624471f8bb9",
+ "sha256:cd56db019015b6acfaaf92e1ac40eb8434847d9bf88b4be4efe5bfd260aee692",
+ "sha256:d827176898a2b0b09694fbd1088c7a31836d1a505c243811c87ae53a3f6273c1",
+ "sha256:df72ac063b97837a80d80dec8d54c241af059cc9bb42c4de68bd5b61ceb37caa",
+ "sha256:e5980a746d547a6ba173fd5ee85ce9077e72d118758db05d229044b469d9029a",
+ "sha256:e5d47ae48db0b2dcf70bc8a3bc72b3de86e2a590fc299fdbbb15af320d2659de",
+ "sha256:e91d635961bec2d8f19dfeb41a539eb94bd073f075ca6dae6c8dc0ee89ad6f91",
+ "sha256:ea353162f249c8097ea63c2169dd1aa55de1e8fecbe63412a9bc50816e87b761",
+ "sha256:eaeed7abfb5d64c539e2db173f63631455f1196c37d9d8d873fc316470dfbacd",
+ "sha256:eca4bf3734c541dc4f374ad6010a68ff6c6748f00451707f39857f429ca36ced",
+ "sha256:f83a552443a526ea38d064588613aca983d0ee0038801bc93c0c916428310c28",
+ "sha256:fb1558def481d84f03b45888473fc5a1f35747b5f334ef4e7a571bc0dfcb11f8",
+ "sha256:fd1ed388ea7fbed22c4968dd64bab0198de60750a25fe8c0c9d4bef5abe13824"
+ ],
+ "index": "pypi",
+ "version": "==3.8.5"
+ },
+ "aiosignal": {
+ "hashes": [
+ "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc",
+ "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.3.1"
+ },
+ "anyio": {
+ "hashes": [
+ "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780",
+ "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==3.7.1"
+ },
+ "asgiref": {
+ "hashes": [
+ "sha256:89b2ef2247e3b562a16eef663bc0e2e703ec6468e2fa8a5cd61cd449786d4f6e",
+ "sha256:9e0ce3aa93a819ba5b45120216b23878cf6e8525eb3848653452b4192b92afed"
+ ],
+ "version": "==3.7.2"
+ },
+ "async-timeout": {
+ "hashes": [
+ "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15",
+ "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==4.0.2"
+ },
+ "attrs": {
+ "hashes": [
+ "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04",
+ "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==23.1.0"
+ },
+ "bingimagecreator": {
+ "hashes": [
+ "sha256:322312c6b59af1210faa0ec2fbc2429b53add60d3a0af2da7f1ae793844521b5",
+ "sha256:3e66b0f6df6d9082ed3991f06ef1c52b327b9ecb762d2cfa0c36beb26dbbdf55"
+ ],
+ "version": "==0.4.4"
+ },
+ "blinker": {
+ "hashes": [
+ "sha256:4afd3de66ef3a9f8067559fb7a1cbe555c17dcbe15971b05d1b625c3e7abe213",
+ "sha256:c3d739772abb7bc2860abf5f2ec284223d9ad5c76da018234f6f50d6f31ab1f0"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.6.2"
+ },
+ "certifi": {
+ "hashes": [
+ "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082",
+ "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2023.7.22"
+ },
+ "charset-normalizer": {
+ "hashes": [
+ "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96",
+ "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c",
+ "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710",
+ "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706",
+ "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020",
+ "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252",
+ "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad",
+ "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329",
+ "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a",
+ "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f",
+ "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6",
+ "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4",
+ "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a",
+ "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46",
+ "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2",
+ "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23",
+ "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace",
+ "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd",
+ "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982",
+ "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10",
+ "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2",
+ "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea",
+ "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09",
+ "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5",
+ "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149",
+ "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489",
+ "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9",
+ "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80",
+ "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592",
+ "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3",
+ "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6",
+ "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed",
+ "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c",
+ "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200",
+ "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a",
+ "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e",
+ "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d",
+ "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6",
+ "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623",
+ "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669",
+ "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3",
+ "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa",
+ "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9",
+ "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2",
+ "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f",
+ "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1",
+ "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4",
+ "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a",
+ "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8",
+ "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3",
+ "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029",
+ "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f",
+ "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959",
+ "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22",
+ "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7",
+ "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952",
+ "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346",
+ "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e",
+ "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d",
+ "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299",
+ "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd",
+ "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a",
+ "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3",
+ "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037",
+ "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94",
+ "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c",
+ "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858",
+ "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a",
+ "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449",
+ "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c",
+ "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918",
+ "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1",
+ "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c",
+ "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac",
+ "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==3.2.0"
+ },
+ "click": {
+ "hashes": [
+ "sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd",
+ "sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==8.1.6"
+ },
+ "edgegpt": {
+ "hashes": [
+ "sha256:66d625a137d90bbdb3854dc0347d23ca07a227a902d107fbc28bdf62a939bac5",
+ "sha256:e0509afeccdd22d84b9fe52e96d2661775a4ddec340546f562514d460528efc7"
+ ],
+ "index": "pypi",
+ "version": "==0.3.9"
+ },
+ "flask": {
+ "extras": [
+ "async"
+ ],
+ "hashes": [
+ "sha256:77fd4e1249d8c9923de34907236b747ced06e5467ecac1a7bb7115ae0e9670b0",
+ "sha256:8c2f9abd47a9e8df7f0c3f091ce9497d011dc3b31effcf4c85a6e2b50f4114ef"
+ ],
+ "index": "pypi",
+ "version": "==2.3.2"
+ },
+ "frozenlist": {
+ "hashes": [
+ "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6",
+ "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01",
+ "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251",
+ "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9",
+ "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b",
+ "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87",
+ "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf",
+ "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f",
+ "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0",
+ "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2",
+ "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b",
+ "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc",
+ "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c",
+ "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467",
+ "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9",
+ "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1",
+ "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a",
+ "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79",
+ "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167",
+ "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300",
+ "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf",
+ "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea",
+ "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2",
+ "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab",
+ "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3",
+ "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb",
+ "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087",
+ "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc",
+ "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8",
+ "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62",
+ "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f",
+ "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326",
+ "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c",
+ "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431",
+ "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963",
+ "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7",
+ "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef",
+ "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3",
+ "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956",
+ "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781",
+ "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472",
+ "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc",
+ "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839",
+ "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672",
+ "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3",
+ "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503",
+ "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d",
+ "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8",
+ "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b",
+ "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc",
+ "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f",
+ "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559",
+ "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b",
+ "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95",
+ "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb",
+ "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963",
+ "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919",
+ "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f",
+ "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3",
+ "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1",
+ "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==1.4.0"
+ },
+ "h11": {
+ "hashes": [
+ "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d",
+ "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.14.0"
+ },
+ "httpcore": {
+ "hashes": [
+ "sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888",
+ "sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.17.3"
+ },
+ "httpx": {
+ "hashes": [
+ "sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd",
+ "sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.24.1"
+ },
+ "idna": {
+ "hashes": [
+ "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4",
+ "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==3.4"
+ },
+ "itsdangerous": {
+ "hashes": [
+ "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44",
+ "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.1.2"
+ },
+ "jinja2": {
+ "hashes": [
+ "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852",
+ "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==3.1.2"
+ },
+ "markdown-it-py": {
+ "hashes": [
+ "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1",
+ "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==3.0.0"
+ },
+ "markupsafe": {
+ "hashes": [
+ "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e",
+ "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e",
+ "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431",
+ "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686",
+ "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559",
+ "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc",
+ "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c",
+ "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0",
+ "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4",
+ "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9",
+ "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575",
+ "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba",
+ "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d",
+ "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3",
+ "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00",
+ "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155",
+ "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac",
+ "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52",
+ "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f",
+ "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8",
+ "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b",
+ "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24",
+ "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea",
+ "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198",
+ "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0",
+ "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee",
+ "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be",
+ "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2",
+ "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707",
+ "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6",
+ "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58",
+ "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779",
+ "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636",
+ "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c",
+ "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad",
+ "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee",
+ "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc",
+ "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2",
+ "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48",
+ "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7",
+ "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e",
+ "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b",
+ "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa",
+ "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5",
+ "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e",
+ "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb",
+ "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9",
+ "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57",
+ "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc",
+ "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.1.3"
+ },
+ "mdurl": {
+ "hashes": [
+ "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8",
+ "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.1.2"
+ },
+ "multidict": {
+ "hashes": [
+ "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9",
+ "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8",
+ "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03",
+ "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710",
+ "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161",
+ "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664",
+ "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569",
+ "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067",
+ "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313",
+ "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706",
+ "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2",
+ "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636",
+ "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49",
+ "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93",
+ "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603",
+ "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0",
+ "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60",
+ "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4",
+ "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e",
+ "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1",
+ "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60",
+ "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951",
+ "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc",
+ "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe",
+ "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95",
+ "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d",
+ "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8",
+ "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed",
+ "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2",
+ "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775",
+ "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87",
+ "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c",
+ "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2",
+ "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98",
+ "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3",
+ "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe",
+ "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78",
+ "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660",
+ "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176",
+ "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e",
+ "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988",
+ "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c",
+ "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c",
+ "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0",
+ "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449",
+ "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f",
+ "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde",
+ "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5",
+ "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d",
+ "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac",
+ "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a",
+ "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9",
+ "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca",
+ "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11",
+ "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35",
+ "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063",
+ "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b",
+ "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982",
+ "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258",
+ "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1",
+ "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52",
+ "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480",
+ "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7",
+ "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461",
+ "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d",
+ "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc",
+ "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779",
+ "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a",
+ "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547",
+ "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0",
+ "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171",
+ "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf",
+ "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d",
+ "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==6.0.4"
+ },
+ "prompt-toolkit": {
+ "hashes": [
+ "sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac",
+ "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==3.0.39"
+ },
+ "pygments": {
+ "hashes": [
+ "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c",
+ "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.15.1"
+ },
+ "regex": {
+ "hashes": [
+ "sha256:0385e73da22363778ef2324950e08b689abdf0b108a7d8decb403ad7f5191938",
+ "sha256:051da80e6eeb6e239e394ae60704d2b566aa6a7aed6f2890a7967307267a5dc6",
+ "sha256:05ed27acdf4465c95826962528f9e8d41dbf9b1aa8531a387dee6ed215a3e9ef",
+ "sha256:0654bca0cdf28a5956c83839162692725159f4cda8d63e0911a2c0dc76166525",
+ "sha256:09e4a1a6acc39294a36b7338819b10baceb227f7f7dbbea0506d419b5a1dd8af",
+ "sha256:0b49c764f88a79160fa64f9a7b425620e87c9f46095ef9c9920542ab2495c8bc",
+ "sha256:0b71e63226e393b534105fcbdd8740410dc6b0854c2bfa39bbda6b0d40e59a54",
+ "sha256:0c29ca1bd61b16b67be247be87390ef1d1ef702800f91fbd1991f5c4421ebae8",
+ "sha256:10590510780b7541969287512d1b43f19f965c2ece6c9b1c00fc367b29d8dce7",
+ "sha256:10cb847aeb1728412c666ab2e2000ba6f174f25b2bdc7292e7dd71b16db07568",
+ "sha256:12b74fbbf6cbbf9dbce20eb9b5879469e97aeeaa874145517563cca4029db65c",
+ "sha256:20326216cc2afe69b6e98528160b225d72f85ab080cbdf0b11528cbbaba2248f",
+ "sha256:2239d95d8e243658b8dbb36b12bd10c33ad6e6933a54d36ff053713f129aa536",
+ "sha256:25be746a8ec7bc7b082783216de8e9473803706723b3f6bef34b3d0ed03d57e2",
+ "sha256:271f0bdba3c70b58e6f500b205d10a36fb4b58bd06ac61381b68de66442efddb",
+ "sha256:29cdd471ebf9e0f2fb3cac165efedc3c58db841d83a518b082077e612d3ee5df",
+ "sha256:2d44dc13229905ae96dd2ae2dd7cebf824ee92bc52e8cf03dcead37d926da019",
+ "sha256:3676f1dd082be28b1266c93f618ee07741b704ab7b68501a173ce7d8d0d0ca18",
+ "sha256:36efeba71c6539d23c4643be88295ce8c82c88bbd7c65e8a24081d2ca123da3f",
+ "sha256:3e5219bf9e75993d73ab3d25985c857c77e614525fac9ae02b1bebd92f7cecac",
+ "sha256:43e1dd9d12df9004246bacb79a0e5886b3b6071b32e41f83b0acbf293f820ee8",
+ "sha256:457b6cce21bee41ac292d6753d5e94dcbc5c9e3e3a834da285b0bde7aa4a11e9",
+ "sha256:463b6a3ceb5ca952e66550a4532cef94c9a0c80dc156c4cc343041951aec1697",
+ "sha256:4959e8bcbfda5146477d21c3a8ad81b185cd252f3d0d6e4724a5ef11c012fb06",
+ "sha256:4d3850beab9f527f06ccc94b446c864059c57651b3f911fddb8d9d3ec1d1b25d",
+ "sha256:5708089ed5b40a7b2dc561e0c8baa9535b77771b64a8330b684823cfd5116036",
+ "sha256:5c6b48d0fa50d8f4df3daf451be7f9689c2bde1a52b1225c5926e3f54b6a9ed1",
+ "sha256:61474f0b41fe1a80e8dfa70f70ea1e047387b7cd01c85ec88fa44f5d7561d787",
+ "sha256:6343c6928282c1f6a9db41f5fd551662310e8774c0e5ebccb767002fcf663ca9",
+ "sha256:65ba8603753cec91c71de423a943ba506363b0e5c3fdb913ef8f9caa14b2c7e0",
+ "sha256:687ea9d78a4b1cf82f8479cab23678aff723108df3edeac098e5b2498879f4a7",
+ "sha256:6b2675068c8b56f6bfd5a2bda55b8accbb96c02fd563704732fd1c95e2083461",
+ "sha256:7117d10690c38a622e54c432dfbbd3cbd92f09401d622902c32f6d377e2300ee",
+ "sha256:7178bbc1b2ec40eaca599d13c092079bf529679bf0371c602edaa555e10b41c3",
+ "sha256:72d1a25bf36d2050ceb35b517afe13864865268dfb45910e2e17a84be6cbfeb0",
+ "sha256:742e19a90d9bb2f4a6cf2862b8b06dea5e09b96c9f2df1779e53432d7275331f",
+ "sha256:74390d18c75054947e4194019077e243c06fbb62e541d8817a0fa822ea310c14",
+ "sha256:74419d2b50ecb98360cfaa2974da8689cb3b45b9deff0dcf489c0d333bcc1477",
+ "sha256:824bf3ac11001849aec3fa1d69abcb67aac3e150a933963fb12bda5151fe1bfd",
+ "sha256:83320a09188e0e6c39088355d423aa9d056ad57a0b6c6381b300ec1a04ec3d16",
+ "sha256:837328d14cde912af625d5f303ec29f7e28cdab588674897baafaf505341f2fc",
+ "sha256:841d6e0e5663d4c7b4c8099c9997be748677d46cbf43f9f471150e560791f7ff",
+ "sha256:87b2a5bb5e78ee0ad1de71c664d6eb536dc3947a46a69182a90f4410f5e3f7dd",
+ "sha256:890e5a11c97cf0d0c550eb661b937a1e45431ffa79803b942a057c4fb12a2da2",
+ "sha256:8abbc5d54ea0ee80e37fef009e3cec5dafd722ed3c829126253d3e22f3846f1e",
+ "sha256:8e3f1316c2293e5469f8f09dc2d76efb6c3982d3da91ba95061a7e69489a14ef",
+ "sha256:8f56fcb7ff7bf7404becdfc60b1e81a6d0561807051fd2f1860b0d0348156a07",
+ "sha256:9427a399501818a7564f8c90eced1e9e20709ece36be701f394ada99890ea4b3",
+ "sha256:976d7a304b59ede34ca2921305b57356694f9e6879db323fd90a80f865d355a3",
+ "sha256:9a5bfb3004f2144a084a16ce19ca56b8ac46e6fd0651f54269fc9e230edb5e4a",
+ "sha256:9beb322958aaca059f34975b0df135181f2e5d7a13b84d3e0e45434749cb20f7",
+ "sha256:9edcbad1f8a407e450fbac88d89e04e0b99a08473f666a3f3de0fd292badb6aa",
+ "sha256:9edce5281f965cf135e19840f4d93d55b3835122aa76ccacfd389e880ba4cf82",
+ "sha256:a4c3b7fa4cdaa69268748665a1a6ff70c014d39bb69c50fda64b396c9116cf77",
+ "sha256:a8105e9af3b029f243ab11ad47c19b566482c150c754e4c717900a798806b222",
+ "sha256:a99b50300df5add73d307cf66abea093304a07eb017bce94f01e795090dea87c",
+ "sha256:aad51907d74fc183033ad796dd4c2e080d1adcc4fd3c0fd4fd499f30c03011cd",
+ "sha256:af4dd387354dc83a3bff67127a124c21116feb0d2ef536805c454721c5d7993d",
+ "sha256:b28f5024a3a041009eb4c333863d7894d191215b39576535c6734cd88b0fcb68",
+ "sha256:b4598b1897837067a57b08147a68ac026c1e73b31ef6e36deeeb1fa60b2933c9",
+ "sha256:b6192d5af2ccd2a38877bfef086d35e6659566a335b1492786ff254c168b1693",
+ "sha256:b862c2b9d5ae38a68b92e215b93f98d4c5e9454fa36aae4450f61dd33ff48487",
+ "sha256:b956231ebdc45f5b7a2e1f90f66a12be9610ce775fe1b1d50414aac1e9206c06",
+ "sha256:bb60b503ec8a6e4e3e03a681072fa3a5adcbfa5479fa2d898ae2b4a8e24c4591",
+ "sha256:bbb02fd4462f37060122e5acacec78e49c0fbb303c30dd49c7f493cf21fc5b27",
+ "sha256:bdff5eab10e59cf26bc479f565e25ed71a7d041d1ded04ccf9aee1d9f208487a",
+ "sha256:c123f662be8ec5ab4ea72ea300359023a5d1df095b7ead76fedcd8babbedf969",
+ "sha256:c2b867c17a7a7ae44c43ebbeb1b5ff406b3e8d5b3e14662683e5e66e6cc868d3",
+ "sha256:c5f8037000eb21e4823aa485149f2299eb589f8d1fe4b448036d230c3f4e68e0",
+ "sha256:c6a57b742133830eec44d9b2290daf5cbe0a2f1d6acee1b3c7b1c7b2f3606df7",
+ "sha256:ccf91346b7bd20c790310c4147eee6ed495a54ddb6737162a36ce9dbef3e4751",
+ "sha256:cf67ca618b4fd34aee78740bea954d7c69fdda419eb208c2c0c7060bb822d747",
+ "sha256:d2da3abc88711bce7557412310dfa50327d5769a31d1c894b58eb256459dc289",
+ "sha256:d4f03bb71d482f979bda92e1427f3ec9b220e62a7dd337af0aa6b47bf4498f72",
+ "sha256:d54af539295392611e7efbe94e827311eb8b29668e2b3f4cadcfe6f46df9c777",
+ "sha256:d77f09bc4b55d4bf7cc5eba785d87001d6757b7c9eec237fe2af57aba1a071d9",
+ "sha256:d831c2f8ff278179705ca59f7e8524069c1a989e716a1874d6d1aab6119d91d1",
+ "sha256:dbbbfce33cd98f97f6bffb17801b0576e653f4fdb1d399b2ea89638bc8d08ae1",
+ "sha256:dcba6dae7de533c876255317c11f3abe4907ba7d9aa15d13e3d9710d4315ec0e",
+ "sha256:e0bb18053dfcfed432cc3ac632b5e5e5c5b7e55fb3f8090e867bfd9b054dbcbf",
+ "sha256:e2fbd6236aae3b7f9d514312cdb58e6494ee1c76a9948adde6eba33eb1c4264f",
+ "sha256:e5087a3c59eef624a4591ef9eaa6e9a8d8a94c779dade95d27c0bc24650261cd",
+ "sha256:e8915cc96abeb8983cea1df3c939e3c6e1ac778340c17732eb63bb96247b91d2",
+ "sha256:ea353ecb6ab5f7e7d2f4372b1e779796ebd7b37352d290096978fea83c4dba0c",
+ "sha256:ee2d1a9a253b1729bb2de27d41f696ae893507c7db224436abe83ee25356f5c1",
+ "sha256:f415f802fbcafed5dcc694c13b1292f07fe0befdb94aa8a52905bd115ff41e88",
+ "sha256:fb5ec16523dc573a4b277663a2b5a364e2099902d3944c9419a40ebd56a118f9",
+ "sha256:fea75c3710d4f31389eed3c02f62d0b66a9da282521075061ce875eb5300cf23"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2023.6.3"
+ },
+ "requests": {
+ "hashes": [
+ "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
+ "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"
663
+ ],
664
+ "markers": "python_version >= '3.7'",
665
+ "version": "==2.31.0"
666
+ },
667
+ "rich": {
668
+ "hashes": [
669
+ "sha256:8f87bc7ee54675732fa66a05ebfe489e27264caeeff3728c945d25971b6485ec",
670
+ "sha256:d653d6bccede5844304c605d5aac802c7cf9621efd700b46c7ec2b51ea914898"
671
+ ],
672
+ "markers": "python_version >= '3.7'",
673
+ "version": "==13.4.2"
674
+ },
675
+ "sniffio": {
676
+ "hashes": [
677
+ "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101",
678
+ "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"
679
+ ],
680
+ "markers": "python_version >= '3.7'",
681
+ "version": "==1.3.0"
682
+ },
683
+ "urllib3": {
684
+ "hashes": [
685
+ "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11",
686
+ "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4"
687
+ ],
688
+ "markers": "python_version >= '3.7'",
689
+ "version": "==2.0.4"
690
+ },
691
+ "wcwidth": {
692
+ "hashes": [
693
+ "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e",
694
+ "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"
695
+ ],
696
+ "version": "==0.2.6"
697
+ },
698
+ "websockets": {
699
+ "hashes": [
700
+ "sha256:01f5567d9cf6f502d655151645d4e8b72b453413d3819d2b6f1185abc23e82dd",
701
+ "sha256:03aae4edc0b1c68498f41a6772d80ac7c1e33c06c6ffa2ac1c27a07653e79d6f",
702
+ "sha256:0ac56b661e60edd453585f4bd68eb6a29ae25b5184fd5ba51e97652580458998",
703
+ "sha256:0ee68fe502f9031f19d495dae2c268830df2760c0524cbac5d759921ba8c8e82",
704
+ "sha256:1553cb82942b2a74dd9b15a018dce645d4e68674de2ca31ff13ebc2d9f283788",
705
+ "sha256:1a073fc9ab1c8aff37c99f11f1641e16da517770e31a37265d2755282a5d28aa",
706
+ "sha256:1d2256283fa4b7f4c7d7d3e84dc2ece74d341bce57d5b9bf385df109c2a1a82f",
707
+ "sha256:1d5023a4b6a5b183dc838808087033ec5df77580485fc533e7dab2567851b0a4",
708
+ "sha256:1fdf26fa8a6a592f8f9235285b8affa72748dc12e964a5518c6c5e8f916716f7",
709
+ "sha256:2529338a6ff0eb0b50c7be33dc3d0e456381157a31eefc561771ee431134a97f",
710
+ "sha256:279e5de4671e79a9ac877427f4ac4ce93751b8823f276b681d04b2156713b9dd",
711
+ "sha256:2d903ad4419f5b472de90cd2d40384573b25da71e33519a67797de17ef849b69",
712
+ "sha256:332d126167ddddec94597c2365537baf9ff62dfcc9db4266f263d455f2f031cb",
713
+ "sha256:34fd59a4ac42dff6d4681d8843217137f6bc85ed29722f2f7222bd619d15e95b",
714
+ "sha256:3580dd9c1ad0701169e4d6fc41e878ffe05e6bdcaf3c412f9d559389d0c9e016",
715
+ "sha256:3ccc8a0c387629aec40f2fc9fdcb4b9d5431954f934da3eaf16cdc94f67dbfac",
716
+ "sha256:41f696ba95cd92dc047e46b41b26dd24518384749ed0d99bea0a941ca87404c4",
717
+ "sha256:42cc5452a54a8e46a032521d7365da775823e21bfba2895fb7b77633cce031bb",
718
+ "sha256:4841ed00f1026dfbced6fca7d963c4e7043aa832648671b5138008dc5a8f6d99",
719
+ "sha256:4b253869ea05a5a073ebfdcb5cb3b0266a57c3764cf6fe114e4cd90f4bfa5f5e",
720
+ "sha256:54c6e5b3d3a8936a4ab6870d46bdd6ec500ad62bde9e44462c32d18f1e9a8e54",
721
+ "sha256:619d9f06372b3a42bc29d0cd0354c9bb9fb39c2cbc1a9c5025b4538738dbffaf",
722
+ "sha256:6505c1b31274723ccaf5f515c1824a4ad2f0d191cec942666b3d0f3aa4cb4007",
723
+ "sha256:660e2d9068d2bedc0912af508f30bbeb505bbbf9774d98def45f68278cea20d3",
724
+ "sha256:6681ba9e7f8f3b19440921e99efbb40fc89f26cd71bf539e45d8c8a25c976dc6",
725
+ "sha256:68b977f21ce443d6d378dbd5ca38621755f2063d6fdb3335bda981d552cfff86",
726
+ "sha256:69269f3a0b472e91125b503d3c0b3566bda26da0a3261c49f0027eb6075086d1",
727
+ "sha256:6f1a3f10f836fab6ca6efa97bb952300b20ae56b409414ca85bff2ad241d2a61",
728
+ "sha256:7622a89d696fc87af8e8d280d9b421db5133ef5b29d3f7a1ce9f1a7bf7fcfa11",
729
+ "sha256:777354ee16f02f643a4c7f2b3eff8027a33c9861edc691a2003531f5da4f6bc8",
730
+ "sha256:84d27a4832cc1a0ee07cdcf2b0629a8a72db73f4cf6de6f0904f6661227f256f",
731
+ "sha256:8531fdcad636d82c517b26a448dcfe62f720e1922b33c81ce695d0edb91eb931",
732
+ "sha256:86d2a77fd490ae3ff6fae1c6ceaecad063d3cc2320b44377efdde79880e11526",
733
+ "sha256:88fc51d9a26b10fc331be344f1781224a375b78488fc343620184e95a4b27016",
734
+ "sha256:8a34e13a62a59c871064dfd8ffb150867e54291e46d4a7cf11d02c94a5275bae",
735
+ "sha256:8c82f11964f010053e13daafdc7154ce7385ecc538989a354ccc7067fd7028fd",
736
+ "sha256:92b2065d642bf8c0a82d59e59053dd2fdde64d4ed44efe4870fa816c1232647b",
737
+ "sha256:97b52894d948d2f6ea480171a27122d77af14ced35f62e5c892ca2fae9344311",
738
+ "sha256:9d9acd80072abcc98bd2c86c3c9cd4ac2347b5a5a0cae7ed5c0ee5675f86d9af",
739
+ "sha256:9f59a3c656fef341a99e3d63189852be7084c0e54b75734cde571182c087b152",
740
+ "sha256:aa5003845cdd21ac0dc6c9bf661c5beddd01116f6eb9eb3c8e272353d45b3288",
741
+ "sha256:b16fff62b45eccb9c7abb18e60e7e446998093cdcb50fed33134b9b6878836de",
742
+ "sha256:b30c6590146e53149f04e85a6e4fcae068df4289e31e4aee1fdf56a0dead8f97",
743
+ "sha256:b58cbf0697721120866820b89f93659abc31c1e876bf20d0b3d03cef14faf84d",
744
+ "sha256:b67c6f5e5a401fc56394f191f00f9b3811fe843ee93f4a70df3c389d1adf857d",
745
+ "sha256:bceab846bac555aff6427d060f2fcfff71042dba6f5fca7dc4f75cac815e57ca",
746
+ "sha256:bee9fcb41db2a23bed96c6b6ead6489702c12334ea20a297aa095ce6d31370d0",
747
+ "sha256:c114e8da9b475739dde229fd3bc6b05a6537a88a578358bc8eb29b4030fac9c9",
748
+ "sha256:c1f0524f203e3bd35149f12157438f406eff2e4fb30f71221c8a5eceb3617b6b",
749
+ "sha256:c792ea4eabc0159535608fc5658a74d1a81020eb35195dd63214dcf07556f67e",
750
+ "sha256:c7f3cb904cce8e1be667c7e6fef4516b98d1a6a0635a58a57528d577ac18a128",
751
+ "sha256:d67ac60a307f760c6e65dad586f556dde58e683fab03323221a4e530ead6f74d",
752
+ "sha256:dcacf2c7a6c3a84e720d1bb2b543c675bf6c40e460300b628bab1b1efc7c034c",
753
+ "sha256:de36fe9c02995c7e6ae6efe2e205816f5f00c22fd1fbf343d4d18c3d5ceac2f5",
754
+ "sha256:def07915168ac8f7853812cc593c71185a16216e9e4fa886358a17ed0fd9fcf6",
755
+ "sha256:df41b9bc27c2c25b486bae7cf42fccdc52ff181c8c387bfd026624a491c2671b",
756
+ "sha256:e052b8467dd07d4943936009f46ae5ce7b908ddcac3fda581656b1b19c083d9b",
757
+ "sha256:e063b1865974611313a3849d43f2c3f5368093691349cf3c7c8f8f75ad7cb280",
758
+ "sha256:e1459677e5d12be8bbc7584c35b992eea142911a6236a3278b9b5ce3326f282c",
759
+ "sha256:e1a99a7a71631f0efe727c10edfba09ea6bee4166a6f9c19aafb6c0b5917d09c",
760
+ "sha256:e590228200fcfc7e9109509e4d9125eace2042fd52b595dd22bbc34bb282307f",
761
+ "sha256:e6316827e3e79b7b8e7d8e3b08f4e331af91a48e794d5d8b099928b6f0b85f20",
762
+ "sha256:e7837cb169eca3b3ae94cc5787c4fed99eef74c0ab9506756eea335e0d6f3ed8",
763
+ "sha256:e848f46a58b9fcf3d06061d17be388caf70ea5b8cc3466251963c8345e13f7eb",
764
+ "sha256:ed058398f55163a79bb9f06a90ef9ccc063b204bb346c4de78efc5d15abfe602",
765
+ "sha256:f2e58f2c36cc52d41f2659e4c0cbf7353e28c8c9e63e30d8c6d3494dc9fdedcf",
766
+ "sha256:f467ba0050b7de85016b43f5a22b46383ef004c4f672148a8abf32bc999a87f0",
767
+ "sha256:f61bdb1df43dc9c131791fbc2355535f9024b9a04398d3bd0684fc16ab07df74",
768
+ "sha256:fb06eea71a00a7af0ae6aefbb932fb8a7df3cb390cc217d51a9ad7343de1b8d0",
769
+ "sha256:ffd7dcaf744f25f82190856bc26ed81721508fc5cbf2a330751e135ff1283564"
770
+ ],
771
+ "markers": "python_version >= '3.7'",
772
+ "version": "==11.0.3"
773
+ },
774
+ "werkzeug": {
775
+ "hashes": [
776
+ "sha256:935539fa1413afbb9195b24880778422ed620c0fc09670945185cce4d91a8890",
777
+ "sha256:98c774df2f91b05550078891dee5f0eb0cb797a522c757a2452b9cee5b202330"
778
+ ],
779
+ "markers": "python_version >= '3.8'",
780
+ "version": "==2.3.6"
781
+ },
782
+ "yarl": {
783
+ "hashes": [
784
+ "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571",
785
+ "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3",
786
+ "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3",
787
+ "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c",
788
+ "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7",
789
+ "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04",
790
+ "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191",
791
+ "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea",
792
+ "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4",
793
+ "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4",
794
+ "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095",
795
+ "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e",
796
+ "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74",
797
+ "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef",
798
+ "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33",
799
+ "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde",
800
+ "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45",
801
+ "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf",
802
+ "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b",
803
+ "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac",
804
+ "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0",
805
+ "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528",
806
+ "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716",
807
+ "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb",
808
+ "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18",
809
+ "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72",
810
+ "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6",
811
+ "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582",
812
+ "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5",
813
+ "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368",
814
+ "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc",
815
+ "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9",
816
+ "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be",
817
+ "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a",
818
+ "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80",
819
+ "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8",
820
+ "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6",
821
+ "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417",
822
+ "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574",
823
+ "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59",
824
+ "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608",
825
+ "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82",
826
+ "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1",
827
+ "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3",
828
+ "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d",
829
+ "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8",
830
+ "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc",
831
+ "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac",
832
+ "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8",
833
+ "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955",
834
+ "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0",
835
+ "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367",
836
+ "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb",
837
+ "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a",
838
+ "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623",
839
+ "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2",
840
+ "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6",
841
+ "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7",
842
+ "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4",
843
+ "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051",
844
+ "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938",
845
+ "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8",
846
+ "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9",
847
+ "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3",
848
+ "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5",
849
+ "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9",
850
+ "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333",
851
+ "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185",
852
+ "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3",
853
+ "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560",
854
+ "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b",
855
+ "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7",
856
+ "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78",
857
+ "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"
858
+ ],
859
+ "markers": "python_version >= '3.7'",
860
+ "version": "==1.9.2"
861
+ }
862
+ },
863
+ "develop": {}
864
+ }
README.md CHANGED
@@ -1,31 +1,336 @@
1
  ---
2
- license: afl-3.0
3
  datasets:
4
- - bigcode/the-stack-v2
5
- - hollyyfc/tidytuesday_for_python
6
- - microsoft/orca-math-word-problems-200k
7
- - Cohere/wikipedia-2023-11-embed-multilingual-v3
8
- - storytracer/US-PD-Books
9
- - HuggingFaceTB/cosmopedia
10
- - abacusai/SystemChat
11
- - fka/awesome-chatgpt-prompts
12
- - m-a-p/COIG-CQIA
13
- - MarkrAI/KoCommercial-Dataset
14
  language:
15
  - ar
16
  - en
 
 
17
  metrics:
18
  - accuracy
19
  - bertscore
20
  - bleu
21
- - bleurt
22
- - brier_score
23
- - cer
24
- - character
25
- - charcut_mt
26
  - chrf
27
  - code_eval
28
- library_name: bertopic
29
  tags:
 
 
 
 
 
30
  - code
31
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ license: apache-2.0
3
  datasets:
4
+ - PetraAI/PetraAI
5
  language:
6
  - ar
7
  - en
8
+ - ch
9
+ - zh
10
  metrics:
11
  - accuracy
12
  - bertscore
13
  - bleu
14
  - chrf
15
  - code_eval
16
+ - brier_score
17
  tags:
18
+ - chemistry
19
+ - biology
20
+ - finance
21
+ - legal
22
+ - music
23
  - code
24
+ - art
25
+ - climate
26
+ - medical
27
+ - text-generation-inference
28
+ ---
29
+
30
+ ### Inference Speed
31
+ > The results below were generated with [this script](examples/benchmark/generation_speed.py): the input batch size is 1, the decoding strategy is beam search, the model is forced to generate 512 tokens, and the speed metric is tokens/s (larger is better).
32
+ >
33
+ > The quantized model is loaded with the configuration that yields the fastest inference speed.
34
+
35
+ | model | GPU | num_beams | fp16 | gptq-int4 |
36
+ |---------------|---------------|-----------|-------|-----------|
37
+ | llama-7b | 1xA100-40G | 1 | 18.87 | 25.53 |
38
+ | llama-7b | 1xA100-40G | 4 | 68.79 | 91.30 |
39
+ | moss-moon 16b | 1xA100-40G | 1 | 12.48 | 15.25 |
40
+ | moss-moon 16b | 1xA100-40G | 4 | OOM | 42.67 |
41
+ | moss-moon 16b | 2xA100-40G | 1 | 06.83 | 06.78 |
42
+ | moss-moon 16b | 2xA100-40G | 4 | 13.10 | 10.80 |
43
+ | gpt-j 6b | 1xRTX3060-12G | 1 | OOM | 29.55 |
44
+ | gpt-j 6b | 1xRTX3060-12G | 4 | OOM | 47.36 |
45
+
46
+
47
+ ### Perplexity
48
+ For perplexity comparisons, refer to [here](https://github.com/qwopqwop200/GPTQ-for-LLaMa#result) and [here](https://github.com/qwopqwop200/GPTQ-for-LLaMa#gptq-vs-bitsandbytes).
49
+
50
+ ## Installation
51
+
52
+ ### Quick Installation
53
+ You can install the latest stable release of AutoGPTQ from pip with pre-built wheels compatible with PyTorch 2.0.1:
54
+
55
+ * For CUDA 11.7: `pip install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu117/`
56
+ * For CUDA 11.8: `pip install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/`
57
+ * For RoCm 5.4.2: `pip install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/rocm542/`
58
+
59
+ **Warning:** These wheels are not expected to work on PyTorch nightly. Please install AutoGPTQ from source when using PyTorch nightly.
60
+
61
+ #### Disable CUDA extensions
62
+ By default, the CUDA extensions are installed when `torch` and CUDA are already present on your machine. If you don't want to use them, install with:
63
+ ```shell
64
+ BUILD_CUDA_EXT=0 pip install auto-gptq
65
+ ```
66
+ To make sure `autogptq_cuda` is never present in your virtual environment, run:
67
+ ```shell
68
+ pip uninstall autogptq_cuda -y
69
+ ```
70
+
71
+ #### To support Triton speedup
72
+ To integrate with `triton`, run:
73
+ > Warning: currently Triton only supports Linux; 3-bit quantization is not supported when using Triton.
74
+
75
+ ```shell
76
+ pip install auto-gptq[triton]
77
+ ```
78
+
79
+ ### Install from source
80
+ <details>
81
+ <summary>click to see details</summary>
82
+
83
+ Clone the source code:
84
+ ```shell
85
+ git clone https://github.com/PanQiWei/AutoGPTQ.git && cd AutoGPTQ
86
+ ```
87
+ Then, install from source:
88
+ ```shell
89
+ pip install .
90
+ ```
91
+ As with the quick installation, you can set `BUILD_CUDA_EXT=0` to disable building the PyTorch extension.
92
+
93
+ Use `.[triton]` if you want to integrate with Triton and it is supported on your operating system.
94
+
95
+ To install from source for AMD GPUs supporting RoCm, please specify the `ROCM_VERSION` environment variable. The compilation can be sped up by specifying the `PYTORCH_ROCM_ARCH` variable ([reference](https://github.com/pytorch/pytorch/blob/7b73b1e8a73a1777ebe8d2cd4487eb13da55b3ba/setup.py#L132)), for example `gfx90a` for MI200 series devices. Example:
96
+
97
+ ```shell
98
+ ROCM_VERSION=5.6 pip install .
99
+ ```
100
+
101
+ For RoCm systems, the packages `rocsparse-dev`, `hipsparse-dev`, `rocthrust-dev`, `rocblas-dev` and `hipblas-dev` are required to build.
102
+
103
+ </details>
104
+
105
+ ## Quick Tour
106
+
107
+ ### Quantization and Inference
108
+ > Warning: this is just a showcase of AutoGPTQ's basic APIs. It uses a single sample to quantize a very small model, so the quality of a model quantized with so few samples may be poor.
109
+
110
+ Below is an example of the simplest way to use `auto_gptq` to quantize a model and run inference after quantization:
111
+ ```python
112
+ from transformers import AutoTokenizer, TextGenerationPipeline
113
+ from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
114
+ import logging
115
+
116
+ logging.basicConfig(
117
+ format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S"
118
+ )
119
+
120
+ pretrained_model_dir = "facebook/opt-125m"
121
+ quantized_model_dir = "opt-125m-4bit"
122
+
123
+ tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
124
+ examples = [
125
+ tokenizer(
126
+ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."
127
+ )
128
+ ]
129
+
130
+ quantize_config = BaseQuantizeConfig(
131
+ bits=4, # quantize model to 4-bit
132
+ group_size=128, # it is recommended to set the value to 128
133
+ desc_act=False,  # setting this to False can significantly speed up inference, but perplexity may be slightly worse
134
+ )
135
+
136
+ # load the un-quantized model; by default, the model is loaded into CPU memory
137
+ model = AutoGPTQForCausalLM.from_pretrained(pretrained_model_dir, quantize_config)
138
+
139
+ # quantize the model; examples should be a list of dicts whose keys can only be "input_ids" and "attention_mask"
140
+ model.quantize(examples)
141
+
142
+ # save quantized model
143
+ model.save_quantized(quantized_model_dir)
144
+
145
+ # save quantized model using safetensors
146
+ model.save_quantized(quantized_model_dir, use_safetensors=True)
147
+
148
+ # push quantized model to Hugging Face Hub.
149
+ # to use use_auth_token=True, log in first via huggingface-cli login,
150
+ # or pass an explicit token with: use_auth_token="hf_xxxxxxx"
151
+ # (uncomment the following three lines to enable this feature)
152
+ # repo_id = f"YourUserName/{quantized_model_dir}"
153
+ # commit_message = f"AutoGPTQ model for {pretrained_model_dir}: {quantize_config.bits}bits, gr{quantize_config.group_size}, desc_act={quantize_config.desc_act}"
154
+ # model.push_to_hub(repo_id, commit_message=commit_message, use_auth_token=True)
155
+
156
+ # alternatively you can save and push at the same time
157
+ # (uncomment the following three lines to enable this feature)
158
+ # repo_id = f"YourUserName/{quantized_model_dir}"
159
+ # commit_message = f"AutoGPTQ model for {pretrained_model_dir}: {quantize_config.bits}bits, gr{quantize_config.group_size}, desc_act={quantize_config.desc_act}"
160
+ # model.push_to_hub(repo_id, save_dir=quantized_model_dir, use_safetensors=True, commit_message=commit_message, use_auth_token=True)
161
+
162
+ # load quantized model to the first GPU
163
+ model = AutoGPTQForCausalLM.from_quantized(quantized_model_dir, device="cuda:0")
164
+
165
+ # download quantized model from Hugging Face Hub and load to the first GPU
166
+ # model = AutoGPTQForCausalLM.from_quantized(repo_id, device="cuda:0", use_safetensors=True, use_triton=False)
167
+
168
+ # inference with model.generate
169
+ print(tokenizer.decode(model.generate(**tokenizer("auto_gptq is", return_tensors="pt").to(model.device))[0]))
170
+
171
+ # or you can also use pipeline
172
+ pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
173
+ print(pipeline("auto-gptq is")[0]["generated_text"])
174
+ ```
175
+
176
+ For more advanced model quantization features, please refer to [this script](examples/quantization/quant_with_alpaca.py).
177
+
178
+ ### Customize Model
179
+ <details>
180
+
181
+ <summary>Below is an example of extending `auto_gptq` to support the `OPT` model; as you will see, it's very easy:</summary>
182
+
183
+ ```python
184
+ from auto_gptq.modeling import BaseGPTQForCausalLM
185
+
186
+
187
+ class OPTGPTQForCausalLM(BaseGPTQForCausalLM):
188
+ # chained attribute name of transformer layer block
189
+ layers_block_name = "model.decoder.layers"
190
+ # chained attribute names of other nn modules that are at the same level as the transformer layer block
191
+ outside_layer_modules = [
192
+ "model.decoder.embed_tokens", "model.decoder.embed_positions", "model.decoder.project_out",
193
+ "model.decoder.project_in", "model.decoder.final_layer_norm"
194
+ ]
195
+ # chained attribute names of linear layers in transformer layer module
196
+ # normally there are four sub-lists; the modules in each can be seen as one operation,
197
+ # and they should be listed in the order they are actually executed; in this case (and usually in most cases),
198
+ # they are: attention q_k_v projection, attention output projection, MLP project input, MLP project output
199
+ inside_layer_modules = [
200
+ ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
201
+ ["self_attn.out_proj"],
202
+ ["fc1"],
203
+ ["fc2"]
204
+ ]
205
+ ```
206
+ After this, you can use `OPTGPTQForCausalLM.from_pretrained` and the other methods shown in the Quick Tour; a minimal sketch follows.
207
+
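+ As a minimal sketch (assuming the `OPTGPTQForCausalLM` class defined above and the `examples` list from the quantization example; the model id and output directory are illustrative), the custom class is used just like the built-in ones:
+
+ ```python
+ from auto_gptq import BaseQuantizeConfig
+
+ # quantize with the custom model class defined above
+ model = OPTGPTQForCausalLM.from_pretrained(
+     "facebook/opt-125m",
+     BaseQuantizeConfig(bits=4, group_size=128),
+ )
+ model.quantize(examples)
+ model.save_quantized("opt-125m-4bit")
+ ```
+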
208
+ </details>
209
+
210
+ ### Evaluation on Downstream Tasks
211
+ You can use the tasks defined in `auto_gptq.eval_tasks` to evaluate a model's performance on a specific downstream task before and after quantization.
212
+
213
+ The predefined tasks support all causal language models implemented in [🤗 transformers](https://github.com/huggingface/transformers) and in this project.
214
+
215
+ <details>
216
+
217
+ <summary>Below is an example of evaluating `EleutherAI/gpt-j-6b` on a sequence-classification task using the `cardiffnlp/tweet_sentiment_multilingual` dataset:</summary>
218
+
219
+ ```python
220
+ from functools import partial
221
+
222
+ import datasets
223
+ from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
224
+
225
+ from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
226
+ from auto_gptq.eval_tasks import SequenceClassificationTask
227
+
228
+
229
+ MODEL = "EleutherAI/gpt-j-6b"
230
+ DATASET = "cardiffnlp/tweet_sentiment_multilingual"
231
+ TEMPLATE = "Question:What's the sentiment of the given text? Choices are {labels}.\nText: {text}\nAnswer:"
232
+ ID2LABEL = {
233
+ 0: "negative",
234
+ 1: "neutral",
235
+ 2: "positive"
236
+ }
237
+ LABELS = list(ID2LABEL.values())
238
+
239
+
240
+ def ds_refactor_fn(samples):
241
+ text_data = samples["text"]
242
+ label_data = samples["label"]
243
+
244
+ new_samples = {"prompt": [], "label": []}
245
+ for text, label in zip(text_data, label_data):
246
+ prompt = TEMPLATE.format(labels=LABELS, text=text)
247
+ new_samples["prompt"].append(prompt)
248
+ new_samples["label"].append(ID2LABEL[label])
249
+
250
+ return new_samples
251
+
252
+
253
+ # model = AutoModelForCausalLM.from_pretrained(MODEL).eval().half().to("cuda:0")
254
+ model = AutoGPTQForCausalLM.from_pretrained(MODEL, BaseQuantizeConfig())
255
+ tokenizer = AutoTokenizer.from_pretrained(MODEL)
256
+
257
+ task = SequenceClassificationTask(
258
+ model=model,
259
+ tokenizer=tokenizer,
260
+ classes=LABELS,
261
+ data_name_or_path=DATASET,
262
+ prompt_col_name="prompt",
263
+ label_col_name="label",
264
+ **{
265
+ "num_samples": 1000, # how many samples will be sampled to evaluation
266
+ "sample_max_len": 1024, # max tokens for each sample
267
+ "block_max_len": 2048, # max tokens for each data block
268
+ # function to load the dataset; it must accept only data_name_or_path as input
269
+ # and return datasets.Dataset
270
+ "load_fn": partial(datasets.load_dataset, name="english"),
271
+ # function to preprocess dataset, which is used for datasets.Dataset.map,
272
+ # must return Dict[str, list] with only two keys: [prompt_col_name, label_col_name]
273
+ "preprocess_fn": ds_refactor_fn,
274
+ # truncate the label when a sample's length exceeds sample_max_len
275
+ "truncate_prompt": False
276
+ }
277
+ )
278
+
279
+ # note that max_new_tokens will be automatically specified internally based on given classes
280
+ print(task.run())
281
+
282
+ # self-consistency
283
+ print(
284
+ task.run(
285
+ generation_config=GenerationConfig(
286
+ num_beams=3,
287
+ num_return_sequences=3,
288
+ do_sample=True
289
+ )
290
+ )
291
+ )
292
+ ```
293
+
294
+ </details>
295
+
296
+ ## Learn More
297
+ The [tutorials](docs/tutorial) provide step-by-step guidance for integrating `auto_gptq` into your own project, along with some best-practice principles.
298
+
299
+ The [examples](examples/README.md) provide plenty of example scripts for using `auto_gptq` in different ways.
300
+
301
+ ## Supported Models
302
+
303
+ > You can compare `model.config.model_type` against the table below to check whether the model you are using is supported by `auto_gptq` (a quick check snippet follows the table).
304
+ >
305
+ > For example, the model_type of `WizardLM`, `vicuna` and `gpt4all` is `llama`, hence they are all supported by `auto_gptq`.
306
+
307
+ | model type | quantization | inference | peft-lora | peft-ada-lora | peft-adaption_prompt |
308
+ |------------------------------------|--------------|-----------|-----------|---------------|-------------------------------------------------------------------------------------------------|
309
+ | bloom | ✅ | ✅ | ✅ | ✅ | |
310
+ | gpt2 | ✅ | ✅ | ✅ | ✅ | |
311
+ | gpt_neox | ✅ | ✅ | ✅ | ✅ | ✅[requires this peft branch](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
312
+ | gptj | ✅ | ✅ | ✅ | ✅ | ✅[requires this peft branch](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
313
+ | llama | ✅ | ✅ | ✅ | ✅ | ✅ |
314
+ | moss | ✅ | ✅ | ✅ | ✅ | ✅[requires this peft branch](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
315
+ | opt | ✅ | ✅ | ✅ | ✅ | |
316
+ | gpt_bigcode | ✅ | ✅ | ✅ | ✅ | |
317
+ | codegen | ✅ | ✅ | ✅ | ✅ | |
318
+ | falcon(RefinedWebModel/RefinedWeb) | ✅ | ✅ | ✅ | ✅ | |
319
+
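+ As a quick check (a minimal sketch; the model id below is only an illustrative placeholder), you can read `model_type` from a model's config without downloading its weights:
+
+ ```python
+ from transformers import AutoConfig
+
+ # model types listed in the table above ("RefinedWebModel"/"RefinedWeb" cover falcon)
+ SUPPORTED_MODEL_TYPES = {
+     "bloom", "gpt2", "gpt_neox", "gptj", "llama", "moss",
+     "opt", "gpt_bigcode", "codegen", "RefinedWebModel", "RefinedWeb",
+ }
+
+ config = AutoConfig.from_pretrained("facebook/opt-125m")  # illustrative model id
+ print(config.model_type)  # -> "opt"
+ print("supported by auto_gptq:", config.model_type in SUPPORTED_MODEL_TYPES)
+ ```
+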
320
+ ## Supported Evaluation Tasks
321
+ Currently, `auto_gptq` supports: `LanguageModelingTask`, `SequenceClassificationTask` and `TextSummarizationTask`; more tasks are coming soon!
322
+
323
+ ## Running tests
324
+
325
+ Tests can be run with:
326
+
327
+ ```shell
328
+ pytest tests/ -s
329
+ ```
330
+
331
+ ## Acknowledgement
332
+ - Special thanks to **Elias Frantar**, **Saleh Ashkboos**, **Torsten Hoefler** and **Dan Alistarh** for proposing the **GPTQ** algorithm and open-sourcing the [code](https://github.com/IST-DASLab/gptq).
333
+ - Special thanks to **qwopqwop200**; the quantization-related code in this project is mainly adapted from [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa/tree/cuda).
334
+
335
+
336
+ [![Star History Chart](https://api.star-history.com/svg?repos=PanQiwei/AutoGPTQ&type=Date)](https://star-history.com/#PanQiWei/AutoGPTQ&Date)
README_zh.md ADDED
@@ -0,0 +1,330 @@
1
+ <h1 align="center">AutoGPTQ</h1>
2
+ <p align="center">一个基于 GPTQ 算法,简单易用且拥有用户友好型接口的大语言模型量化工具包。</p>
3
+ <p align="center">
4
+ <a href="https://github.com/PanQiWei/AutoGPTQ/releases">
5
+ <img alt="GitHub release" src="https://img.shields.io/github/release/PanQiWei/AutoGPTQ.svg">
6
+ </a>
7
+ <a href="https://pypi.org/project/auto-gptq/">
8
+ <img alt="PyPI - Downloads" src="https://img.shields.io/pypi/dd/auto-gptq">
9
+ </a>
10
+ </p>
11
+ <h4 align="center">
12
+ <p>
13
+ <a href="https://github.com/PanQiWei/AutoGPTQ/blob/main/README.md">English</a> |
14
+ <b>中文</b>
15
+ </p>
16
+ </h4>
17
+
18
+ *<center>📣 好久不见!👋 七月和八月将会迎来架构升级,性能优化和新特性,敬请关注!🥂</center>*
19
+
20
+ ## 新闻或更新
21
+
22
+ - 2023-08-21 - (新闻) - 通义千问团队发布了基于 `auto-gptq` 的 Qwen-7B 4bit 量化版本模型,并提供了[详尽的测评结果](https://huggingface.co/Qwen/Qwen-7B-Chat-Int4#%E9%87%8F%E5%8C%96-quantization)
23
+ - 2023-08-06 - (更新) - 支持 exllama 的 q4 CUDA 算子使得 int4 量化模型能够获得至少1.3倍的推理速度提升.
24
+ - 2023-08-04 - (更新) - 支持 RoCm 使得 AMD GPU 的用户能够使用 auto-gptq 的 CUDA 拓展.
25
+ - 2023-07-26 - (更新) - 一个优雅的 [PPL 测评脚本](examples/benchmark/perplexity.py)以获得可以与诸如 `llama.cpp` 等代码库进行公平比较的结果。
26
+ - 2023-06-05 - (更新) - 集成 🤗 peft 来使用 gptq 量化过的模型训练适应层,支持 LoRA,AdaLoRA,AdaptionPrompt 等。
27
+ - 2023-05-30 - (更新) - 支持从 🤗 Hub 下载量化好的模型或上次量化好的模型到 🤗 Hub。
28
+
29
+ *获取更多的历史信息,请转至[这里](docs/NEWS_OR_UPDATE.md)*
30
+
31
+ ## 性能对比
32
+
33
+ ### 推理速度
34
+ > 以下结果通过[这个脚本](examples/benchmark/generation_speed.py)生成,文本输入的 batch size 为1,解码策略为 beam search 并且强制模型生成512个 token,速度的计量单位为 tokens/s(越大越好)。
35
+ >
36
+ > 量化模型通过能够最大化推理速度的方式加载。
37
+
38
+ | model | GPU | num_beams | fp16 | gptq-int4 |
39
+ |---------------|---------------|-----------|-------|-----------|
40
+ | llama-7b | 1xA100-40G | 1 | 18.87 | 25.53 |
41
+ | llama-7b | 1xA100-40G | 4 | 68.79 | 91.30 |
42
+ | moss-moon 16b | 1xA100-40G | 1 | 12.48 | 15.25 |
43
+ | moss-moon 16b | 1xA100-40G | 4 | OOM | 42.67 |
44
+ | moss-moon 16b | 2xA100-40G | 1 | 06.83 | 06.78 |
45
+ | moss-moon 16b | 2xA100-40G | 4 | 13.10 | 10.80 |
46
+ | gpt-j 6b | 1xRTX3060-12G | 1 | OOM | 29.55 |
47
+ | gpt-j 6b | 1xRTX3060-12G | 4 | OOM | 47.36 |
48
+
49
+
50
+ ### 困惑度(PPL)
51
+ 对于困惑度的对比, 你可以参考 [这里](https://github.com/qwopqwop200/GPTQ-for-LLaMa#result) 和 [这里](https://github.com/qwopqwop200/GPTQ-for-LLaMa#gptq-vs-bitsandbytes)
52
+
53
+ ## 安装
54
+
55
+ ### 快速安装
56
+ 你可以通过 pip 来安装 AutoGPTQ 当前最新的稳定版本:
57
+ ```shell
58
+ pip install auto-gptq
59
+ ```
60
+ 从 0.2.0 版本开始,你可以从每次版本发布的资产文件列表中下载预构建好的符合你系统配置情况的轮子文件,并通过安装这些轮子文件来跳过漫长的构建过程以达到最快的安装速度。如下是一个例子:
61
+ ```shell
62
+ # 首先,进入轮子文件存放的目录,然后执行下面的命令
63
+ pip install auto_gptq-0.2.0+cu118-cp310-cp310-linux_x86_64.whl # 在 linux 操作系统的一个 python=3.10 且 cuda=11.8 的环境下安装 0.2.0 版本的 auto_gptq
64
+ ```
65
+ #### 取消 cuda 拓展的安装
66
+ 默认情况下,在 `torch` 和 `cuda` 已经于你的机器上被安装时,cuda 拓展将被自动安装,如果你不想要这些拓展的话,采用以下安装命令:
67
+ ```shell
68
+ BUILD_CUDA_EXT=0 pip install auto-gptq
69
+ ```
70
+ 同时为确保该拓展——`autogptq_cuda` 不再存在于你的虚拟环境,执行以下命令:
71
+ ```shell
72
+ pip uninstall autogptq_cuda -y
73
+ ```
74
+
75
+ #### 支持使用 triton 加速
76
+ 若想使用 `triton` 加速模型推理,使用以下命令:
77
+ > 警告:目前 triton 仅支持 linux 操作系统;当使用 triton 时 3-bit 数值类型的量化将不被支持
78
+
79
+ ```shell
80
+ pip install auto-gptq[triton]
81
+ ```
82
+
83
+ ### 从源码安装
84
+ <details>
85
+ <summary>点击以查看详情</summary>
86
+
87
+ 克隆源码:
88
+ ```shell
89
+ git clone https://github.com/PanQiWei/AutoGPTQ.git && cd AutoGPTQ
90
+ ```
91
+ 然后,从项目目录安装:
92
+ ```shell
93
+ pip install .
94
+ ```
95
+ 正如在快速安装一节,你可以使用 `BUILD_CUDA_EXT=0` 来取消构建 cuda 拓展。
96
+
97
+ 如果你想要使用 triton 加速且其能够被你的操作系统所支持,请使用 `.[triton]`。
98
+
99
+ 对于 AMD GPUs,为了从源码安装以支持 RoCm,请设置 `ROCM_VERSION` 环境变量。同时通过设置 `PYTORCH_ROCM_ARCH` ([reference](https://github.com/pytorch/pytorch/blob/7b73b1e8a73a1777ebe8d2cd4487eb13da55b3ba/setup.py#L132)) 可提升编译速度,例如:对于 MI200 系列设备,该变量可设为 `gfx90a`。例子:
100
+
101
+ ```shell
102
+ ROCM_VERSION=5.6 pip install .
103
+ ```
104
+
105
+ 对于 RoCm 系统,在从源码安装时额外需要提前安装以下包:`rocsparse-dev`, `hipsparse-dev`, `rocthrust-dev`, `rocblas-dev` and `hipblas-dev`。
106
+
107
+ </details>
108
+
109
+ ## 快速开始
110
+
111
+ ### 量化和推理
112
+ > 警告:这里仅是对 AutoGPTQ 中基本接口的用法展示,只使用了一条文本来量化一个特别小的模型,因此其结果的表现可能不如在大模型上执行量化后预期的那样好。
113
+
114
+ 以下展示了使用 `auto_gptq` 进行量化和推理的最简单用法:
115
+ ```python
116
+ from transformers import AutoTokenizer, TextGenerationPipeline
117
+ from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
118
+
119
+
120
+ pretrained_model_dir = "facebook/opt-125m"
121
+ quantized_model_dir = "opt-125m-4bit"
122
+
123
+
124
+ tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
125
+ examples = [
126
+ tokenizer(
127
+ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."
128
+ )
129
+ ]
130
+
131
+ quantize_config = BaseQuantizeConfig(
132
+ bits=4, # 将模型量化为 4-bit 数值类型
133
+ group_size=128, # 一般推荐将此参数的值设置为 128
134
+ desc_act=False, # 设为 False 可以显著提升推理速度,但是 ppl 可能会轻微地变差
135
+ )
136
+
137
+ # 加载未量化的模型,默认情况下,模型总是会被加载到 CPU 内存中
138
+ model = AutoGPTQForCausalLM.from_pretrained(pretrained_model_dir, quantize_config)
139
+
140
+ # 量化模型, 样本的数据类型应该为 List[Dict],其中字典的键有且仅有 input_ids 和 attention_mask
141
+ model.quantize(examples)
142
+
143
+ # 保存量化好的模型
144
+ model.save_quantized(quantized_model_dir)
145
+
146
+ # 使用 safetensors 保存量化好的模型
147
+ model.save_quantized(quantized_model_dir, use_safetensors=True)
148
+
149
+ # 将量化好的模型直接上传至 Hugging Face Hub
150
+ # 当使用 use_auth_token=True 时, 确保你已经首先使用 huggingface-cli login 进行了登录
151
+ # 或者可以使用 use_auth_token="hf_xxxxxxx" 来显式地添加账户认证 token
152
+ # (取消下面三行代码的注释来使用该功能)
153
+ # repo_id = f"YourUserName/{quantized_model_dir}"
154
+ # commit_message = f"AutoGPTQ model for {pretrained_model_dir}: {quantize_config.bits}bits, gr{quantize_config.group_size}, desc_act={quantize_config.desc_act}"
155
+ # model.push_to_hub(repo_id, commit_message=commit_message, use_auth_token=True)
156
+
157
+ # 或者你也可以同时将量化好的模型保存到本地并上传至 Hugging Face Hub
158
+ # (取消下面三行代码的注释来使用该功能)
159
+ # repo_id = f"YourUserName/{quantized_model_dir}"
160
+ # commit_message = f"AutoGPTQ model for {pretrained_model_dir}: {quantize_config.bits}bits, gr{quantize_config.group_size}, desc_act={quantize_config.desc_act}"
161
+ # model.push_to_hub(repo_id, save_dir=quantized_model_dir, use_safetensors=True, commit_message=commit_message, use_auth_token=True)
162
+
163
+ # 加载量化好的模型到能被识别到的第一块显卡中
164
+ model = AutoGPTQForCausalLM.from_quantized(quantized_model_dir, device="cuda:0")
165
+
166
+ # 从 Hugging Face Hub 下载量化好的模型并加载到能被识别到的第一块显卡中
167
+ # model = AutoGPTQForCausalLM.from_quantized(repo_id, device="cuda:0", use_safetensors=True, use_triton=False)
168
+
169
+ # 使用 model.generate 执行推理
170
+ print(tokenizer.decode(model.generate(**tokenizer("auto_gptq is", return_tensors="pt").to(model.device))[0]))
171
+
172
+ # 或者使用 TextGenerationPipeline
173
+ pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
174
+ print(pipeline("auto-gptq is")[0]["generated_text"])
175
+ ```
176
+
177
+ 参考 [此样例脚本](examples/quantization/quant_with_alpaca.py) 以了解进阶的用法。
178
+
179
+ ### 自定义模型
180
+
181
+ <details>
182
+
183
+ <summary>以下展示了如何拓展 `auto_gptq` 以支持 `OPT` 模型,如你所见,这非常简单:</summary>
184
+
185
+ ```python
186
+ from auto_gptq.modeling import BaseGPTQForCausalLM
187
+
188
+
189
+ class OPTGPTQForCausalLM(BaseGPTQForCausalLM):
190
+ # chained attribute name of transformer layer block
191
+ layers_block_name = "model.decoder.layers"
192
+ # chained attribute names of other nn modules that are at the same level as the transformer layer block
193
+ outside_layer_modules = [
194
+ "model.decoder.embed_tokens", "model.decoder.embed_positions", "model.decoder.project_out",
195
+ "model.decoder.project_in", "model.decoder.final_layer_norm"
196
+ ]
197
+ # chained attribute names of linear layers in transformer layer module
198
+ # normally there are four sub-lists; the modules in each can be seen as one operation,
199
+ # and they should be listed in the order they are actually executed; in this case (and usually in most cases),
200
+ # they are: attention q_k_v projection, attention output projection, MLP project input, MLP project output
201
+ inside_layer_modules = [
202
+ ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
203
+ ["self_attn.out_proj"],
204
+ ["fc1"],
205
+ ["fc2"]
206
+ ]
207
+ ```
208
+ 然后, 你就可以像在基本用法一节中展示的那样使用 `OPTGPTQForCausalLM.from_pretrained` 和其他方法。
209
+
210
+ </details>
211
+
212
+
213
+ ### 在下游任务上执行评估
214
+ 你可以使用在 `auto_gptq.eval_tasks` 中定义的任务来评估量化前后的模型在某个特定下游任务上的表现。
215
+
216
+ 这些预定义的任务支持所有在 [🤗 transformers](https://github.com/huggingface/transformers) 和本项目中被实现了的 causal language models。
217
+
218
+ <details>
219
+
220
+ <summary>以下是使用 `cardiffnlp/tweet_sentiment_multilingual` 数据集在序列分类(文本分类)任务上评估 `EleutherAI/gpt-j-6b` 模型的示例:</summary>
221
+
222
+ ```python
223
+ from functools import partial
224
+
225
+ import datasets
226
+ from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
227
+
228
+ from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
229
+ from auto_gptq.eval_tasks import SequenceClassificationTask
230
+
231
+
232
+ MODEL = "EleutherAI/gpt-j-6b"
233
+ DATASET = "cardiffnlp/tweet_sentiment_multilingual"
234
+ TEMPLATE = "Question:What's the sentiment of the given text? Choices are {labels}.\nText: {text}\nAnswer:"
235
+ ID2LABEL = {
236
+ 0: "negative",
237
+ 1: "neutral",
238
+ 2: "positive"
239
+ }
240
+ LABELS = list(ID2LABEL.values())
241
+
242
+
243
+ def ds_refactor_fn(samples):
244
+ text_data = samples["text"]
245
+ label_data = samples["label"]
246
+
247
+ new_samples = {"prompt": [], "label": []}
248
+ for text, label in zip(text_data, label_data):
249
+ prompt = TEMPLATE.format(labels=LABELS, text=text)
250
+ new_samples["prompt"].append(prompt)
251
+ new_samples["label"].append(ID2LABEL[label])
252
+
253
+ return new_samples
254
+
255
+
256
+ # model = AutoModelForCausalLM.from_pretrained(MODEL).eval().half().to("cuda:0")
257
+ model = AutoGPTQForCausalLM.from_pretrained(MODEL, BaseQuantizeConfig())
258
+ tokenizer = AutoTokenizer.from_pretrained(MODEL)
259
+
260
+ task = SequenceClassificationTask(
261
+ model=model,
262
+ tokenizer=tokenizer,
263
+ classes=LABELS,
264
+ data_name_or_path=DATASET,
265
+ prompt_col_name="prompt",
266
+ label_col_name="label",
267
+ **{
268
+ "num_samples": 1000, # how many samples will be sampled to evaluation
269
+ "sample_max_len": 1024, # max tokens for each sample
270
+ "block_max_len": 2048, # max tokens for each data block
271
+ # function to load the dataset; it must accept only data_name_or_path as input
272
+ # and return datasets.Dataset
273
+ "load_fn": partial(datasets.load_dataset, name="english"),
274
+ # function to preprocess dataset, which is used for datasets.Dataset.map,
275
+ # must return Dict[str, list] with only two keys: [prompt_col_name, label_col_name]
276
+ "preprocess_fn": ds_refactor_fn,
277
+ # truncate the label when a sample's length exceeds sample_max_len
278
+ "truncate_prompt": False
279
+ }
280
+ )
281
+
282
+ # note that max_new_tokens will be automatically specified internally based on given classes
283
+ print(task.run())
284
+
285
+ # self-consistency
286
+ print(
287
+ task.run(
288
+ generation_config=GenerationConfig(
289
+ num_beams=3,
290
+ num_return_sequences=3,
291
+ do_sample=True
292
+ )
293
+ )
294
+ )
295
+ ```
296
+
297
+ </details>
298
+
299
+ ## 了解更多
300
+ [教程](docs/tutorial) 提供了将 `auto_gptq` 集成到你的项目中的手把手指导和最佳实践准则。
301
+
302
+ [示例](examples/README.md) 提供了大量示例脚本以将 `auto_gptq` 用于不同领域。
303
+
304
+ ## 支持的模型
305
+
306
+ > 你可以使用 `model.config.model_type` 来对照下表以检查你正在使用的一个模型是否被 `auto_gptq` 所支持。
307
+ >
308
+ > 比如, `WizardLM`,`vicuna` 和 `gpt4all` 模型的 `model_type` 皆为 `llama`, 因此这些模型皆被 `auto_gptq` 所支持。
309
+
310
+ | model type | quantization | inference | peft-lora | peft-ada-lora | peft-adaption_prompt |
311
+ |------------------------------------|--------------|-----------|-----------|---------------|-----------------------------------------------------------------------------------|
312
+ | bloom | ✅ | ✅ | ✅ | ✅ | |
313
+ | gpt2 | ✅ | ✅ | ✅ | ✅ | |
314
+ | gpt_neox | ✅ | ✅ | ✅ | ✅ | ✅[要求该分支的 peft](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
315
+ | gptj | ✅ | ✅ | ✅ | ✅ | ✅[要求该分支的 peft](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
316
+ | llama | ✅ | ✅ | ✅ | ✅ | ✅ |
317
+ | moss | ✅ | ✅ | ✅ | ✅ | ✅[要求该分支的 peft](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
318
+ | opt | ✅ | ✅ | ✅ | ✅ | |
319
+ | gpt_bigcode | ✅ | ✅ | ✅ | ✅ | |
320
+ | codegen | ✅ | ✅ | ✅ | ✅ | |
321
+ | falcon(RefinedWebModel/RefinedWeb) | ✅ | ✅ | ✅ | ✅ | |
322
+
323
+ ## 支持的评估任务
324
+ 目前, `auto_gptq` 支持以下评估任务: `LanguageModelingTask`, `SequenceClassificationTask` 和 `TextSummarizationTask`;更多的评估任务即将到来!
325
+
326
+ ## 致谢
327
+ - 特别感谢 **Elias Frantar**, **Saleh Ashkboos**, **Torsten Hoefler** 和 **Dan Alistarh** 提出 **GPTQ** 算法并开源[代码](https://github.com/IST-DASLab/gptq)。
328
+ - 特别感谢 **qwopqwop200**, 本项目中涉及到模型量化的代码主要参考自 [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa/tree/cuda)。
329
+
330
+ [![Star History Chart](https://api.star-history.com/svg?repos=PanQiwei/AutoGPTQ&type=Date)](https://star-history.com/#PanQiWei/AutoGPTQ&Date)
USE_POLICY.md ADDED
@@ -0,0 +1,50 @@
1
+ # Llama 2 Acceptable Use Policy
2
+
3
+ Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
4
+
5
+ ## Prohibited Uses
6
+ We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
7
+
8
+ 1. Violate the law or others’ rights, including to:
9
+ 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
10
+ 1. Violence or terrorism
11
+ 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
12
+ 3. Human trafficking, exploitation, and sexual violence
13
+ 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
14
+ 5. Sexual solicitation
15
+ 6. Any other criminal activity
16
+ 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
17
+ 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
18
+ 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
19
+ 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
20
+ 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
21
+ 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
22
+
23
+
24
+
25
+ 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
26
+ 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
27
+ 2. Guns and illegal weapons (including weapon development)
28
+ 3. Illegal drugs and regulated/controlled substances
29
+ 4. Operation of critical infrastructure, transportation technologies, or heavy machinery
30
+ 5. Self-harm or harm to others, including suicide, cutting, and eating disorders
31
+ 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
32
+
33
+
34
+
35
+ 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
36
+ 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
37
+ 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
38
+ 3. Generating, promoting, or further distributing spam
39
+ 4. Impersonating another individual without consent, authorization, or legal right
40
+ 5. Representing that the use of Llama 2 or outputs are human-generated
41
+ 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
42
+ 4. Fail to appropriately disclose to end users any known dangers of your AI system
43
+
44
+ Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
45
+
46
+ * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
47
+ * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
48
+ * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
49
+ * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected])
50
+
batch_throttle.py ADDED
@@ -0,0 +1,23 @@
1
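+ # Split a long list of inputs into small batches so that each request sent to
+ # the Hugging Face Inference API stays small enough to avoid throttling.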
+ from hfapi import Client
2
+
3
+ client = Client()
4
+
5
+ BATCH_SIZE = 4
6
+
7
+ LONG_LIST_OF_INPUTS = [
8
+ "I like you. </s></s> I love you.",
9
+ "At the other end of Pennsylvania Avenue, people began to line up for a White House tour. </s></s> People formed a line at the end of Pennsylvania Avenue.",
10
+ ] * 500
11
+
12
+ def chunker(seq, size):
13
+ return (seq[pos:pos + size] for pos in range(0, len(seq), size))
14
+
15
+ all_results = []
16
+
17
+ for inputs in chunker(LONG_LIST_OF_INPUTS, BATCH_SIZE):
18
+ result = client.text_classification(inputs, model="roberta-large-mnli")
19
+ print(result)
20
+ all_results += result
21
+
22
+
23
+ print("Done!")
config.json ADDED
@@ -0,0 +1,3 @@
1
+ {
2
+ "model_type": "llama"
3
+ }
convert.py ADDED
@@ -0,0 +1,208 @@
1
+ from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Tokenizer
2
+ import argparse, os
3
+ import sys
4
+ import json
5
+ from conversion.tokenize import tokenize
6
+ from conversion.quantize import embeddings, measure_quant, quant
7
+ from conversion.optimize import optimize
8
+ from conversion.compile import compile_model
9
+
10
+ # import tracemalloc
11
+ # tracemalloc.start()
12
+
13
+ parser = argparse.ArgumentParser(description = "Convert model to ExLlamaV2")
14
+ parser.add_argument("-i", "--in_dir", type = str, help = "Input directory", default = "")
15
+ parser.add_argument("-o", "--out_dir", type = str, help = "Output directory")
16
+ parser.add_argument("-c", "--cal_dataset", type = str, help = "Calibration dataset (.parquet file)", default = "")
17
+ parser.add_argument("-r", "--dataset_rows", type = int, default = 100, help = "Number of rows to apply from dataset")
18
+ parser.add_argument("-mr", "--measurement_rows", type = int, default = 16, help = "Number of rows to apply from dataset when measuring")
19
+ parser.add_argument("-gr", "--gpu_rows", type = int, default = 16, help = "Threshold for paging hidden state to CPU")
20
+ parser.add_argument("-l", "--length", type = int, default = 2048, help = "Max no. tokens per sample")
21
+ parser.add_argument("-ml", "--measurement_length", type = int, default = 2048, help = "Max no. tokens per sample when measuring")
22
+ parser.add_argument("-b", "--bits", type = float, default = 4.156, help = "Target bits per weight")
23
+ parser.add_argument("-hb", "--head_bits", type = int, default = 6, help = "Target bits per weight (head layer)")
24
+ parser.add_argument("-m", "--measurement", type = str, help = "Reuse previous measurement")
25
+
26
+ args = parser.parse_args()
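+
+ # Example invocation (paths are illustrative; the flags are the ones defined above):
+ #   python convert.py -i /models/llama-7b-hf -o /models/llama-7b-exl2 \
+ #       -c /data/calibration.parquet -b 4.156 -hb 6
+ # Note: the output directory must already exist; progress is checkpointed to job.json there.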
27
+
28
+ # Arguments
29
+
30
+ in_dir = None if args.in_dir == "" else os.path.abspath(args.in_dir)
31
+ out_dir = os.path.abspath(args.out_dir)
32
+ cal_dataset = None if args.cal_dataset == "" else os.path.abspath(args.cal_dataset)
33
+ dataset_rows = args.dataset_rows
34
+ measurement_rows = args.measurement_rows
35
+ gpu_rows = args.gpu_rows
36
+ length = args.length
37
+ measurement_length = args.measurement_length
38
+ bits = args.bits
39
+ head_bits = args.head_bits
40
+ reuse_measurement = args.measurement
41
+
42
+ if not os.path.exists(out_dir):
43
+ print(f" ## Error: Directory not found: {out_dir}")
44
+ sys.exit()
45
+
46
+ # Create model without loading weights
47
+
48
+ config = ExLlamaV2Config()
49
+ config.model_dir = in_dir
50
+ config.prepare()
51
+
52
+ model = ExLlamaV2(config)
53
+ model.load(lazy = True)
54
+
55
+ tokenizer = ExLlamaV2Tokenizer(config)
56
+
57
+ # Job file
58
+
59
+ job_file = os.path.join(out_dir, "job.json")
60
+
61
+ # Create new job
62
+
63
+ def save_job():
64
+ global job_file, job
65
+ with open(job_file, "w") as f:
66
+ f.write(json.dumps(job, indent = 4))
if not os.path.exists(job_file):

    print(f" -- Beginning new job")

    if len(os.listdir(out_dir)) != 0:
        print(f" !! Warning: Output directory is not empty: {out_dir}")

    if in_dir is None:
        print(f" ## Error: No input directory specified")
        sys.exit(1)

    if cal_dataset is None:
        print(f" ## Error: No calibration dataset specified")
        sys.exit(1)

    job = { "in_dir": in_dir,
            "out_dir": out_dir,
            "cal_dataset": cal_dataset,
            "dataset_rows": dataset_rows,
            "measurement_rows": measurement_rows,
            "gpu_rows": gpu_rows,
            "length": length,
            "measurement_length": measurement_length,
            "bits": bits,
            "head_bits": head_bits,
            "progress": "begin",
          }

    if reuse_measurement is not None:

        with open(reuse_measurement, "r") as f:

            imp_measurement = json.load(f)
            job["measurement"] = imp_measurement["measurement"]
            job["last_module_idx"] = imp_measurement["last_module_idx"]
            job["base_perplexity"] = imp_measurement["base_perplexity"]
            job["reuse_measurement"] = reuse_measurement

    save_job()

# Resume existing job

else:

    print(f" -- Resuming job")
    print(f" !! Note: Overriding options with settings from existing job")

    with open(job_file, "r") as f:
        job = json.load(f)

    if "invalid" in job:
        print(" ** Error: Corrupted job")
        sys.exit(1)

    job["out_dir"] = out_dir

# Feedback

print(f" -- Input: {job['in_dir']}")
print(f" -- Output: {out_dir}")
print(f" -- Calibration dataset: {job['cal_dataset']}, {job['dataset_rows']} / {job['measurement_rows']} ({job['gpu_rows']}) rows, {job['length']} tokens per sample")
print(f" -- Target bits per weight: {job['bits']} (decoder), {job['head_bits']} (head)")

# Make sure subfolders exist

out_tensor_dir = os.path.join(job["out_dir"], "out_tensor")
if not os.path.exists(out_tensor_dir):
    os.makedirs(out_tensor_dir)

# Do the things

while True:

    progress = job["progress"]

    if progress == "begin":

        if "reuse_measurement" in job:

            print(f" -- Reusing measurement: {job['reuse_measurement']}")
            job["progress"] = "optimize"
            save_job()

        else:

            print(f" -- Tokenizing samples (measurement)...")
            tokenize(job, save_job, tokenizer, measure = True)
            job["progress"] = "initial_embeddings"
            save_job()

    if progress == "initial_embeddings":

        print(f" -- Token embeddings (measurement)...")
        embeddings(job, save_job, model)
        job["progress"] = "measure_quant"
        save_job()

    if progress == "measure_quant":

        print(f" -- Measuring quantization impact...")
        measure_quant(job, save_job, model)
        job["progress"] = "optimize"
        save_job()

    if progress == "optimize":

        print(f" -- Optimizing...")
        optimize(job, save_job)
        job["progress"] = "tokens_cal"
        save_job()

    if progress == "tokens_cal":

        print(f" -- Tokenizing samples...")
        tokenize(job, save_job, tokenizer)
        job["progress"] = "embeddings"
        save_job()

    if progress == "embeddings":

        print(f" -- Token embeddings again...")
        embeddings(job, save_job, model)
        job["progress"] = "quant"
        save_job()

    if progress == "quant":

        print(f" -- Quantizing...")
        quant(job, save_job, model)
        job["progress"] = "compile"
        save_job()

    if progress == "compile":

        print(f" -- Compiling output file...")
        compile_model(job, save_job, model)
        job["progress"] = "finished"
        save_job()

    if progress == "finished": break

print(f" -- Finished")
docker-compose.yml ADDED
@@ -0,0 +1,9 @@
version: "3.9"
services:
  api:
    image: "d0ckmg/free-gpt4-web-api:latest"
    ports:
      - "5500:5500"
    volumes:
      - ./cookies.json:/cookies.json
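
With the service above running (docker compose up -d), a minimal reachability check from Python; the root path and response handling are assumptions about the d0ckmg/free-gpt4-web-api image, so treat this as a smoke-test sketch only:

    import urllib.request
    try:
        with urllib.request.urlopen("http://localhost:5500/", timeout = 5) as r:
            print("API reachable, HTTP status", r.status)
    except OSError as e:
        print("API not reachable:", e)
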
example.py ADDED
@@ -0,0 +1,61 @@
import hfapi
client = hfapi.Client()

print("""

```python
import hfapi
client = hfapi.Client()
```

""")

print("""```python
client.question_answering("Where does she live?", "She lives in Berlin.")
```
""")

print(">", client.question_answering("Where does she live?", "She lives in Berlin."))

print("""```python
client.text_generation("My name is Julien and I like to ", model="gpt2")
```
""")
print("```")
print(">", client.text_generation("My name is Julien and I like to ", model="gpt2"))
print("```")
print()

print("""```python
client.summarization("The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.")
```
""")

print(">", client.summarization("The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."))
print()

print("""```python
client.fill_mask("Paris is the [MASK] of France.")
```
""")

print(">", client.fill_mask("Paris is the [MASK] of France."))
print()

print("""```python
client.text_classification("I hated the movie!")
```
""")

print(">", client.text_classification("I hated the movie!"))
print()

print("""```python
client.token_classification("My name is Sarah and I live in London")
```
""")

print(">", client.token_classification("My name is Sarah and I live in London"))
print()
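
Each task above follows the same show-then-run pattern; a small helper (a sketch, not part of hfapi) that keeps the printed snippet and the executed call from drifting apart:

    def demo(snippet, call):
        print(f"```python\n{snippet}\n```\n")   # show the snippet as documentation
        print(">", call())                      # then actually run it
        print()

    demo('client.fill_mask("Paris is the [MASK] of France.")',
         lambda: client.fill_mask("Paris is the [MASK] of France."))
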
requirements.txt ADDED
@@ -0,0 +1,6 @@
pandas
ninja
fastparquet
torch>=2.0.1
safetensors>=0.3.2
sentencepiece>=0.1.97
setup.py ADDED
@@ -0,0 +1,139 @@
import os
import sys
from pathlib import Path
from setuptools import setup, find_packages


common_setup_kwargs = {
    "version": "0.4.1",
    "name": "auto_gptq",
    "author": "PanQiWei",
    "description": "An easy-to-use LLMs quantization package with user-friendly apis, based on GPTQ algorithm.",
    "long_description": (Path(__file__).parent / "README.md").read_text(encoding="UTF-8"),
    "long_description_content_type": "text/markdown",
    "url": "https://github.com/PanQiWei/AutoGPTQ",
    "keywords": ["gptq", "quantization", "large-language-models", "transformers"],
    "platforms": ["windows", "linux"],
    "classifiers": [
        "Environment :: GPU :: NVIDIA CUDA :: 11.7",
        "Environment :: GPU :: NVIDIA CUDA :: 11.8",
        "Environment :: GPU :: NVIDIA CUDA :: 12.0",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: Chinese (Simplified)",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: C++",
    ]
}


BUILD_CUDA_EXT = int(os.environ.get('BUILD_CUDA_EXT', '1')) == 1
if BUILD_CUDA_EXT:
    try:
        import torch
    except ImportError:
        print("Building the CUDA extension requires PyTorch (>=1.13.0) to be installed; please install PyTorch first!")
        sys.exit(-1)

    CUDA_VERSION = None
    ROCM_VERSION = os.environ.get('ROCM_VERSION', None)
    if ROCM_VERSION and not torch.version.hip:
        print(
            f"Trying to compile auto-gptq for ROCm, but PyTorch {torch.__version__} "
            "is installed without ROCm support."
        )
        sys.exit(-1)

    if not ROCM_VERSION:
        default_cuda_version = torch.version.cuda
        CUDA_VERSION = "".join(os.environ.get("CUDA_VERSION", default_cuda_version).split("."))

    if ROCM_VERSION:
        common_setup_kwargs['version'] += f"+rocm{ROCM_VERSION}"
    else:
        if not CUDA_VERSION:
            print(
                f"Trying to compile auto-gptq for CUDA, but PyTorch {torch.__version__} "
                "is installed without CUDA support."
            )
            sys.exit(-1)
        common_setup_kwargs['version'] += f"+cu{CUDA_VERSION}"
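
# Worked example of the version tag above: CUDA_VERSION is torch.version.cuda
# with the dots stripped, so a PyTorch built for CUDA 11.8 yields the wheel
# version "0.4.1+cu118", while ROCM_VERSION=5.4.2 yields "0.4.1+rocm5.4.2".
#
#   version = "0.4.1"
#   cuda = "11.8"                             # torch.version.cuda
#   tag = "cu" + "".join(cuda.split("."))     # -> "cu118"
#   assert version + "+" + tag == "0.4.1+cu118"
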
requirements = [
    "accelerate>=0.19.0",
    "datasets",
    "numpy",
    "rouge",
    "torch>=1.13.0",
    "safetensors",
    "transformers>=4.31.0",
    "peft"
]

extras_require = {
    "triton": ["triton==2.0.0"],
    "test": ["parameterized"]
}

include_dirs = ["autogptq_cuda"]

additional_setup_kwargs = dict()
if BUILD_CUDA_EXT:
    from torch.utils import cpp_extension

    if not ROCM_VERSION:
        from distutils.sysconfig import get_python_lib
        conda_cuda_include_dir = os.path.join(get_python_lib(), "nvidia/cuda_runtime/include")

        print("conda_cuda_include_dir", conda_cuda_include_dir)
        if os.path.isdir(conda_cuda_include_dir):
            include_dirs.append(conda_cuda_include_dir)
            print(f"appending conda cuda include dir {conda_cuda_include_dir}")

    extensions = [
        cpp_extension.CUDAExtension(
            "autogptq_cuda_64",
            [
                "autogptq_cuda/autogptq_cuda_64.cpp",
                "autogptq_cuda/autogptq_cuda_kernel_64.cu"
            ]
        ),
        cpp_extension.CUDAExtension(
            "autogptq_cuda_256",
            [
                "autogptq_cuda/autogptq_cuda_256.cpp",
                "autogptq_cuda/autogptq_cuda_kernel_256.cu"
            ]
        )
    ]

    if os.environ.get("INCLUDE_EXLLAMA_KERNELS", "1") == "1":  # TODO: improve github action to always compile exllama_kernels
        extensions.append(
            cpp_extension.CUDAExtension(
                "exllama_kernels",
                [
                    "autogptq_cuda/exllama/exllama_ext.cpp",
                    "autogptq_cuda/exllama/cuda_buffers.cu",
                    "autogptq_cuda/exllama/cuda_func/column_remap.cu",
                    "autogptq_cuda/exllama/cuda_func/q4_matmul.cu",
                    "autogptq_cuda/exllama/cuda_func/q4_matrix.cu"
                ]
            )
        )

    additional_setup_kwargs = {
        "ext_modules": extensions,
        "cmdclass": {'build_ext': cpp_extension.BuildExtension}
    }
common_setup_kwargs.update(additional_setup_kwargs)
setup(
    packages=find_packages(),
    install_requires=requirements,
    extras_require=extras_require,
    include_dirs=include_dirs,
    python_requires=">=3.8.0",
    **common_setup_kwargs
)
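
Three environment variables steer the build above: BUILD_CUDA_EXT=0 skips the extension entirely, ROCM_VERSION selects a ROCm build, and INCLUDE_EXLLAMA_KERNELS (default "1") controls whether the exllama kernels are compiled. A CPU-only install driven from Python, as a sketch:

    import os, subprocess, sys
    os.environ["BUILD_CUDA_EXT"] = "0"   # build without the CUDA extension
    subprocess.check_call([sys.executable, "-m", "pip", "install", "."])
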
test_inference.py ADDED
@@ -0,0 +1,219 @@
from exllamav2 import (
    ExLlamaV2,
    ExLlamaV2Config,
    ExLlamaV2Cache,
    ExLlamaV2Tokenizer,
    model_init,
)

import argparse, os, math, time
import pandas, fastparquet
import torch
import torch.nn.functional as F
from conversion.tokenize import get_tokens
from conversion.quantize import list_live_tensors

import sys
import json

torch.cuda._lazy_init()
torch.set_printoptions(precision = 10)
# torch.backends.cuda.matmul.allow_tf32 = True
# torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = True
# torch.set_float32_matmul_precision("medium")

parser = argparse.ArgumentParser(description = "Test inference on ExLlamaV2 model")
parser.add_argument("-ed", "--eval_dataset", type = str, help = "Perplexity evaluation dataset (.parquet file)")
parser.add_argument("-er", "--eval_rows", type = int, default = 128, help = "Number of rows to apply from dataset")
parser.add_argument("-el", "--eval_length", type = int, default = 2048, help = "Max no. tokens per sample")
parser.add_argument("-p", "--prompt", type = str, help = "Generate from prompt")
parser.add_argument("-t", "--tokens", type = int, default = 128, help = "Max no. tokens")
parser.add_argument("-ps", "--prompt_speed", action = "store_true", help = "Test prompt processing (batch) speed over context length")
parser.add_argument("-s", "--speed", action = "store_true", help = "Test raw generation speed over context length")

# Initialize model and tokenizer

model_init.add_args(parser)
args = parser.parse_args()
model_init.check_args(args)
model_init.print_options(args)
model, tokenizer = model_init.init(args)

# Test generation

if args.prompt:

    with torch.inference_mode():

        cache = ExLlamaV2Cache(model)

        ids = tokenizer.encode(args.prompt)
        tokens_prompt = ids.shape[-1]

        print(f" -- Warmup...")

        model.forward(ids[:, -1:])

        print(f" -- Generating (greedy sampling)...")
        print()
        print(args.prompt, end = "")
        sys.stdout.flush()

        time_begin = time.time()

        if ids.shape[-1] > 1: model.forward(ids[:, :-1], cache, preprocess_only = True)

        torch.cuda.synchronize()
        time_prompt = time.time()

        for i in range(args.tokens):

            text1 = tokenizer.decode(ids[:, -2:])[0]

            logits = model.forward(ids[:, -1:], cache)
            sample = torch.argmax(logits[0, -1]).cpu().unsqueeze(0).unsqueeze(0)
            ids = torch.cat((ids, sample), dim = -1)

            text2 = tokenizer.decode(ids[:, -3:])[0]
            text2 = text2[len(text1):]

            print(text2, end = "")
            # sys.stdout.flush()

        time_end = time.time()

        print()
        print()

        total_prompt = time_prompt - time_begin
        total_gen = time_end - time_prompt
        print(f"Prompt processed in {total_prompt:.2f} seconds, {tokens_prompt} tokens, {tokens_prompt / total_prompt:.2f} tokens/second")
        print(f"Response generated in {total_gen:.2f} seconds, {args.tokens} tokens, {args.tokens / total_gen:.2f} tokens/second")

        cache = None
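
# Note on the generation loop above: decoding the last two tokens before
# sampling and the last three after, then printing only the new suffix,
# streams text incrementally without re-decoding the whole sequence.
# SentencePiece merges a token with its neighbours (e.g. leading spaces),
# so decoding the sampled token in isolation could produce different text.
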

# Test perplexity

if args.eval_dataset:

    with torch.inference_mode():

        eval_dataset = args.eval_dataset
        eval_rows = args.eval_rows
        eval_length = args.eval_length

        print(f" -- Running perplexity test")
        print(f" -- Dataset: {eval_dataset}")
        print(f" -- Tokenizing eval data, {eval_rows} rows x {eval_length} tokens...")

        eval_tokens = get_tokens(eval_rows, eval_length, eval_dataset, tokenizer)

        print(f" -- Inference", end = "")
        sys.stdout.flush()

        logprob_sum = 0.0
        logprob_count = 0

        for i in range(eval_tokens.shape[0]):

            if i % 10 == 0: print(".", end = "")
            sys.stdout.flush()

            input_ids = eval_tokens[i:i+1, :]

            input_ids = input_ids[:, :-1]
            logits = model.forward(input_ids)

            # Position j of the logits predicts token j+1, so gathering against
            # the shifted targets aligns predictions with the tokens they score
            target_ids = input_ids[:, 1:].to(logits.device)

            log_probs = F.log_softmax(logits, dim = -1)
            token_log_probs = log_probs.gather(-1, target_ids.unsqueeze(-1)).squeeze(-1)
            logprob_sum += token_log_probs.sum().item()
            logprob_count += target_ids.numel()

        print()

        mean_log_prob = logprob_sum / logprob_count
        perplexity = math.exp(-mean_log_prob)

        print(f" -- Evaluation perplexity: {perplexity:.4f}")
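
# The figure above is the standard definition: with N predicted tokens and
# per-token log-probabilities log p_i, perplexity = exp(-(1/N) * sum(log p_i)).
# A tiny self-contained check of the same arithmetic:
#
#   import math
#   token_log_probs = [-1.2, -0.3, -2.0]
#   ppl = math.exp(-sum(token_log_probs) / len(token_log_probs))   # ~ 3.21
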

# Test prompt speed

if args.prompt_speed:

    with torch.inference_mode():

        cache = ExLlamaV2Cache(model)

        ids = torch.randint(0, model.config.vocab_size - 1, (1, model.config.max_seq_len))

        print(f" -- Warmup...")

        model.forward(ids[:, -1:])

        print(f" -- Measuring prompt speed...")

        current_len = 128
        while True:

            time_begin = time.time()

            cache.current_seq_len = 0
            model.forward(ids[:, :current_len], cache, preprocess_only = True)
            torch.cuda.synchronize()

            time_end = time.time()
            tps = current_len / (time_end - time_begin)

            print(f" ** Length {current_len:>5} tokens: {tps:>11.4f} t/s")

            current_len_ = current_len
            current_len = min(current_len + 128, model.config.max_seq_len)
            if current_len == current_len_: break

        cache = None


# Test token speed

if args.speed:

    with torch.inference_mode():

        cache = ExLlamaV2Cache(model)

        print(f" -- Measuring token speed...")
        ids = tokenizer.encode("X")
        model.forward(ids[:, :])

        current_idx = ids.shape[-1]
        next_stop = 128

        while True:

            time_begin = time.time()

            tokens = next_stop - current_idx
            for i in range(tokens):

                logits = model.forward(ids[:, -1:], cache)
                sample = torch.argmax(logits[0, -1]).cpu().unsqueeze(0).unsqueeze(0)
                ids = torch.cat((ids, sample), dim = -1)

            time_end = time.time()
            tps = tokens / (time_end - time_begin)

            print(f" ** Position {current_idx:>5} + {tokens:>3} tokens: {tps:>9.4f} t/s")

            current_idx = next_stop
            next_stop = min(next_stop + 128, model.config.max_seq_len)
            if next_stop == current_idx: break