ffreemt committed
Commit: 5080c22
Parent(s): 9523a2b

Fix download gguf file

Files changed:
- .ruff.toml +21 -0
- README.md +1 -1
- app.py +15 -1
.ruff.toml
ADDED
@@ -0,0 +1,21 @@
+# Assume Python 3.10.
+target-version = "py310"
+# Decrease the maximum line length to 79 characters.
+line-length = 300
+
+# pyflakes, pycodestyle, isort
+# flake8 YTT, pydocstyle D, pylint PLC
+select = ["F", "E", "W", "I001", "YTT", "D", "PLC"]
+# select = ["ALL"]
+
+# E501 Line too long
+# D102 Missing docstring in public method
+# D100 Missing docstring in public module
+# E501 Line too long
+# D103 Missing docstring in public function
+# D101 Missing docstring in public class
+# `multi-line-summary-first-line` (D212)
+# `one-blank-line-before-class` (D203)
+extend-ignore = ["E501", "D100", "D101", "D102", "D103", "D212", "D203"]
+
+exclude = [".venv", "ultrachat-13B-test.py"]
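As a quick local check of this new lint configuration, ruff can be run from the repository root, where it picks up .ruff.toml automatically. The snippet below is only an illustrative sketch, not part of the commit; it assumes ruff is installed in the environment and wraps the standard `ruff check` command from Python:

    # Hypothetical sanity check of the .ruff.toml above; assumes ruff is installed.
    import subprocess

    # "ruff check ." lints the current directory using the nearest .ruff.toml.
    subprocess.run(["ruff", "check", "."], check=False)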
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: ⚡
 colorFrom: yellow
 colorTo: indigo
 sdk: gradio
-sdk_version:
+sdk_version: 4.5.0
 app_file: app.py
 pinned: true
 ---
app.py
CHANGED
@@ -3,12 +3,26 @@ from __future__ import annotations
 from typing import Iterable
 
 import gradio as gr
+import torch
 from ctransformers import AutoModelForCausalLM
 from gradio.themes.base import Base
 from gradio.themes.utils import colors, fonts, sizes
+from huggingface_hub import hf_hub_download  # snapshot_download,
+
+repo_id = 'TheBloke/openbuddy-mistral-7B-v13-GGUF'
+filename = 'openbuddy-mistral-7b-v13.Q4_K_S.gguf'  # 4.17G
+
+model_path = hf_hub_download(repo_id=repo_id, filename=filename, revision="main")
 
 # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
-model = AutoModelForCausalLM.from_pretrained("TheBloke/openbuddy-mistral-7B-v13-GGUF", model_file="openbuddy-mistral-7b-v13.Q4_K_S.gguf", model_type="mistral", gpu_layers=0)
+# model = AutoModelForCausalLM.from_pretrained("TheBloke/openbuddy-mistral-7B-v13-GGUF", model_file="openbuddy-mistral-7b-v13.Q4_K_S.gguf", model_type="mistral", gpu_layers=0)
+
+if torch.cuda.is_available():
+    gpu_layers = 50  # set to what you like for GPU
+else:
+    gpu_layers = 0
+model = AutoModelForCausalLM.from_pretrained(model_path, model_type="mistral", gpu_layers=gpu_layers)
+
 ins = '''[INST] <<FRIDAY>>
 Remember that your English name is "Shi-Ci" and your name in Chinese is "兮辞". You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
 If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
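The fix replaces the implicit repo download inside ctransformers' from_pretrained (repo id plus model_file) with an explicit hf_hub_download of the single GGUF file; the resulting local path is then passed to from_pretrained. Below is a minimal, standalone sketch of that same flow, not part of the commit: it leaves out the gradio UI, gpu_layers=50 is only an example value, and the final generation call is just a smoke test to show the loaded model is usable.

    # Download-then-load flow, mirroring the change above (assumes torch,
    # ctransformers and huggingface_hub are installed).
    import torch
    from ctransformers import AutoModelForCausalLM
    from huggingface_hub import hf_hub_download

    repo_id = "TheBloke/openbuddy-mistral-7B-v13-GGUF"
    filename = "openbuddy-mistral-7b-v13.Q4_K_S.gguf"

    # Fetch the single GGUF file and return its local cache path.
    model_path = hf_hub_download(repo_id=repo_id, filename=filename, revision="main")

    # Load from the local path; offload layers to the GPU only when CUDA is present.
    gpu_layers = 50 if torch.cuda.is_available() else 0
    llm = AutoModelForCausalLM.from_pretrained(model_path, model_type="mistral", gpu_layers=gpu_layers)

    # One-off generation call as a smoke test.
    print(llm("Hello, my name is", max_new_tokens=16))

Handing ctransformers a concrete local file path avoids relying on it to resolve repo_id and model_file itself, which is what the "Fix download gguf file" commit addresses.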