"""Run."""
# pylint: disable=invalid-name,line-too-long,broad-except,missing-function-docstring
from __future__ import annotations
import os
import time
from typing import Iterable
import gradio as gr
import pynvml
# import torch
from ctransformers import AutoModelForCausalLM
from gradio.themes.base import Base
from gradio.themes.utils import colors, fonts, sizes
from huggingface_hub import hf_hub_download, hf_hub_url # snapshot_download,
from loguru import logger
from python_run_cmd import run_cmd
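# sanity check: aria2c should be on the PATH (it is listed in packages.txt)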
ret = run_cmd("which aria2c", mute_stdout=False)
logger.debug(ret)
os.environ["TZ"] = "Asia/Shanghai"
try:
time.tzset() # type: ignore
logger.debug(f"Timezone set to {os.environ['TZ']=}")
except AttributeError:
    ...  # time.tzset() is not available on Windows
repo_id = "TheBloke/openbuddy-mistral-7B-v13-GGUF"
# filename = "openbuddy-mistral-7b-v13.Q4_K_S.gguf"  # 4.17G
filename = "openbuddy-mistral-7b-v13.Q4_K_M.gguf"  # 4.39G
model_ready = True
logger.debug("Start dl")
# try to download 5 times:
model_path = f"./{filename}"
for idx in range(5):
logger.debug(f"attempt {idx + 1}")
try:
model_path = hf_hub_download(
repo_id=repo_id, filename=filename, revision="main"
)
break
except Exception as exc:
logger.error(f"failed to download {filename}: {exc}")
        # raise SystemExit(f"hf acting up, can't download the model {filename=}, exiting")
time.sleep(3)
else:
    logger.warning("Tried 5 times, all in vain")
    # raise gr.Error(f"hf acting up, can't download the model {filename=}, exiting")
    # raise SystemExit(f"hf acting up, can't download the model {filename=}, exiting")
model_ready = False
logger.debug(f"Done dl, {model_ready=}")
if not model_ready:  # fall back to downloading with wget
    logger.debug("Try wget...")
url = hf_hub_url(
repo_id,
filename,
# revision="main",
)
logger.debug(f"{url=}")
ret = run_cmd(f"wget -c {url}", mute_stdout=False)
logger.debug(ret)
model_path = f"./{filename}"
    # a zero exit code from wget means the download succeeded
    if not ret.returncode:
        model_ready = True
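# A hedged sketch, not exercised by default: aria2c (probed above) could
# replace wget for segmented, resumable downloads. The -c/-x/-s flags
# (continue, max connections per server, splits) are standard aria2c options;
# USE_ARIA2C is a hypothetical toggle, not part of the original flow.
USE_ARIA2C = False
if USE_ARIA2C and not model_ready:
    ret = run_cmd(f"aria2c -c -x 16 -s 16 -o {filename} {url}", mute_stdout=False)
    logger.debug(ret)
    if not ret.returncode:
        model_ready = True
        model_path = f"./{filename}"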
# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
# model = AutoModelForCausalLM.from_pretrained("TheBloke/openbuddy-mistral-7B-v13-GGUF", model_file="openbuddy-mistral-7b-v13.Q4_K_S.gguf", model_type="mistral", gpu_layers=0)
has_cuda = False
try:
pynvml.nvmlInit()
has_cuda = True
logger.debug("has cuda")
except pynvml.NVMLError:  # library or driver missing -> no usable GPU
    logger.debug("no cuda")
# if torch.cuda.is_available():
if has_cuda:
gpu_layers = 50 # set to what you like for GPU
else:
gpu_layers = 0
logger.debug("Start loading the model")
try:
model = AutoModelForCausalLM.from_pretrained(
model_path, model_type="mistral", gpu_layers=gpu_layers
)
except Exception as exc:
logger.error(exc)
model_ready = False
model = None
logger.debug(f"Done loading the model, {model_ready=}")
ins = """[INST] <<FRIDAY>>
Remember that your English name is "openbuddy" and your name in Chinese is "开友". You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
<</FRIDAY>>
{} [/INST]
"""
theme = gr.themes.Monochrome(
primary_hue="indigo",
secondary_hue="blue",
neutral_hue="slate",
radius_size=gr.themes.sizes.radius_sm,
font=[
gr.themes.GoogleFont("Open Sans"),
"ui-sans-serif",
"system-ui",
"sans-serif",
],
)
def response(question):
    if model is None:
        res = f"model not ready (failed to download the file {filename=} from hf.co)"
    else:
        res = model(ins.format(question))
    yield res
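# A hedged streaming variant: ctransformers accepts stream=True, which yields
# text chunks as they are generated, so the Gradio output could update
# incrementally. response_stream is an illustrative name and is not wired into
# the UI below.
def response_stream(question):
    if model is None:
        yield f"model not ready (failed to download the file {filename=} from hf.co)"
        return
    partial = ""
    for chunk in model(ins.format(question), stream=True):
        partial += chunk
        yield partial  # each yield re-renders the accumulated answer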
examples = ["Hello!"]
def process_example(args):
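    # drain the response generator and return the final yielded value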
x = None
for x in response(args):
pass
return x
css = ".generating {visibility: hidden}"
# Based on the gradio theming guide and borrowed from https://huggingface.co./spaces/shivi/dolly-v2-demo
class SeafoamCustom(Base):
"""Define."""
def __init__(
self,
*,
primary_hue: colors.Color | str = colors.emerald,
secondary_hue: colors.Color | str = colors.blue,
neutral_hue: colors.Color | str = colors.blue,
spacing_size: sizes.Size | str = sizes.spacing_md,
radius_size: sizes.Size | str = sizes.radius_md,
font: fonts.Font
| str
| Iterable[fonts.Font | str] = (
fonts.GoogleFont("Quicksand"),
"ui-sans-serif",
"sans-serif",
),
font_mono: fonts.Font
| str
| Iterable[fonts.Font | str] = (
fonts.GoogleFont("IBM Plex Mono"),
"ui-monospace",
"monospace",
),
):
"""Init."""
super().__init__(
primary_hue=primary_hue,
secondary_hue=secondary_hue,
neutral_hue=neutral_hue,
spacing_size=spacing_size,
radius_size=radius_size,
font=font,
font_mono=font_mono,
)
super().set(
button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
button_primary_text_color="white",
button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
block_shadow="*shadow_drop_lg",
button_shadow="*shadow_drop_lg",
input_background_fill="zinc",
input_border_color="*secondary_300",
input_shadow="*shadow_drop",
input_shadow_focus="*shadow_drop_lg",
)
seafoam = SeafoamCustom()
with gr.Blocks(theme=seafoam, analytics_enabled=False, css=css) as demo:
with gr.Column():
gr.Markdown(
""" ## Testrun
Type in the box below and click the button to generate answers to your most pressing questions!
"""
)
with gr.Row():
with gr.Column(scale=3):
instruction = gr.Textbox(
placeholder="Enter your question here",
label="Question",
elem_id="q-input",
)
with gr.Box():
gr.Markdown("**Answer**")
output = gr.Markdown(elem_id="q-output")
submit = gr.Button("Generate", variant="primary")
gr.Examples(
examples=examples,
inputs=[instruction],
# cache_examples=True,
cache_examples=False,
fn=process_example,
outputs=[output],
)
submit.click(response, inputs=[instruction], outputs=[output])
instruction.submit(response, inputs=[instruction], outputs=[output])
# demo.queue(concurrency_count=1, max_size=5).launch(debug=False, share=True)
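# concurrency_count=1 processes one request at a time (the loaded model is a
# single shared instance); max_size=5 caps how many requests may wait in queue.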
demo.queue(concurrency_count=1, max_size=5).launch(debug=False)