import os

import streamlit as st
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from huggingface_hub import snapshot_download

# Cache everything under ./cache/model next to the app
cwd = os.getcwd()
cachedir = os.path.join(cwd, "cache")
local_folder = os.path.join(cachedir, "model")

# Create the cache directory if it does not already exist
os.makedirs(cachedir, exist_ok=True)

# Hugging Face repo that holds the quantized model
quantized_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"

# Download the snapshot once; the GPTQ weights ship as a .safetensors
# file (see model_basename below), not pytorch_model.bin
model_path = os.path.join(local_folder, "Jackson2-4bit-128g-GPTQ.safetensors")
if not os.path.isfile(model_path):
    snapshot_download(repo_id=quantized_model_dir, local_dir=local_folder, local_dir_use_symlinks=True)

# Basename of the quantized checkpoint; from_quantized resolves it
# relative to the model directory, so pass the bare name, not a full path
model_basename = "Jackson2-4bit-128g-GPTQ"

use_strict = False  # passed through to from_quantized below
use_triton = False  # use the CUDA kernels rather than the Triton backend

# Load the tokenizer from the downloaded snapshot
tokenizer = AutoTokenizer.from_pretrained(local_folder, use_fast=True)

# Quantization settings matching how the checkpoint was produced:
# 4-bit weights, group size 128, no act-order
quantize_config = BaseQuantizeConfig(
    bits=4,
    group_size=128,
    desc_act=False
)

# Load the 4-bit GPTQ model onto the first GPU
model = AutoGPTQForCausalLM.from_quantized(
    local_folder,
    use_safetensors=True,
    strict=use_strict,
    model_basename=model_basename,
    device="cuda:0",
    use_triton=use_triton,
    quantize_config=quantize_config
)

# hf_device_map is only populated when loading with a device_map,
# so fall back to the model's device for this debug printout
st.write(getattr(model, "hf_device_map", model.device))
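
# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal Streamlit prompt box wired to the loaded tokenizer/model pair;
# the widget labels and generation parameters below are illustrative
# assumptions, not settings taken from the original app.
prompt = st.text_area("Enter text to formalize:")
if st.button("Generate") and prompt:
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to("cuda:0")
    output_ids = model.generate(
        input_ids=input_ids,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
    )
    st.write(tokenizer.decode(output_ids[0], skip_special_tokens=True))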