rodrigomasini committed
Commit: e9589b1
Parent(s): 136e5a5
Update app_v2.py
app_v2.py CHANGED
@@ -1,51 +1,38 @@
 import streamlit as st
 from transformers import AutoTokenizer
 from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
-
-
-cwd = os.getcwd()
-cachedir = cwd + '/cache'
-
-local_folder = cachedir + "/model"
-
-# Check if the directory exists before creating it
-if not os.path.exists(cachedir):
-    os.mkdir(cachedir)
+import os
 
 # Define pretrained and quantized model directories
-
-quantized_model_dir = "
-
-
-
-
-
-
-
-
-
-
-
-
-
-#
-
-
-
-
-
-
-)
-
-
-
-
-
-
-
-
-
-)
-
-st.write(model.hf_device_map)
-
+pretrained_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
+quantized_model_dir = "./Jackson2-4bit-128g-GPTQ"
+
+# Create the cache directory if it doesn't exist
+os.makedirs(quantized_model_dir, exist_ok=True)
+
+# Quantization configuration
+quantize_config = BaseQuantizeConfig(bits=4, group_size=128, desc_act=False)
+
+# Load the tokenizer
+tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
+
+# Load the model using Option 1
+model = AutoGPTQForCausalLM.from_pretrained(pretrained_model_dir, quantize_config)
+
+# Starting Streamlit app
+st.title("AutoGPTQ Streamlit App")
+
+user_input = st.text_input("Input a phrase")
+
+# Generate output when the "Generate" button is pressed
+if st.button("Generate"):
+    inputs = tokenizer(user_input, return_tensors="pt")
+    outputs = model.generate(
+        **inputs,
+        max_length=512 + inputs['input_ids'].size(-1),
+        temperature=0.1,
+        top_p=0.95,
+        repetition_penalty=1.15
+    )
+    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    st.text(generated_text)
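
For reference: the repo name "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ" suggests the checkpoint is already GPTQ-quantized. The call AutoGPTQForCausalLM.from_pretrained(pretrained_model_dir, quantize_config) used in the new file loads a full-precision model and only prepares it for quantization; loading ready-made GPTQ weights normally goes through from_quantized. A minimal sketch of that variant, under the assumptions that a CUDA device is available and the repo ships safetensors weights (neither is stated in this commit):

from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

pretrained_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"

# Tokenizer comes from the same repo as the quantized weights.
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)

# Load the pre-quantized GPTQ checkpoint directly instead of re-quantizing.
model = AutoGPTQForCausalLM.from_quantized(
    pretrained_model_dir,
    device="cuda:0",        # assumption: a single GPU is available
    use_safetensors=True,   # assumption: the repo provides .safetensors weights
)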
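
Continuing that sketch, the generation step from the diff can be exercised outside Streamlit. The inputs are moved to the model's device (the new file tokenizes on CPU), and do_sample=True is set so that temperature and top_p actually take effect; the sampling values mirror the diff, while max_new_tokens stands in for the max_length arithmetic purely for illustration. The app itself would typically be launched with streamlit run app_v2.py.

user_input = "Summarize the benefits of 4-bit GPTQ quantization."  # example prompt

# Tokenize and move the tensors to the device the model was loaded on.
inputs = tokenizer(user_input, return_tensors="pt").to(model.device)

outputs = model.generate(
    **inputs,
    do_sample=True,          # required for temperature/top_p to be used
    max_new_tokens=512,      # cap on newly generated tokens
    temperature=0.1,
    top_p=0.95,
    repetition_penalty=1.15,
)

print(tokenizer.decode(outputs[0], skip_special_tokens=True))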