rodrigomasini committed on
Commit
136e5a5
1 Parent(s): 3ac5658

Update app_v2.py

Browse files
Files changed (1) hide show
  1. app_v2.py +6 -4
app_v2.py CHANGED
@@ -6,13 +6,15 @@ from huggingface_hub import snapshot_download
6
  cwd = os.getcwd()
7
  cachedir = cwd + '/cache'
8
 
 
 
9
  # Check if the directory exists before creating it
10
  if not os.path.exists(cachedir):
11
  os.mkdir(cachedir)
12
 
13
- os.environ['HF_HOME'] = cachedir
14
-
15
- local_folder = cachedir + "/model"
16
 
17
  quantized_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
18
 
@@ -27,7 +29,7 @@ use_strict = False
27
  use_triton = False
28
 
29
  # Load tokenizer and model
30
- tokenizer = AutoTokenizer.from_pretrained(local_folder, use_fast=False)
31
 
32
  quantize_config = BaseQuantizeConfig(
33
  bits=4,
 
6
  cwd = os.getcwd()
7
  cachedir = cwd + '/cache'
8
 
9
+ local_folder = cachedir + "/model"
10
+
11
  # Check if the directory exists before creating it
12
  if not os.path.exists(cachedir):
13
  os.mkdir(cachedir)
14
 
15
+ # Define pretrained and quantized model directories
16
+ pretrained_quantized_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
17
+ quantized_model_dir = "opt-125m-4bit"
18
 
19
  quantized_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
20
 
 
29
  use_triton = False
30
 
31
  # Load tokenizer and model
32
+ tokenizer = AutoTokenizer.from_pretrained(local_folder, use_fast=True)
33
 
34
  quantize_config = BaseQuantizeConfig(
35
  bits=4,