rodrigomasini committed on
Commit
f5ab0cc
1 Parent(s): b0602a1

Update app_v3.py

Files changed (1)
  1. app_v3.py +14 -1
app_v3.py CHANGED
@@ -1,8 +1,21 @@
from transformers import AutoTokenizer, TextStreamer, pipeline
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
import time
+ from huggingface_hub import snapshot_download
+ import os

- model_name_or_path = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
+ # Define pretrained and quantized model directories
+ pretrained_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
+ cwd = os.getcwd()
+
+ quantized_model_dir = cwd + "/Jackson2-4bit-128g-GPTQ"
+
+ # Create the cache directory if it doesn't exist
+ os.makedirs(quantized_model_dir, exist_ok=True)
+
+ snapshot_download(repo_id=pretrained_model_dir, local_dir=quantized_model_dir, local_dir_use_symlinks=True)
+
+ model_name_or_path = quantized_model_dir
model_basename = "Jackson2-4bit-128g-GPTQg"

use_triton = False
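
Not part of the commit itself, but for orientation: the snapshot downloaded above is what the rest of app_v3.py presumably loads through auto_gptq. A minimal sketch of that loading step, assuming the variable names from the diff (model_name_or_path, model_basename, use_triton) and the standard AutoGPTQForCausalLM.from_quantized and TextStreamer APIs imported at the top of the file:

```python
# Sketch only: loading the snapshot downloaded by this commit with auto_gptq.
# Variable values mirror the diff above; everything else is an assumption
# about how the rest of app_v3.py uses the imports at the top of the file.
import os

from transformers import AutoTokenizer, TextStreamer
from auto_gptq import AutoGPTQForCausalLM

quantized_model_dir = os.getcwd() + "/Jackson2-4bit-128g-GPTQ"  # as in the diff
model_name_or_path = quantized_model_dir
model_basename = "Jackson2-4bit-128g-GPTQg"
use_triton = False

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)

# Load the local GPTQ checkpoint that snapshot_download placed in quantized_model_dir
model = AutoGPTQForCausalLM.from_quantized(
    model_name_or_path,
    model_basename=model_basename,
    use_safetensors=True,
    device="cuda:0",
    use_triton=use_triton,
)

# Stream generated tokens to stdout as they are produced (hypothetical prompt)
prompt = "Rewrite the following note in formal English: hey, meeting moved to 3pm"
inputs = tokenizer(prompt, return_tensors="pt").to("cuda:0")
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(**inputs, streamer=streamer, max_new_tokens=256, temperature=0.7, do_sample=True)
```

The key effect of the commit is that model_name_or_path now points at a local directory populated by snapshot_download rather than at the remote repo id, so loading works from a pre-fetched copy of the checkpoint.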