rodrigomasini committed on
Commit 7843ac8
1 Parent(s): 5c1d871

Update app_v3.py

Files changed (1)
  1. app_v3.py +3 -14
app_v3.py CHANGED
@@ -1,9 +1,9 @@
-from transformers import AutoTokenizer, TextStreamer, pipeline, logging
+from transformers import AutoTokenizer, TextStreamer, pipeline
 from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
 import time
 
-model_name_or_path = "TheBloke/llama2_7b_chat_uncensored-GPTQ"
-model_basename = "gptq_model-4bit-128g"
+model_name_or_path = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
+model_basename = "Jackson2-gptq_model-4bit-128g"
 
 use_triton = False
 
@@ -17,17 +17,6 @@ model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
         use_triton=use_triton,
         quantize_config=None)
 
-"""
-To download from a specific branch, use the revision parameter, as in this example:
-
-model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
-        revision="gptq-4bit-32g-actorder_True",
-        model_basename=model_basename,
-        use_safetensors=True,
-        trust_remote_code=True,
-        device="cuda:0",
-        quantize_config=None)
-"""
 
 prompt = "Tell me about AI"
 prompt_template=f'''### HUMAN:
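
The diff above only shows fragments of app_v3.py. For context, here is a minimal sketch of how the changed lines fit into the usual AutoGPTQ load-and-generate flow: only the identifiers visible in the diff (model_name_or_path, model_basename, use_triton, prompt, the "### HUMAN:" template prefix) come from the file itself; the tokenizer call, the remaining from_quantized arguments, the closing "### RESPONSE:" turn, and the pipeline settings are assumptions, not part of this commit.

# Minimal sketch, assuming the rest of app_v3.py follows the common AutoGPTQ pattern.
from transformers import AutoTokenizer, pipeline
from auto_gptq import AutoGPTQForCausalLM

model_name_or_path = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
model_basename = "Jackson2-gptq_model-4bit-128g"

# Load tokenizer and 4-bit GPTQ weights (CUDA backend, no Triton), as in the diff.
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
        model_basename=model_basename,
        use_safetensors=True,
        device="cuda:0",
        use_triton=False,
        quantize_config=None)

prompt = "Tell me about AI"
# The diff shows the template starting with "### HUMAN:"; the "### RESPONSE:"
# turn below is the usual pairing for this format and is assumed here.
prompt_template = f'''### HUMAN:
{prompt}

### RESPONSE:
'''

# Generation parameters are illustrative defaults, not taken from the commit.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer,
                max_new_tokens=512, temperature=0.7, top_p=0.95,
                repetition_penalty=1.15)
print(pipe(prompt_template)[0]["generated_text"])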