captainkyd committed on
Commit
a41e143
1 Parent(s): 86edbb1

Update app.py

Browse files

testing to see if this gets this model up and online

Files changed (1) hide show
  1. app.py +7 -9
app.py CHANGED
@@ -1,9 +1,7 @@
1
  import gradio as gr
2
  import torch
3
  import transformers
4
- import bitsandbytes
5
- import accelerate
6
- from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
7
  import os
8
 
9
  title = """# Welcome to 🌟Tonic's🐇🥷🏻Neo
@@ -21,17 +19,17 @@ hf_token = os.getenv("HF_TOKEN")
21
  if not hf_token:
22
  raise ValueError("Hugging Face token not found. Please set the HF_TOKEN environment variable.")
23
 
24
- quantization_config = BitsAndBytesConfig(
25
- load_in_4bit=True,
26
- bnb_4bit_use_double_quant=True,
27
- bnb_4bit_compute_dtype=torch.bfloat16
28
- )
29
 
30
  model = AutoModelForCausalLM.from_pretrained(
31
  model_path,
32
  device_map="auto",
33
  trust_remote_code=True,
34
- quantization_config=quantization_config
35
  )
36
 
37
  tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
1
  import gradio as gr
2
  import torch
3
  import transformers
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 
5
  import os
6
 
7
  title = """# Welcome to 🌟Tonic's🐇🥷🏻Neo
 
19
  if not hf_token:
20
  raise ValueError("Hugging Face token not found. Please set the HF_TOKEN environment variable.")
21
 
22
+ #quantization_config = BitsAndBytesConfig(
23
+ # load_in_4bit=True,
24
+ # bnb_4bit_use_double_quant=True,
25
+ # bnb_4bit_compute_dtype=torch.bfloat16
26
+ #)
27
 
28
  model = AutoModelForCausalLM.from_pretrained(
29
  model_path,
30
  device_map="auto",
31
  trust_remote_code=True,
32
+ # quantization_config=quantization_config
33
  )
34
 
35
  tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)