Spaces: Runtime error

mattcracker committed
Commit • f5262bf • 1 Parent(s): 2bfdfb6

Update app.py
app.py CHANGED

@@ -1,14 +1,23 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+import os
+
+
+# Get the access token from the environment variable
+hf_token = os.getenv("HF_TOKEN")
 
 # Load the model and tokenizer
 model_path = "meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8"
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained(
+    model_path,
+    token=hf_token
+)
 model = AutoModelForCausalLM.from_pretrained(
     model_path,
-
+    token=hf_token,
     device_map="auto",
+    load_in_4bit=True,  # Enable 4-bit quantized loading
 )
 
 def generate_text(prompt, max_length=512, temperature=0.7, top_p=0.9):
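One note on the load_in_4bit=True keyword added in this commit: 4-bit loading goes through the bitsandbytes package on a CUDA-capable device, and recent transformers releases steer users toward an explicit BitsAndBytesConfig passed as quantization_config rather than the bare keyword argument. If bitsandbytes is missing from the Space's requirements, the load can fail at startup, which may be worth checking given the Runtime error status shown above, though the Space's logs are not part of this page. A hedged sketch of the config-based variant (the compute dtype choice is an assumption):

# Sketch only: assumes the same model_path and hf_token defined in app.py above,
# plus bitsandbytes installed in the Space environment.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
import torch

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # quantize weights to 4-bit on load
    bnb_4bit_compute_dtype=torch.float16,  # assumed compute dtype; fp16 is a common choice
)

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    token=hf_token,
    device_map="auto",
    quantization_config=bnb_config,
)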
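The hunk stops at the generate_text signature, so the function body and the Gradio wiring are not shown in this commit. Purely as an assumed sketch of how the rest of app.py is commonly structured, reusing the gr, torch, tokenizer, and model names defined above; the generation call, slider ranges, and the demo variable are illustrative, not the Space's actual code:

def generate_text(prompt, max_length=512, temperature=0.7, top_p=0.9):
    # Tokenize the prompt and move it to the same device as the model
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_length,  # UI "max length" used as the new-token budget
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

demo = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(64, 2048, value=512, step=1, label="Max length"),
        gr.Slider(0.1, 1.5, value=0.7, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.9, label="Top-p"),
    ],
    outputs=gr.Textbox(label="Generated text"),
)

demo.launch()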