Aarifkhan committed (verified)
Commit adceffe · Parent(s): 0e33fb2

Update app.py

Files changed (1): app.py (+2, -2)
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 import os
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 from threading import Thread
-
+import spaces
 # Set an environment variable
 HF_TOKEN = os.environ.get("HF_TOKEN", None)

@@ -30,7 +30,7 @@ h1 {
 tokenizer = AutoTokenizer.from_pretrained("UnfilteredAI/DAN-L3-R1-8B")
 model = AutoModelForCausalLM.from_pretrained("UnfilteredAI/DAN-L3-R1-8B", device_map="auto")
 terminators = [tokenizer.eos_token_id]
-
+@spaces.GPU(duration=30)
 def chat_dan_l3_r1_8b(message: str, history: list, temperature: float, max_new_tokens: int) -> str:
     """
     Generate a streaming response using the DAN-L3-R1-8B model.
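Context for the change: it wires the Space into Hugging Face ZeroGPU. The new import pulls in the spaces SDK, and the spaces.GPU(duration=30) decorator requests a GPU allocation of up to 30 seconds for each call to the chat handler. For reference, a minimal sketch of how such a decorated streaming handler is typically written; the body below is illustrative, not the file's actual code, and assumes the standard TextIteratorStreamer pattern with tuple-style Gradio history:

import spaces
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("UnfilteredAI/DAN-L3-R1-8B")
model = AutoModelForCausalLM.from_pretrained("UnfilteredAI/DAN-L3-R1-8B", device_map="auto")
terminators = [tokenizer.eos_token_id]

@spaces.GPU(duration=30)  # hold a ZeroGPU slot for at most 30 s per call
def chat_dan_l3_r1_8b(message: str, history: list, temperature: float, max_new_tokens: int):
    # Rebuild the conversation in chat-template format (tuple-style history assumed).
    conversation = []
    for user_msg, assistant_msg in history:
        conversation.append({"role": "user", "content": user_msg})
        conversation.append({"role": "assistant", "content": assistant_msg})
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)

    # Run generation on a background thread so tokens can be yielded as they arrive.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    Thread(target=model.generate, kwargs=dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=temperature > 0,
        temperature=temperature,
        eos_token_id=terminators,
    )).start()

    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial  # Gradio renders each partial string as it streams in

Keeping the duration short frees the shared ZeroGPU device quickly between requests; generations that need longer than the 30-second window would require a larger duration value.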