James Cox-Morton committed on
Commit 231c5cd · 1 Parent(s): 788296f

Start trying to zero-GPU
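
On a ZeroGPU Space the app holds no GPU at rest: a device is attached only while a function decorated with @spaces.GPU() is executing. That is why this commit moves inference from the hosted InferenceClient to a locally loaded model wrapped in a decorated stream_chat function, as shown in the diff below.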

Files changed (2)
  1. app.py +86 -30
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,49 +1,105 @@
+ import sys
+ from threading import Thread
+
  import gradio as gr
- from huggingface_hub import InferenceClient
+ import spaces
+ from transformers import (
+     AutoModelForCausalLM,
+     AutoTokenizer,
+     TextIteratorStreamer,
+     BitsAndBytesConfig,
+ )
+
+ import torch
+
+ MODEL = "microsoft/Phi-3.5-mini-instruct"
+
+ if torch.cuda.is_available():
+     device = "cuda"
+ elif sys.platform == "darwin" and torch.backends.mps.is_available():
+     device = "mps"
+ else:
+     device = "cpu"

- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("microsoft/Phi-3.5-mini-instruct")
+ # TODO understand this
+ quantization_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_compute_dtype=torch.bfloat16,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type="nf4",
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL)
+ model = AutoModelForCausalLM.from_pretrained(
+     MODEL,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+     quantization_config=quantization_config,
+ )

- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
+ @spaces.GPU()
+ def stream_chat(
+     message: str,
+     history: list,
+     system_prompt: str,
+     temperature: float = 0.8,
+     max_new_tokens: int = 1024,
+     top_p: float = 1.0,
+     top_k: int = 20,
+     penalty: float = 1.2,
  ):
-     messages = [{"role": "system", "content": system_message}]
+     print(f"message: {message}")
+     print(f"history: {history}")

-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
+     conversation = [{"role": "system", "content": system_prompt}]
+     for prompt, answer in history:
+         conversation.extend(
+             [
+                 {"role": "user", "content": prompt},
+                 {"role": "assistant", "content": answer},
+             ]
+         )

-     messages.append({"role": "user", "content": message})
+     conversation.append({"role": "user", "content": message})

-     response = ""
+     input_ids = tokenizer.apply_chat_template(
+         conversation, add_generation_prompt=True, return_tensors="pt"
+     ).to(model.device)

-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
+     streamer = TextIteratorStreamer(
+         tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True
+     )
+
+     generate_kwargs = dict(
+         input_ids=input_ids,
+         max_new_tokens=max_new_tokens,
+         do_sample=False if temperature == 0 else True,
          top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
+         top_k=top_k,
+         temperature=temperature,
+         eos_token_id=[128001, 128008, 128009],
+         streamer=streamer,
+     )
+
+     with torch.no_grad():
+         thread = Thread(target=model.generate, kwargs=generate_kwargs)
+         thread.start()

-         response += token
-         yield response
+     buffer = ""
+     for new_text in streamer:
+         buffer += new_text
+         yield buffer

  """
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
  """
  demo = gr.ChatInterface(
-     respond,
+     stream_chat,
      additional_inputs=[
          gr.Textbox(value="You are a belligerent Chatbot.", label="System message"),
          gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
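
The block flagged # TODO understand this is a standard bitsandbytes 4-bit setup: load_in_4bit=True stores the weights in 4-bit NF4 form, bnb_4bit_use_double_quant=True additionally quantizes the quantization constants, and bnb_4bit_compute_dtype=torch.bfloat16 dequantizes to bfloat16 for the actual matmuls, cutting weight memory roughly 4x versus fp16 at a small accuracy cost.

The core of stream_chat is the threaded streaming pattern: model.generate() blocks until the whole completion is produced, so it runs on a worker thread while the main thread yields partial text from a TextIteratorStreamer. Below is a minimal, self-contained sketch of that pattern; the tiny "gpt2" checkpoint and the throwaway prompt are stand-ins chosen so it runs on CPU, not part of the commit.

    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    # "gpt2" is a hypothetical stand-in for MODEL, small enough to run on CPU
    tok = AutoTokenizer.from_pretrained("gpt2")
    lm = AutoModelForCausalLM.from_pretrained("gpt2")

    inputs = tok("The quick brown fox", return_tensors="pt")
    # skip_prompt=True keeps the echoed prompt out of the streamed output
    streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks until completion, so it runs on a worker thread while
    # the main thread consumes decoded text chunks as they arrive
    thread = Thread(
        target=lm.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=32, do_sample=False),
    )
    thread.start()

    text = ""
    for chunk in streamer:  # blocks until the next chunk is ready
        text += chunk
        print(text)
    thread.join()

Unlike the commit, the sketch leaves eos_token_id unset so generate() falls back to the tokenizer's own end-of-sequence token rather than a hard-coded ID list.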
requirements.txt CHANGED
@@ -1 +1,2 @@
- huggingface_hub==0.22.2
+ torch
+ transformers
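
This pin list is likely incomplete for running the new app.py outside Spaces: the code also imports gradio and spaces (both preinstalled on Spaces hardware, which would explain their absence here), and the 4-bit BitsAndBytesConfig plus device_map="auto" pull in bitsandbytes and accelerate at model-load time.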