adj7354 committed
Commit 8edbd59
1 Parent(s): f4d3418

commit app

Files changed (3)
  1. app.py +129 -0
  2. requirements.txt +9 -0
  3. style.css +16 -0
app.py ADDED
@@ -0,0 +1,129 @@
+ import os
+ from threading import Thread
+ from typing import Iterator
+
+ import gradio as gr
+ import spaces
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+
+ MAX_MAX_NEW_TOKENS = 2048
+ DEFAULT_MAX_NEW_TOKENS = 1024
+
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
+
+ DESCRIPTION = """\
+ # DeepSeek-6.7B-Chat
+
+ This Space demonstrates the [DeepSeek-Coder](https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct) model by DeepSeek, a 6.7B-parameter code model fine-tuned for chat instructions.
+ """
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+
+ if torch.cuda.is_available():
+     model_id = "deepseek-ai/deepseek-coder-6.7b-instruct"
+     model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     tokenizer.use_default_system_prompt = False
+
+
+
+ @spaces.GPU
+ def generate(
+     message: str,
+     chat_history: list,
+     system_prompt: str,
+     max_new_tokens: int = 1024,
+     temperature: float = 0.6,
+     top_p: float = 0.9,
+     top_k: int = 50,
+     repetition_penalty: float = 1.0,
+ ) -> Iterator[str]:
+     conversation = []
+     if system_prompt:
+         conversation.append({"role": "system", "content": system_prompt})
+     for user, assistant in chat_history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
+
+     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+     input_ids = input_ids.to(model.device)
+
+     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+     generate_kwargs = dict(
+         {"input_ids": input_ids},
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=False,
+         num_beams=1,
+         repetition_penalty=repetition_penalty,
+         eos_token_id=32021,
+     )
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield "".join(outputs).replace("<|EOT|>", "")
+
+
+ chat_interface = gr.ChatInterface(
+     fn=generate,
+     additional_inputs=[
+         gr.Textbox(label="System prompt", lines=6),
+         gr.Slider(
+             label="Max new tokens",
+             minimum=1,
+             maximum=MAX_MAX_NEW_TOKENS,
+             step=1,
+             value=DEFAULT_MAX_NEW_TOKENS,
+         ),
+         gr.Slider(
+             label="Temperature",
+             minimum=0,
+             maximum=4.0,
+             step=0.1,
+             value=0,
+         ),
+         gr.Slider(
+             label="Top-p (nucleus sampling)",
+             minimum=0.05,
+             maximum=1.0,
+             step=0.05,
+             value=0.9,
+         ),
+         gr.Slider(
+             label="Top-k",
+             minimum=1,
+             maximum=1000,
+             step=1,
+             value=50,
+         ),
+         gr.Slider(
+             label="Repetition penalty",
+             minimum=1.0,
+             maximum=2.0,
+             step=0.05,
+             value=1.0,
+         ),
+     ],
+     stop_btn=None,
+     examples=[
+         ["implement snake game using pygame"],
+         ["Can you briefly explain what the Python programming language is?"],
+         ["write a program to find the factorial of a number"],
+     ],
+ )
+
+ with gr.Blocks(css="style.css") as demo:
+     gr.Markdown(DESCRIPTION)
+     chat_interface.render()
+
+ if __name__ == "__main__":
+     demo.queue().launch(share=True)
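
Note that `generate_kwargs` sets `do_sample=False`, so decoding is greedy and the Temperature, Top-p, and Top-k sliders are accepted but have no effect on the output; only the max-tokens and repetition-penalty values are actually used. The streaming pattern itself, `model.generate` running on a background thread and feeding a `TextIteratorStreamer`, can be exercised outside Gradio. A minimal sketch, assuming a CUDA machine with the pins below and the same model id; the prompt and token budget here are arbitrary:

```python
# Minimal sketch of app.py's streaming pattern, without the Gradio UI.
# Assumes a CUDA GPU and the model id used in app.py above.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "deepseek-ai/deepseek-coder-6.7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

conversation = [{"role": "user", "content": "write a program to find the factorial of a number"}]
input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)

# skip_prompt=True drops the echoed prompt; skip_special_tokens=True drops markers like <|EOT|>.
streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
Thread(target=model.generate, kwargs=dict(
    input_ids=input_ids, streamer=streamer, max_new_tokens=256, do_sample=False
)).start()

# Iterating the streamer blocks until each decoded chunk arrives, then prints it.
for chunk in streamer:
    print(chunk, end="", flush=True)
```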
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ accelerate==0.23.0
+ bitsandbytes==0.41.1
+ gradio==3.48.0
+ protobuf==3.20.3
+ scipy==1.10.1
+ sentencepiece==0.1.99
+ spaces==0.16.1
+ torch==2.0.0
+ transformers>=4.35.0
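
The `gradio==3.48.0` pin matters for the app code above: that release's `ChatInterface` passes chat history as a list of `[user, assistant]` pairs, which is exactly what the `for user, assistant in chat_history` loop in `generate` unpacks (later Gradio releases moved to a message-dict format). A hypothetical smoke test of `generate` with that history shape, assuming it runs in the Space environment where the model is loaded; the prompts are arbitrary:

```python
# Hypothetical smoke test for generate(), using the [user, assistant]
# pair history format that gradio==3.48.0's ChatInterface supplies.
history = [["hello", "Hi! What code can I help you with?"]]

last = ""
for partial in generate(
    message="write a program to find the factorial of a number",
    chat_history=history,
    system_prompt="",
    max_new_tokens=128,
):
    last = partial  # each yield is the full response accumulated so far
print(last)
```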
style.css ADDED
@@ -0,0 +1,16 @@
+ h1 {
+   text-align: center;
+ }
+
+ #duplicate-button {
+   margin: auto;
+   color: white;
+   background: #1565c0;
+   border-radius: 100vh;
+ }
+
+ .contain {
+   max-width: 900px;
+   margin: auto;
+   padding-top: 1.5rem;
+ }