changing to starcoder

tonic committed · Commit ad35440 · 1 parent: 5d090de
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: StarCoder2
+emoji: ✨2️⃣✨
+colorFrom: yellow
+colorTo: blue
 sdk: gradio
 sdk_version: 4.19.0
 app_file: app.py
app.py CHANGED
@@ -5,15 +5,14 @@ import transformers
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import os
 
-title = """# Welcome to 🌟Tonic's
-
+title = """# Welcome to 🌟Tonic's✨StarCoder
+✨StarCoder StarCoder2-15B model is a 15B parameter model trained on 600+ programming languages from The Stack v2, with opt-out requests excluded. The model uses Grouped Query Attention, a context window of 16,384 tokens with a sliding window attention of 4,096 tokens, and was trained using the Fill-in-the-Middle objective on 4+ trillion tokens. The model was trained with NVIDIA NeMo™ Framework using the NVIDIA Eos Supercomputer built with NVIDIA DGX H100 systems. You can build with this endpoint using✨StarCoder available here : [bigcode/starcoder2-15b](https://huggingface.co/bigcode/starcoder2-15b). You can also use ✨StarCoder by cloning this space. Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/starcoder2?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
 Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) Math 🔍 [introspector](https://huggingface.co/introspector) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [SciTonic](https://github.com/Tonic-AI/scitonic)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
 """
 
-
 default_system_prompt = """SYSTEM: You are an AI that code. Answer with code."""
 
-model_path = "
+model_path = "bigcode/starcoder2-15b"
 
 
 hf_token = os.getenv("HF_TOKEN")
@@ -22,61 +21,43 @@ if not hf_token:
 
 model = AutoModelForCausalLM.from_pretrained(
     model_path,
-    torch_dtype=torch.
+    torch_dtype=torch.bfloat16,
     device_map="auto",
-
-    load_in_8bit=False,
-    trust_remote_code=True,
+    # trust_remote_code=True,
     token=hf_token,
 )
 
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
-
-
-    system_prompt = custom_prompt if custom_prompt else default_system_prompt
-    llm_prompt = f"{system_prompt} \nUSER: {user_input} \nASSISTANT: "
-
-    tokens = tokenizer.encode(llm_prompt, return_tensors="pt")
-    tokens = tokens.to("cuda")
+# import gradio as gr
+# from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
-
-
-
-
-
-
-
-
-
-    )
-    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
-    answer = generated_text[len(llm_prompt):].strip()
-
-    return answer
+# checkpoint = "bigcode/starcoder2-15b"
+# quantization_config = BitsAndBytesConfig(load_in_8bit=True)
+# tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+# model = AutoModelForCausalLM.from_pretrained(checkpoint, quantization_config=quantization_config).to("cuda")
+
+def generate_text(prompt, temperature, max_length):
+    inputs = tokenizer.encode(prompt, return_tensors="pt").to("cuda")
+    outputs = model.generate(inputs, max_length=max_length, top_p=0.9, temperature=temperature)
+    return tokenizer.decode(outputs[0])
 
 def gradio_app():
     with gr.Blocks() as demo:
         gr.Markdown(title)
+        output = gr.Code(label="Generated Code", lines=40)
         with gr.Row():
-
-            instruction = gr.Textbox(label="Your Instruction", placeholder="Type your question here...")
+            generate_btn = gr.Button("Generate")
         with gr.Row():
             temperature = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.5, label="Temperature")
-
-            top_p = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=1.0, label="Top P")
-            top_k = gr.Slider(minimum=0, maximum=100, step=1, value=50, label="Top K")
+            max_length = gr.Slider(minimum=100, maximum=1024, step=10, value=100, label="Generate Length")
 
         with gr.Row():
-
-            output = gr.Code(label="🐇🥷🏻Neo:", lines=10)
+            prompt = gr.Textbox(label="Enter your code prompt", placeholder="def print_hello_world():")
 
         generate_btn.click(
             fn=generate_text,
-            inputs=[
+            inputs=[prompt, temperature, max_length],
            outputs=output
         )
 
-    demo.launch()
-
-if __name__ == "__main__":
-    gradio_app()
+    demo.launch()
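The commit leaves an 8-bit loading path commented out near the top of app.py. Below is a minimal runnable sketch of that path; it is not part of the commit, and it swaps the commented-out `.to("cuda")` for `device_map="auto"`, since transformers places 8-bit weights itself and rejects an explicit `.to()` on a quantized model. It assumes `bitsandbytes` is installed and a CUDA GPU is visible.

```python
# Sketch of the 8-bit loading path the commit leaves commented out.
# Assumption: bitsandbytes is installed and a CUDA GPU is available.
import os

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

checkpoint = "bigcode/starcoder2-15b"
quantization_config = BitsAndBytesConfig(load_in_8bit=True)

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    quantization_config=quantization_config,
    device_map="auto",            # 8-bit weights are placed automatically; no .to("cuda")
    token=os.getenv("HF_TOKEN"),  # same env var the app already reads
)
```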
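One caveat in the new `generate_text`: `model.generate` defaults to greedy decoding, so the `temperature` slider and the hard-coded `top_p=0.9` have no effect (transformers emits a warning) unless sampling is switched on. A variant that actually samples, sketched under the assumption that the app's `model` and `tokenizer` are already loaded, could read:

```python
def generate_text(prompt, temperature, max_length):
    # Encode the prompt and move it to whichever device holds the model.
    inputs = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        inputs,
        max_length=max_length,
        do_sample=True,                       # required for temperature/top_p to apply
        temperature=temperature,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,  # avoids the missing-pad-token warning
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
```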
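The new title also points readers at the hosted [bigcode/starcoder2-15b](https://huggingface.co/bigcode/starcoder2-15b) endpoint. A sketch of querying it through `huggingface_hub` instead of loading the weights locally, assuming the token has Inference API access:

```python
import os

from huggingface_hub import InferenceClient

# Query the hosted bigcode/starcoder2-15b endpoint the title links to.
client = InferenceClient(model="bigcode/starcoder2-15b", token=os.getenv("HF_TOKEN"))
completion = client.text_generation("def print_hello_world():", max_new_tokens=64)
print(completion)
```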