freddyaboulton HF staff committed on
Commit
ea5b10f
·
verified ·
1 Parent(s): 5e43a82

Upload folder using huggingface_hub

Browse files
Files changed (7) hide show
  1. README.md +7 -3
  2. app.py +35 -0
  3. assets/sandbox.html +37 -0
  4. assets/spinner.html +60 -0
  5. handler.py +72 -0
  6. requirements.txt +277 -0
  7. ui.py +74 -0
README.md CHANGED
@@ -1,12 +1,16 @@
1
  ---
2
  title: Llama Code Editor
3
- emoji:
4
- colorFrom: pink
5
- colorTo: blue
6
  sdk: gradio
7
  sdk_version: 5.16.0
8
  app_file: app.py
9
  pinned: false
 
 
 
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: Llama Code Editor
3
+ emoji: 🦙
4
+ colorFrom: indigo
5
+ colorTo: pink
6
  sdk: gradio
7
  sdk_version: 5.16.0
8
  app_file: app.py
9
  pinned: false
10
+ license: mit
11
+ short_description: Create interactive HTML web pages with your voice
12
+ tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN,
13
+ secret|SAMBANOVA_API_KEY, secret|GROQ_API_KEY]
14
  ---
15
 
16
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastrtc import Stream
from fastapi.responses import RedirectResponse
from gradio.utils import get_space

# Resolve imports for both layouts: the in-repo demo package
# (demo/llama_code_editor/...) and a flat Space deployment where
# handler.py and ui.py sit next to this file.
try:
    from demo.llama_code_editor.handler import (
        CodeHandler,
    )
    from demo.llama_code_editor.ui import demo as ui
except (ImportError, ModuleNotFoundError):
    from handler import CodeHandler
    from ui import demo as ui


# Bidirectional audio stream driven by the voice-to-code handler.
# Each session is capped at 90 seconds and at most 10 run concurrently.
stream = Stream(
    handler=CodeHandler,
    modality="audio",
    mode="send-receive",
    concurrency_limit=10,
    time_limit=90,
)

# Attach the Gradio UI (defined in ui.py) to the stream's app.
stream.ui = ui


@stream.get("/")
async def _():
    # Redirect the bare root to the UI. On Spaces an absolute URL is used
    # instead of the relative "/ui" — presumably required by the Spaces
    # proxying setup; NOTE(review): confirm the hard-coded hostname stays
    # in sync with the Space name.
    url = "/ui" if not get_space() else "https://fastrtc-llama-code-editor.hf.space/ui/"
    return RedirectResponse(url)


if __name__ == "__main__":
    import uvicorn

    # Port 7860 is the conventional Hugging Face Spaces port.
    uvicorn.run(stream, host="0.0.0.0", port=7860)
assets/sandbox.html ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div style="
2
+ display: flex;
3
+ flex-direction: column;
4
+ align-items: center;
5
+ justify-content: center;
6
+ min-height: 400px;
7
+ background: linear-gradient(135deg, #f5f7fa 0%, #e4e8ec 100%);
8
+ border-radius: 8px;
9
+ border: 2px dashed #cbd5e1;
10
+ padding: 2rem;
11
+ text-align: center;
12
+ color: #64748b;
13
+ font-family: system-ui, -apple-system, sans-serif;
14
+ ">
15
+ <div style="
16
+ width: 80px;
17
+ height: 80px;
18
+ margin-bottom: 1.5rem;
19
+ border: 3px solid #cbd5e1;
20
+ border-radius: 12px;
21
+ position: relative;
22
+ ">
23
+ <div style="
24
+ position: absolute;
25
+ top: 50%;
26
+ left: 50%;
27
+ transform: translate(-50%, -50%);
28
+ font-size: 2rem;
29
+ ">📦</div>
30
+ </div>
31
+ <h2 style="
32
+ margin: 0 0 0.5rem 0;
33
+ font-size: 1.5rem;
34
+ font-weight: 600;
35
+ color: #475569;
36
+ ">No Application Created</h2>
37
+ </div>
assets/spinner.html ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div style="
2
+ display: flex;
3
+ flex-direction: column;
4
+ align-items: center;
5
+ justify-content: center;
6
+ min-height: 400px;
7
+ background: linear-gradient(135deg, #f8fafc 0%, #f1f5f9 100%);
8
+ border-radius: 8px;
9
+ padding: 2rem;
10
+ text-align: center;
11
+ font-family: system-ui, -apple-system, sans-serif;
12
+ ">
13
+ <!-- Spinner container -->
14
+ <div style="
15
+ position: relative;
16
+ width: 64px;
17
+ height: 64px;
18
+ margin-bottom: 1.5rem;
19
+ ">
20
+ <!-- Static ring -->
21
+ <div style="
22
+ position: absolute;
23
+ width: 100%;
24
+ height: 100%;
25
+ border: 4px solid #e2e8f0;
26
+ border-radius: 50%;
27
+ "></div>
28
+ <!-- Animated spinner -->
29
+ <div style="
30
+ position: absolute;
31
+ width: 100%;
32
+ height: 100%;
33
+ border: 4px solid transparent;
34
+ border-top-color: #3b82f6;
35
+ border-radius: 50%;
36
+ animation: spin 1s linear infinite;
37
+ "></div>
38
+ </div>
39
+
40
+ <!-- Text content -->
41
+ <h2 style="
42
+ margin: 0 0 0.5rem 0;
43
+ font-size: 1.25rem;
44
+ font-weight: 600;
45
+ color: #475569;
46
+ ">Generating your application...</h2>
47
+
48
+ <p style="
49
+ margin: 0;
50
+ font-size: 0.875rem;
51
+ color: #64748b;
52
+ ">This may take a few moments</p>
53
+
54
+ <style>
55
+ @keyframes spin {
56
+ 0% { transform: rotate(0deg); }
57
+ 100% { transform: rotate(360deg); }
58
+ }
59
+ </style>
60
+ </div>
handler.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastrtc import (
2
+ ReplyOnPause,
3
+ AdditionalOutputs,
4
+ audio_to_bytes,
5
+ )
6
+ import numpy as np
7
+ import os
8
+ import base64
9
+ import openai
10
+ import re
11
+ from groq import Groq
12
+ from dotenv import load_dotenv
13
+ from pathlib import Path
14
+
15
+ load_dotenv()
16
+
17
+ groq_client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
18
+
19
+ client = openai.OpenAI(
20
+ api_key=os.environ.get("SAMBANOVA_API_KEY"),
21
+ base_url="https://api.sambanova.ai/v1",
22
+ )
23
+
24
+ path = Path(__file__).parent / "assets"
25
+
26
+ spinner_html = open(path / "spinner.html").read()
27
+
28
+
29
+ system_prompt = "You are an AI coding assistant. Your task is to write single-file HTML applications based on a user's request. Only return the necessary code. Include all necessary imports and styles. You may also be asked to edit your original response."
30
+ user_prompt = "Please write a single-file HTML application to fulfill the following request.\nThe message:{user_message}\nCurrent code you have written:{code}"
31
+
32
+
33
def extract_html_content(text):
    """Extract a complete single-file HTML document from *text*.

    Returns the substring spanning the first ``<!DOCTYPE html>``
    declaration through the next closing ``</html>`` tag (non-greedy),
    or ``None`` when no complete document is present.

    The match is case-insensitive because language models sometimes emit
    the lowercase form ``<!doctype html>``; a case-sensitive search would
    silently drop those responses.
    """
    match = re.search(
        r"<!DOCTYPE html>.*?</html>", text, re.DOTALL | re.IGNORECASE
    )
    return match.group(0) if match else None
39
+
40
+
41
def display_in_sandbox(code):
    """Wrap an HTML document string in a preview iframe.

    The document is embedded as a base64 ``data:`` URI so the generated
    page renders in isolation from the host page.
    """
    payload = base64.b64encode(code.encode("utf-8")).decode("utf-8")
    src = "data:text/html;charset=utf-8;base64," + payload
    return f'<iframe src="{src}" width="100%" height="600px"></iframe>'
45
+
46
+
47
def generate(user_message: tuple[int, np.ndarray], history: list[dict], code: str):
    """Turn a spoken request into an updated single-file HTML application.

    Yields ``AdditionalOutputs(history, html)`` twice: first with a spinner
    placeholder while transcription and generation run, then with the
    updated chat history and the extracted HTML document (``None`` when the
    model's reply contains no complete ``<!DOCTYPE html>…</html>`` span).

    Args:
        user_message: audio from fastrtc — assumed (sample_rate, samples);
            TODO confirm against fastrtc's ReplyOnPause contract.
        history: chat messages in OpenAI format; mutated in place.
        code: HTML previously generated, fed back so the model can edit it.
    """
    # Show the loading spinner immediately; the API calls below are slow.
    yield AdditionalOutputs(history, spinner_html)

    # Transcribe the user's speech via Groq's hosted Whisper model.
    text = groq_client.audio.transcriptions.create(
        file=("audio-file.mp3", audio_to_bytes(user_message)),
        model="whisper-large-v3-turbo",
        response_format="verbose_json",
    ).text

    # Fold the transcript and current code into the templated user prompt.
    user_msg_formatted = user_prompt.format(user_message=text, code=code)
    history.append({"role": "user", "content": user_msg_formatted})

    # Low temperature/top_p bias the model toward deterministic code output.
    response = client.chat.completions.create(
        model="Meta-Llama-3.1-70B-Instruct",
        messages=history,  # type: ignore
        temperature=0.1,
        top_p=0.1,
    )

    output = response.choices[0].message.content
    # Keep the raw reply in history, but surface only the HTML document.
    html_code = extract_html_content(output)
    history.append({"role": "assistant", "content": output})
    yield AdditionalOutputs(history, html_code)
70
+
71
+
72
+ CodeHandler = ReplyOnPause(generate) # type: ignore
requirements.txt ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was autogenerated by uv via the following command:
2
+ # uv pip compile requirements.in -o requirements.txt
3
+ aiofiles==23.2.1
4
+ # via gradio
5
+ aiohappyeyeballs==2.4.3
6
+ # via aiohttp
7
+ aiohttp==3.11.3
8
+ # via
9
+ # aiohttp-retry
10
+ # twilio
11
+ aiohttp-retry==2.8.3
12
+ # via twilio
13
+ aioice==0.9.0
14
+ # via aiortc
15
+ aiosignal==1.3.1
16
+ # via aiohttp
17
+ annotated-types==0.7.0
18
+ # via pydantic
19
+ anyio==4.6.2.post1
20
+ # via
21
+ # gradio
22
+ # httpx
23
+ # openai
24
+ # starlette
25
+ attrs==24.2.0
26
+ # via aiohttp
27
+ audioread==3.0.1
28
+ # via librosa
29
+ av==12.3.0
30
+ # via aiortc
31
+ certifi==2024.8.30
32
+ # via
33
+ # httpcore
34
+ # httpx
35
+ # requests
36
+ cffi==1.17.1
37
+ # via
38
+ # aiortc
39
+ # cryptography
40
+ # pylibsrtp
41
+ # soundfile
42
+ charset-normalizer==3.4.0
43
+ # via requests
44
+ click==8.1.7
45
+ # via
46
+ # typer
47
+ # uvicorn
48
+ coloredlogs==15.0.1
49
+ # via onnxruntime
50
+ cryptography==43.0.3
51
+ # via
52
+ # aiortc
53
+ # pyopenssl
54
+ decorator==5.1.1
55
+ # via librosa
56
+ distro==1.9.0
57
+ # via openai
58
+ dnspython==2.7.0
59
+ # via aioice
60
+ fastapi==0.115.5
61
+ # via gradio
62
+ ffmpy==0.4.0
63
+ # via gradio
64
+ filelock==3.16.1
65
+ # via huggingface-hub
66
+ flatbuffers==24.3.25
67
+ # via onnxruntime
68
+ frozenlist==1.5.0
69
+ # via
70
+ # aiohttp
71
+ # aiosignal
72
+ fsspec==2024.10.0
73
+ # via
74
+ # gradio-client
75
+ # huggingface-hub
76
+ google-crc32c==1.6.0
77
+ # via aiortc
78
+ fastrtc
79
+ # via -r requirements.in
80
+ h11==0.14.0
81
+ # via
82
+ # httpcore
83
+ # uvicorn
84
+ httpcore==1.0.7
85
+ # via httpx
86
+ httpx==0.27.2
87
+ # via
88
+ # gradio
89
+ # gradio-client
90
+ # openai
91
+ # safehttpx
92
+ huggingface-hub==0.26.2
93
+ # via
94
+ # gradio
95
+ # gradio-client
96
+ humanfriendly==10.0
97
+ # via coloredlogs
98
+ idna==3.10
99
+ # via
100
+ # anyio
101
+ # httpx
102
+ # requests
103
+ # yarl
104
+ ifaddr==0.2.0
105
+ # via aioice
106
+ jinja2==3.1.4
107
+ # via gradio
108
+ jiter==0.7.1
109
+ # via openai
110
+ joblib==1.4.2
111
+ # via
112
+ # librosa
113
+ # scikit-learn
114
+ lazy-loader==0.4
115
+ llvmlite==0.43.0
116
+ # via numba
117
+ markdown-it-py==3.0.0
118
+ # via rich
119
+ markupsafe==2.1.5
120
+ # via
121
+ # gradio
122
+ # jinja2
123
+ mdurl==0.1.2
124
+ # via markdown-it-py
125
+ mpmath==1.3.0
126
+ # via sympy
127
+ msgpack==1.1.0
128
+ # via librosa
129
+ multidict==6.1.0
130
+ # via
131
+ # aiohttp
132
+ # yarl
133
+ numba==0.60.0
134
+ # via
135
+ # -r requirements.in
136
+ # librosa
137
+ numpy==2.0.2
138
+ # via
139
+ # gradio
140
+ # librosa
141
+ # numba
142
+ # onnxruntime
143
+ # pandas
144
+ # scikit-learn
145
+ # scipy
146
+ # soxr
147
+ openai==1.54.4
148
+ # via -r requirements.in
149
+ orjson==3.10.11
150
+ # via gradio
151
+ packaging==24.2
152
+ # via
153
+ # gradio
154
+ # gradio-client
155
+ # huggingface-hub
156
+ # lazy-loader
157
+ # onnxruntime
158
+ # pooch
159
+ pandas==2.2.3
160
+ # via gradio
161
+ pillow==11.0.0
162
+ # via gradio
163
+ platformdirs==4.3.6
164
+ # via pooch
165
+ pooch==1.8.2
166
+ # via librosa
167
+ propcache==0.2.0
168
+ # via
169
+ # aiohttp
170
+ # yarl
171
+ protobuf==5.28.3
172
+ # via onnxruntime
173
+ pycparser==2.22
174
+ # via cffi
175
+ pydantic==2.9.2
176
+ # via
177
+ # fastapi
178
+ # gradio
179
+ # openai
180
+ pydantic-core==2.23.4
181
+ # via pydantic
182
+ pydub==0.25.1
183
+ # via gradio
184
+ pyee==12.1.1
185
+ # via aiortc
186
+ pygments==2.18.0
187
+ # via rich
188
+ pyjwt==2.10.0
189
+ # via twilio
190
+ pylibsrtp==0.10.0
191
+ # via aiortc
192
+ pyopenssl==24.2.1
193
+ # via aiortc
194
+ python-dateutil==2.9.0.post0
195
+ # via pandas
196
+ python-dotenv==1.0.1
197
+ # via -r requirements.in
198
+ python-multipart==0.0.12
199
+ # via gradio
200
+ pytz==2024.2
201
+ # via pandas
202
+ pyyaml==6.0.2
203
+ # via
204
+ # gradio
205
+ # huggingface-hub
206
+ requests==2.32.3
207
+ # via
208
+ # huggingface-hub
209
+ # pooch
210
+ # twilio
211
+ rich==13.9.4
212
+ # via typer
213
+ ruff==0.7.4
214
+ # via gradio
215
+ safehttpx==0.1.1
216
+ # via gradio
217
+ scikit-learn==1.5.2
218
+ # via librosa
219
+ scipy==1.14.1
220
+ # via
221
+ # librosa
222
+ # scikit-learn
223
+ semantic-version==2.10.0
224
+ # via gradio
225
+ shellingham==1.5.4
226
+ # via typer
227
+ six==1.16.0
228
+ # via python-dateutil
229
+ sniffio==1.3.1
230
+ # via
231
+ # anyio
232
+ # httpx
233
+ # openai
234
+ soundfile==0.12.1
235
+ # via librosa
236
+ soxr==0.5.0.post1
237
+ # via librosa
238
+ starlette==0.41.3
239
+ # via
240
+ # fastapi
241
+ # gradio
242
+ sympy==1.13.3
243
+ # via onnxruntime
244
+ threadpoolctl==3.5.0
245
+ # via scikit-learn
246
+ tomlkit==0.12.0
247
+ # via gradio
248
+ tqdm==4.67.0
249
+ # via
250
+ # huggingface-hub
251
+ # openai
252
+ twilio==9.3.7
253
+ # via -r requirements.in
254
+ typer==0.13.1
255
+ # via gradio
256
+ typing-extensions==4.12.2
257
+ # via
258
+ # fastapi
259
+ # gradio
260
+ # gradio-client
261
+ # huggingface-hub
262
+ # librosa
263
+ # openai
264
+ # pydantic
265
+ # pydantic-core
266
+ # pyee
267
+ # typer
268
+ tzdata==2024.2
269
+ # via pandas
270
+ urllib3==2.2.3
271
+ # via requests
272
+ uvicorn==0.32.0
273
+ # via gradio
274
+ websockets==12.0
275
+ # via gradio-client
276
+ yarl==1.17.2
277
+ # via aiohttp
ui.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from dotenv import load_dotenv
from fastrtc import WebRTC, get_twilio_turn_credentials
import gradio as gr
from gradio.utils import get_space
from pathlib import Path

# Resolve imports for both layouts: the in-repo demo package and a flat
# Space deployment where handler.py sits next to this file.
try:
    from demo.llama_code_editor.handler import (
        CodeHandler,
        system_prompt,
        display_in_sandbox,
    )
except (ImportError, ModuleNotFoundError):
    from handler import CodeHandler, system_prompt, display_in_sandbox

load_dotenv()

# Static assets (the placeholder sandbox page) live next to this file.
path = Path(__file__).parent / "assets"

with gr.Blocks(css=".code-component {max-height: 500px !important}") as demo:
    # Per-session chat history, seeded with the coding system prompt.
    history = gr.State([{"role": "system", "content": system_prompt}])
    with gr.Row():
        with gr.Column(scale=1):
            gr.HTML(
                """
            <h1 style='text-align: center'>
            Llama Code Editor
            </h1>
            <h2 style='text-align: center'>
            Powered by SambaNova and Gradio-WebRTC ⚡️
            </h2>
            <p style='text-align: center'>
            Create and edit single-file HTML applications with just your voice!
            </p>
            <p style='text-align: center'>
            Each conversation is limited to 90 seconds. Once the time limit is up you can rejoin the conversation.
            </p>
            """
            )
            # Send-only audio component. Twilio TURN credentials are fetched
            # only on Spaces — presumably because direct peer connections
            # are blocked there; NOTE(review): confirm.
            webrtc = WebRTC(
                rtc_configuration=get_twilio_turn_credentials()
                if get_space()
                else None,
                mode="send",
                modality="audio",
            )
        with gr.Column(scale=10):
            with gr.Tabs():
                with gr.Tab("Sandbox"):
                    # Starts as the static "No Application Created" page.
                    sandbox = gr.HTML(value=open(path / "sandbox.html").read())
                with gr.Tab("Code"):
                    code = gr.Code(
                        language="html",
                        max_lines=50,
                        interactive=False,
                        elem_classes="code-component",
                    )
                with gr.Tab("Chat"):
                    cb = gr.Chatbot(type="messages")

    # Feed mic audio plus current state into the voice-to-code handler;
    # limits mirror those configured on the Stream in app.py.
    webrtc.stream(
        CodeHandler,
        inputs=[webrtc, history, code],
        outputs=[webrtc],
        time_limit=90,
        concurrency_limit=10,
    )
    # The handler emits AdditionalOutputs(history, code); fan them out to
    # the state, the code view, and the chatbot (which renders history).
    webrtc.on_additional_outputs(
        lambda history, code: (history, code, history), outputs=[history, code, cb]
    )
    # Re-render the sandbox iframe whenever the generated code changes.
    code.change(display_in_sandbox, code, sandbox, queue=False)

if __name__ == "__main__":
    demo.launch()