ginipick committed
Commit ef3113e · verified · 1 Parent(s): 06dc656

Create app-backup.py

Files changed (1):
  1. app-backup.py +450 -0

app-backup.py ADDED
@@ -0,0 +1,450 @@
import os
import gradio as gr
from gradio import ChatMessage
from typing import Iterator, List, Dict
import google.generativeai as genai
from huggingface_hub import HfApi
import requests
import re
import traceback

# HuggingFace API token (used for Space analysis)
HF_TOKEN = os.getenv("HF_TOKEN")
hf_api = HfApi(token=HF_TOKEN)

# Gemini 2.0 Flash Thinking model API key and client (used as the LLM)
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
genai.configure(api_key=GEMINI_API_KEY)
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

def get_headers():
    if not HF_TOKEN:
        raise ValueError("Hugging Face token not found in environment variables")
    return {"Authorization": f"Bearer {HF_TOKEN}"}

def get_file_content(space_id: str, file_path: str) -> str:
    file_url = f"https://huggingface.co/spaces/{space_id}/raw/main/{file_path}"
    try:
        response = requests.get(file_url, headers=get_headers())
        if response.status_code == 200:
            return response.text
        else:
            return f"File not found or inaccessible: {file_path}"
    except requests.RequestException:
        return f"Error fetching content for file: {file_path}"

def get_space_structure(space_id: str) -> Dict:
    try:
        files = hf_api.list_repo_files(repo_id=space_id, repo_type="space")
        tree = {"type": "directory", "path": "", "name": space_id, "children": []}
        for file in files:
            path_parts = file.split('/')
            current = tree
            for i, part in enumerate(path_parts):
                if i == len(path_parts) - 1:  # file
                    current["children"].append({"type": "file", "path": file, "name": part})
                else:
                    found = False
                    for child in current["children"]:
                        if child["type"] == "directory" and child["name"] == part:
                            current = child
                            found = True
                            break
                    if not found:
                        new_dir = {"type": "directory", "path": '/'.join(path_parts[:i+1]), "name": part, "children": []}
                        current["children"].append(new_dir)
                        current = new_dir
        return tree
    except Exception as e:
        print(f"Error in get_space_structure: {str(e)}")
        return {"error": f"API request error: {str(e)}"}

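# Illustrative note (not part of the original file): for a Space containing
# "app.py" and "assets/logo.png", get_space_structure returns a nested dict like
#   {"type": "directory", "path": "", "name": "<space_id>", "children": [
#       {"type": "file", "path": "app.py", "name": "app.py"},
#       {"type": "directory", "path": "assets", "name": "assets", "children": [
#           {"type": "file", "path": "assets/logo.png", "name": "logo.png"}]}]}
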
def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
    if "error" in tree_data:
        return tree_data["error"]
    formatted = f"{indent}{'📁' if tree_data.get('type') == 'directory' else '📄'} {tree_data.get('name', 'Unknown')}\n"
    if tree_data.get("type") == "directory":
        # Directories sort before files, then alphabetically by name.
        for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
            formatted += format_tree_structure(child, indent + "  ")
    return formatted

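# Illustrative note (not part of the original file): the tree above renders roughly as
#   📁 <space_id>
#     📁 assets
#       📄 logo.png
#     📄 app.py
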
def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
    num_lines = len(code_content.split('\n'))
    return min(max(num_lines, min_lines), max_lines)

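# Illustrative note (not part of the original file): adjust_lines_for_code clamps
# the code viewer height, e.g. a 3-line file -> 10, 42 lines -> 42, 500 lines -> 100.
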
def analyze_space(url: str, progress=gr.Progress()):
    try:
        space_id = url.split('spaces/')[-1]
        if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
            raise ValueError(f"Invalid Space ID format: {space_id}")

        progress(0.1, desc="Analyzing file structure...")
        tree_structure = get_space_structure(space_id)
        if "error" in tree_structure:
            raise ValueError(tree_structure["error"])
        tree_view = format_tree_structure(tree_structure)

        progress(0.3, desc="Fetching app.py...")
        app_content = get_file_content(space_id, "app.py")

        progress(0.5, desc="Summarizing code...")
        summary = summarize_code(app_content)

        progress(0.7, desc="Analyzing code...")
        analysis = analyze_code(app_content)

        progress(0.9, desc="Generating usage instructions...")
        usage = explain_usage(app_content)

        lines_for_app_py = adjust_lines_for_code(app_content)
        progress(1.0, desc="Done")

        return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, lines_for_app_py

    except Exception as e:
        print(f"Error in analyze_space: {str(e)}")
        print(traceback.format_exc())
        return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10


# --------------------------------------------------
# Gemini 2.0 Flash Thinking model (LLM) functions
# --------------------------------------------------

def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
    """
    Convert a list of ChatMessages into the format the Gemini model understands.
    (Messages carrying 'Thinking' metadata are skipped.)
    """
    formatted = []
    for m in messages:
        if hasattr(m, "metadata") and m.metadata:  # skip 'Thinking' messages
            continue
        # The Gemini SDK accepts the roles "user" and "model" (not "assistant").
        role = "model" if m.role == "assistant" else "user"
        formatted.append({"role": role, "parts": [m.content or ""]})
    return formatted

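# Illustrative note (not part of the original file):
# ChatMessage(role="assistant", content="hello") maps to
# {"role": "model", "parts": ["hello"]} in the history format above.
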
def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
    # Seed the chat with the system message only; the user message is sent
    # below so it does not appear twice in the conversation.
    chat_history = format_chat_history([ChatMessage(role="system", content=system_message)])
    chat = model.start_chat(history=chat_history)
    final = ""
    try:
        for chunk in chat.send_message(
            user_message,
            stream=True,
            # Assumption: pass the sampling parameters through a GenerationConfig
            # so the max_tokens/temperature arguments actually take effect.
            generation_config=genai.GenerationConfig(
                max_output_tokens=max_tokens, temperature=temperature
            ),
        ):
            parts = chunk.candidates[0].content.parts
            if len(parts) == 2:
                # Two parts: [thought, answer]; keep only the answer text.
                final += parts[1].text
            else:
                final += parts[0].text
        return final.strip()
    except Exception as e:
        return f"Error during LLM call: {str(e)}"

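# Illustrative usage (hypothetical values, not part of the original file):
#   text = gemini_chat_completion(
#       "You are a concise code summarizer.",
#       "Summarize: print('hello')",
#       max_tokens=100, temperature=0.3)
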
def summarize_code(app_content: str):
    system_msg = "You are an AI assistant that analyzes and summarizes Python code. Summarize the given code concisely, in three lines or fewer."
    user_msg = f"Summarize the following Python code in three lines or fewer:\n\n{app_content}"
    try:
        return gemini_chat_completion(system_msg, user_msg, max_tokens=200, temperature=0.7)
    except Exception as e:
        return f"Error generating summary: {str(e)}"

def analyze_code(app_content: str):
    system_msg = (
        "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
        "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
        "You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. "
        "You are an AI assistant that analyzes Python code. Analyze the given code and, from the standpoint of the service's usefulness and applications, cover the following items:\n"
        "A. Background and necessity\n"
        "B. Functional usefulness and value\n"
        "C. Distinguishing strengths\n"
        "D. Target audience and use cases\n"
        "E. Expected impact\n"
        "Compare it with existing and similar projects. Output in Markdown format."
    )
    user_msg = f"Analyze the following Python code:\n\n{app_content}"
    try:
        return gemini_chat_completion(system_msg, user_msg, max_tokens=1000, temperature=0.7)
    except Exception as e:
        return f"Error generating analysis: {str(e)}"

def explain_usage(app_content: str):
    system_msg = (
        "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
        "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
        "You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. "
        "You are an AI assistant that analyzes Python code and explains how to use it. Based on the given code, describe its usage in detail, as if walking through the screen. Output in Markdown format."
    )
    user_msg = f"Explain how to use the following Python code:\n\n{app_content}"
    try:
        return gemini_chat_completion(system_msg, user_msg, max_tokens=800, temperature=0.7)
    except Exception as e:
        return f"Error generating usage instructions: {str(e)}"

def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
    """
    Send a streaming request to Gemini.
    - If user_message is empty, append a minimal assistant notice, yield, and stop.
    """
    if not user_message.strip():
        # Handle empty input: show a notice message.
        conversation_state.append(
            ChatMessage(
                role="assistant",
                content="No input received. Please write a question!"
            )
        )
        yield conversation_state
        return

    print(f"\n=== New Request ===\nUser message: {user_message}")
    chat_history = format_chat_history(conversation_state)
    chat = model.start_chat(history=chat_history)
    response = chat.send_message(user_message, stream=True)

    thought_buffer = ""
    response_buffer = ""
    thinking_complete = False

    conversation_state.append(
        ChatMessage(
            role="assistant",
            content="",
            metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
        )
    )

    try:
        for chunk in response:
            parts = chunk.candidates[0].content.parts
            current_chunk = parts[0].text

            if len(parts) == 2 and not thinking_complete:
                # This chunk carries both the final thought and the first answer part.
                thought_buffer += current_chunk
                print(f"\n=== Complete Thought ===\n{thought_buffer}")
                conversation_state[-1] = ChatMessage(
                    role="assistant",
                    content=thought_buffer,
                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
                )
                yield conversation_state

                response_buffer = parts[1].text
                print(f"\n=== Starting Response ===\n{response_buffer}")
                conversation_state.append(
                    ChatMessage(role="assistant", content=response_buffer)
                )
                thinking_complete = True

            elif thinking_complete:
                response_buffer += current_chunk
                print(f"\n=== Response Chunk ===\n{current_chunk}")
                conversation_state[-1] = ChatMessage(
                    role="assistant",
                    content=response_buffer
                )
            else:
                thought_buffer += current_chunk
                print(f"\n=== Thinking Chunk ===\n{current_chunk}")
                conversation_state[-1] = ChatMessage(
                    role="assistant",
                    content=thought_buffer,
                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
                )
            yield conversation_state

        print(f"\n=== Final Response ===\n{response_buffer}")

    except Exception as e:
        print(f"\n=== Error ===\n{str(e)}")
        conversation_state.append(
            ChatMessage(
                role="assistant",
                content=f"I apologize, but I encountered an error: {str(e)}"
            )
        )
        yield conversation_state

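# Illustrative note (not part of the original file): each yield hands the full
# message list back to Gradio, so the UI re-renders the growing "Thinking"
# bubble first and then streams the answer into a second assistant bubble.
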
def convert_for_messages_format(messages: List[ChatMessage]) -> List[Dict[str, str]]:
    """
    Convert a ChatMessage list -> [{"role": "assistant"/"user", "content": "..."}]
    """
    output = []
    for msg in messages:
        output.append({"role": msg.role, "content": msg.content})
    return output

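# Illustrative note (not part of the original file):
# [ChatMessage(role="user", content="hi")] becomes [{"role": "user", "content": "hi"}],
# the plain-dict structure that gr.Chatbot(type="messages") renders.
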
def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
    conversation_state.append(ChatMessage(role="user", content=msg))
    return "", conversation_state

def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
    # Fetch the last user message.
    last_user_message = ""
    for msg in reversed(conversation_state):
        if msg.role == "user":
            last_user_message = msg.content
            break

    # Generate a response to the last user message.
    for updated_messages in stream_gemini_response(last_user_message, conversation_state):
        yield "", convert_for_messages_format(updated_messages)

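# Illustrative note (not part of the original file): max_tokens, temperature and
# top_p are accepted here to match the hidden sliders in the UI below, but this
# version does not forward them to the Gemini call.
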
def create_ui():
    try:
        css = """
        footer {visibility: hidden;}
        """

        with gr.Blocks(css=css) as demo:
            gr.Markdown("# MOUSE: Space Research Thinking")

            with gr.Tabs():
                with gr.TabItem("Analysis"):
                    with gr.Row():
                        with gr.Column():
                            url_input = gr.Textbox(label="HuggingFace Space URL")
                            analyze_button = gr.Button("Analyze")

                            summary_output = gr.Markdown(label="Summary")
                            analysis_output = gr.Markdown(label="Analysis")
                            usage_output = gr.Markdown(label="Usage")
                            tree_view_output = gr.Textbox(label="File structure", lines=20)

                        with gr.Column():
                            code_tabs = gr.Tabs()
                            with code_tabs:
                                with gr.TabItem("app.py"):
                                    app_py_content = gr.Code(
                                        language="python",
                                        label="app.py",
                                        lines=50
                                    )
                                with gr.TabItem("requirements.txt"):
                                    requirements_content = gr.Textbox(
                                        label="requirements.txt",
                                        lines=50
                                    )

                with gr.TabItem("AI Code Chat"):
                    gr.Markdown("## Enter an example or paste source code, then ask a question")

                    # Chatbot: type="messages"
                    chatbot = gr.Chatbot(
                        label="Conversation",
                        height=400,
                        type="messages"
                    )

                    msg = gr.Textbox(
                        label="Message",
                        placeholder="Type a message..."
                    )

                    max_tokens = gr.Slider(
                        minimum=1, maximum=8000,
                        value=4000, label="Max Tokens",
                        visible=False
                    )
                    temperature = gr.Slider(
                        minimum=0, maximum=1,
                        value=0.7, label="Temperature",
                        visible=False
                    )
                    top_p = gr.Slider(
                        minimum=0, maximum=1,
                        value=0.9, label="Top P",
                        visible=False
                    )

                    examples = [
                        ["Explain the usage in detail, in at least 4000 tokens"],
                        ["Write 20 FAQ entries, in at least 4000 tokens"],
                        ["Describe the technical differentiators and strengths, in at least 4000 tokens"],
                        ["Write innovative ideas suitable for a patent filing, in at least 4000 tokens"],
                        ["Write it up in the form of an academic paper, in at least 4000 tokens"],
                        ["Continue the previous answer"]
                    ]
                    gr.Examples(examples, inputs=msg)

                    conversation_state = gr.State([])

                    msg.submit(
                        user_submit_message,
                        inputs=[msg, conversation_state],
                        outputs=[msg, conversation_state],
                        queue=False
                    ).then(
                        respond_wrapper,
                        inputs=[msg, conversation_state, max_tokens, temperature, top_p],
                        outputs=[msg, chatbot],
                    )

                with gr.TabItem("Recommended Best"):
                    gr.Markdown(
                        "Discover recommended HuggingFace Spaces [here](https://huggingface.co/spaces/openfree/Korean-Leaderboard)."
                    )

            # Wiring for the analysis tab
            space_id_state = gr.State()
            tree_structure_state = gr.State()
            app_py_content_lines = gr.State()

            analyze_button.click(
                analyze_space,
                inputs=[url_input],
                outputs=[
                    app_py_content,
                    tree_view_output,
                    tree_structure_state,
                    space_id_state,
                    summary_output,
                    analysis_output,
                    usage_output,
                    app_py_content_lines
                ]
            ).then(
                lambda space_id: get_file_content(space_id, "requirements.txt"),
                inputs=[space_id_state],
                outputs=[requirements_content]
            ).then(
                lambda lines: gr.update(lines=lines),
                inputs=[app_py_content_lines],
                outputs=[app_py_content]
            )

        return demo

    except Exception as e:
        print(f"Error in create_ui: {str(e)}")
        print(traceback.format_exc())
        raise

if __name__ == "__main__":
    try:
        print("Starting HuggingFace Space Analyzer...")
        demo = create_ui()
        print("UI created successfully.")
        print("Configuring Gradio queue...")
        demo.queue()
        print("Gradio queue configured.")
        print("Launching Gradio app...")
        demo.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,
            debug=True,
            show_api=False
        )
        print("Gradio app launched successfully.")
    except Exception as e:
        print(f"Error in main: {str(e)}")
        print("Detailed error information:")
        print(traceback.format_exc())
        raise