Create app-backup.py
app-backup.py
ADDED
@@ -0,0 +1,450 @@
import os
import gradio as gr
from gradio import ChatMessage
from typing import Iterator, List, Dict, Tuple, Any
import google.generativeai as genai
from huggingface_hub import HfApi
import requests
import re
import traceback

# Hugging Face API token (used for Space analysis)
HF_TOKEN = os.getenv("HF_TOKEN")
hf_api = HfApi(token=HF_TOKEN)

# Gemini 2.0 Flash Thinking model: API key and client (used as the LLM)
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
genai.configure(api_key=GEMINI_API_KEY)
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

def get_headers():
    if not HF_TOKEN:
        raise ValueError("Hugging Face token not found in environment variables")
    return {"Authorization": f"Bearer {HF_TOKEN}"}

def get_file_content(space_id: str, file_path: str) -> str:
    file_url = f"https://huggingface.co/spaces/{space_id}/raw/main/{file_path}"
    try:
        response = requests.get(file_url, headers=get_headers())
        if response.status_code == 200:
            return response.text
        else:
            return f"File not found or inaccessible: {file_path}"
    except requests.RequestException:
        return f"Error fetching content for file: {file_path}"

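# A minimal usage sketch (the Space ID and file below are illustrative, not taken
# from this repo):
#   get_file_content("user/some-space", "README.md")
# fetches https://huggingface.co/spaces/user/some-space/raw/main/README.md
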
def get_space_structure(space_id: str) -> Dict:
    try:
        files = hf_api.list_repo_files(repo_id=space_id, repo_type="space")
        tree = {"type": "directory", "path": "", "name": space_id, "children": []}
        for file in files:
            path_parts = file.split('/')
            current = tree
            for i, part in enumerate(path_parts):
                if i == len(path_parts) - 1:  # file
                    current["children"].append({"type": "file", "path": file, "name": part})
                else:
                    found = False
                    for child in current["children"]:
                        if child["type"] == "directory" and child["name"] == part:
                            current = child
                            found = True
                            break
                    if not found:
                        new_dir = {"type": "directory", "path": '/'.join(path_parts[:i+1]), "name": part, "children": []}
                        current["children"].append(new_dir)
                        current = new_dir
        return tree
    except Exception as e:
        print(f"Error in get_space_structure: {str(e)}")
        return {"error": f"API request error: {str(e)}"}

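# Illustrative shape of the returned tree (hand-written, not from a live API call)
# for files ["app.py", "assets/logo.png"]:
# {"type": "directory", "path": "", "name": "<space_id>", "children": [
#     {"type": "file", "path": "app.py", "name": "app.py"},
#     {"type": "directory", "path": "assets", "name": "assets", "children": [
#         {"type": "file", "path": "assets/logo.png", "name": "logo.png"}]}]}
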
def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
    if "error" in tree_data:
        return tree_data["error"]
    formatted = f"{indent}{'📁' if tree_data.get('type') == 'directory' else '📄'} {tree_data.get('name', 'Unknown')}\n"
    if tree_data.get("type") == "directory":
        for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
            formatted += format_tree_structure(child, indent + "  ")
    return formatted

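# For the illustrative tree above, the rendered text is roughly (directories
# sort before files at each level):
#   📁 <space_id>
#     📁 assets
#       📄 logo.png
#     📄 app.py
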
def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
    num_lines = len(code_content.split('\n'))
    return min(max(num_lines, min_lines), max_lines)

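# e.g. adjust_lines_for_code("a\nb") returns 10 (clamped up to min_lines),
# while a 500-line file is clamped down to max_lines (100).
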
def analyze_space(url: str, progress=gr.Progress()):
    try:
        space_id = url.split('spaces/')[-1]
        if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
            raise ValueError(f"Invalid Space ID format: {space_id}")

        progress(0.1, desc="Analyzing file structure...")
        tree_structure = get_space_structure(space_id)
        if "error" in tree_structure:
            raise ValueError(tree_structure["error"])
        tree_view = format_tree_structure(tree_structure)

        progress(0.3, desc="Fetching app.py content...")
        app_content = get_file_content(space_id, "app.py")

        progress(0.5, desc="Summarizing code...")
        summary = summarize_code(app_content)

        progress(0.7, desc="Analyzing code...")
        analysis = analyze_code(app_content)

        progress(0.9, desc="Generating usage instructions...")
        usage = explain_usage(app_content)

        lines_for_app_py = adjust_lines_for_code(app_content)
        progress(1.0, desc="Done")

        return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, lines_for_app_py

    except Exception as e:
        print(f"Error in analyze_space: {str(e)}")
        print(traceback.format_exc())
        return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10

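# Note: the eight return values above line up positionally with the eight
# `outputs` components wired to analyze_button.click() in create_ui() below;
# the error branch returns placeholders of matching arity so the UI never
# receives a mismatched tuple.
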
# --------------------------------------------------
# Gemini 2.0 Flash Thinking model (LLM) functions
# --------------------------------------------------

def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
    """
    Convert a list of ChatMessage objects into the format the Gemini model understands.
    (Messages carrying "Thinking" metadata are ignored.)
    """
    formatted = []
    for m in messages:
        if hasattr(m, "metadata") and m.metadata:  # ignore 'Thinking' messages
            continue
        role = "assistant" if m.role == "assistant" else "user"
        formatted.append({"role": role, "parts": [m.content or ""]})
    return formatted

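# Example: a history of [user("hi"), assistant("...", metadata={"title": "Thinking"}),
# assistant("hello")] becomes
# [{"role": "user", "parts": ["hi"]}, {"role": "assistant", "parts": ["hello"]}].
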
def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
    init_msgs = [
        ChatMessage(role="system", content=system_message),
        ChatMessage(role="user", content=user_message)
    ]
    chat_history = format_chat_history(init_msgs)
    chat = model.start_chat(history=chat_history)
    final = ""
    try:
        for chunk in chat.send_message(user_message, stream=True):
            parts = chunk.candidates[0].content.parts
            if len(parts) == 2:
                final += parts[1].text
            else:
                final += parts[0].text
        return final.strip()
    except Exception as e:
        return f"Error during LLM call: {str(e)}"

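# Streaming convention this file assumes for the experimental "thinking" model:
# a chunk may carry two content parts, where parts[0] is the thought stream and
# parts[1] is the visible answer. When a chunk has two parts, the function above
# keeps only the answer part and drops the thought.
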
def summarize_code(app_content: str):
    system_msg = "You are an AI assistant that analyzes and summarizes Python code. Summarize the given code concisely, in three lines or fewer."
    user_msg = f"Summarize the following Python code in three lines or fewer:\n\n{app_content}"
    try:
        return gemini_chat_completion(system_msg, user_msg, max_tokens=200, temperature=0.7)
    except Exception as e:
        return f"Error while generating summary: {str(e)}"

def analyze_code(app_content: str):
    system_msg = (
        "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
        "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
        "You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. "
        "You are an AI assistant that analyzes Python code. Analyze the given code and explain the following points in terms of the service's usefulness and applications:\n"
        "A. Background and necessity\n"
        "B. Functional usefulness and value\n"
        "C. Distinctive strengths\n"
        "D. Target audience and use cases\n"
        "E. Expected benefits\n"
        "Compare it against existing and similar projects in your analysis. Output in Markdown format."
    )
    user_msg = f"Analyze the following Python code:\n\n{app_content}"
    try:
        return gemini_chat_completion(system_msg, user_msg, max_tokens=1000, temperature=0.7)
    except Exception as e:
        return f"Error while generating analysis: {str(e)}"

def explain_usage(app_content: str):
    system_msg = (
        "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
        "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
        "You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. "
        "You are an AI assistant that analyzes Python code and explains how to use it. Based on the given code, describe the usage in detail, as if the reader were looking at the running app. Output in Markdown format."
    )
    user_msg = f"Explain how to use the following Python code:\n\n{app_content}"
    try:
        return gemini_chat_completion(system_msg, user_msg, max_tokens=800, temperature=0.7)
    except Exception as e:
        return f"Error while generating usage instructions: {str(e)}"

def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
    """
    Send a streaming request to Gemini.
    - If user_message is empty, append a minimal guidance message as the assistant, yield once, and stop.
    """
    if not user_message.strip():
        # Empty input: show a guidance message
        conversation_state.append(
            ChatMessage(
                role="assistant",
                content="There is no input. Please write a question!"
            )
        )
        yield conversation_state
        return

    print(f"\n=== New Request ===\nUser message: {user_message}")
    chat_history = format_chat_history(conversation_state)
    chat = model.start_chat(history=chat_history)
    response = chat.send_message(user_message, stream=True)

    thought_buffer = ""
    response_buffer = ""
    thinking_complete = False

    conversation_state.append(
        ChatMessage(
            role="assistant",
            content="",
            metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
        )
    )

    try:
        for chunk in response:
            parts = chunk.candidates[0].content.parts
            current_chunk = parts[0].text

            if len(parts) == 2 and not thinking_complete:
                thought_buffer += current_chunk
                print(f"\n=== Complete Thought ===\n{thought_buffer}")
                conversation_state[-1] = ChatMessage(
                    role="assistant",
                    content=thought_buffer,
                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
                )
                yield conversation_state

                response_buffer = parts[1].text
                print(f"\n=== Starting Response ===\n{response_buffer}")
                conversation_state.append(
                    ChatMessage(role="assistant", content=response_buffer)
                )
                thinking_complete = True

            elif thinking_complete:
                response_buffer += current_chunk
                print(f"\n=== Response Chunk ===\n{current_chunk}")
                conversation_state[-1] = ChatMessage(
                    role="assistant",
                    content=response_buffer
                )
            else:
                thought_buffer += current_chunk
                print(f"\n=== Thinking Chunk ===\n{current_chunk}")
                conversation_state[-1] = ChatMessage(
                    role="assistant",
                    content=thought_buffer,
                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
                )
            yield conversation_state

        print(f"\n=== Final Response ===\n{response_buffer}")

    except Exception as e:
        print(f"\n=== Error ===\n{str(e)}")
        conversation_state.append(
            ChatMessage(
                role="assistant",
                content=f"I apologize, but I encountered an error: {str(e)}"
            )
        )
        yield conversation_state

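# The generator above mutates conversation_state in place and re-yields the whole
# list after every chunk, so the Gradio Chatbot re-renders incrementally: the
# "Thinking" bubble fills first, then a separate assistant bubble streams the answer.
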
def convert_for_messages_format(messages: List[ChatMessage]) -> List[Dict[str, str]]:
    """
    ChatMessage list -> [{"role": "assistant"/"user", "content": "..."}]
    """
    output = []
    for msg in messages:
        output.append({"role": msg.role, "content": msg.content})
    return output

def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
    conversation_state.append(ChatMessage(role="user", content=msg))
    return "", conversation_state

def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
    # Grab the most recent user message from the conversation state
    last_user_message = ""
    for msg in reversed(conversation_state):
        if msg.role == "user":
            last_user_message = msg.content
            break

    # Generate the response from that message
    for updated_messages in stream_gemini_response(last_user_message, conversation_state):
        yield "", convert_for_messages_format(updated_messages)

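# Note: `message`, `max_tokens`, `temperature`, and `top_p` are accepted but unused.
# The textbox is cleared by user_submit_message() before this runs, so the prompt
# is recovered from conversation_state instead, and the sliders are hidden in the UI.
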
def create_ui():
    try:
        css = """
        footer {visibility: hidden;}
        """

        with gr.Blocks(css=css) as demo:
            gr.Markdown("# MOUSE: Space Research Thinking")

            with gr.Tabs():
                with gr.TabItem("Analysis"):
                    with gr.Row():
                        with gr.Column():
                            url_input = gr.Textbox(label="HuggingFace Space URL")
                            analyze_button = gr.Button("Analyze")

                            summary_output = gr.Markdown(label="Summary")
                            analysis_output = gr.Markdown(label="Analysis")
                            usage_output = gr.Markdown(label="Usage")
                            tree_view_output = gr.Textbox(label="File Structure", lines=20)

                        with gr.Column():
                            code_tabs = gr.Tabs()
                            with code_tabs:
                                with gr.TabItem("app.py"):
                                    app_py_content = gr.Code(
                                        language="python",
                                        label="app.py",
                                        lines=50
                                    )
                                with gr.TabItem("requirements.txt"):
                                    requirements_content = gr.Textbox(
                                        label="requirements.txt",
                                        lines=50
                                    )

                with gr.TabItem("AI Coding"):
                    gr.Markdown("## Enter an example, or paste in source code and ask a question")

                    # Chatbot: type="messages"
                    chatbot = gr.Chatbot(
                        label="Conversation",
                        height=400,
                        type="messages"
                    )

                    msg = gr.Textbox(
                        label="Message",
                        placeholder="Enter a message..."
                    )

                    max_tokens = gr.Slider(
                        minimum=1, maximum=8000,
                        value=4000, label="Max Tokens",
                        visible=False
                    )
                    temperature = gr.Slider(
                        minimum=0, maximum=1,
                        value=0.7, label="Temperature",
                        visible=False
                    )
                    top_p = gr.Slider(
                        minimum=0, maximum=1,
                        value=0.9, label="Top P",
                        visible=False
                    )

                    examples = [
                        ["Explain the detailed usage in at least 4000 tokens"],
                        ["Write 20 FAQ entries in at least 4000 tokens"],
                        ["Explain in at least 4000 tokens, focusing on technical differentiators and strengths"],
                        ["Write innovative ideas usable for a patent application, in at least 4000 tokens"],
                        ["Write in academic-paper format, at least 4000 tokens"],
                        ["Continue the previous answer"]
                    ]
                    gr.Examples(examples, inputs=msg)

                    conversation_state = gr.State([])

                    msg.submit(
                        user_submit_message,
                        inputs=[msg, conversation_state],
                        outputs=[msg, conversation_state],
                        queue=False
                    ).then(
                        respond_wrapper,
                        inputs=[msg, conversation_state, max_tokens, temperature, top_p],
                        outputs=[msg, chatbot],
                    )

                with gr.TabItem("Recommended Best"):
                    gr.Markdown(
                        "Discover recommended HuggingFace Spaces [here](https://huggingface.co/spaces/openfree/Korean-Leaderboard)."
                    )

            # Analysis tab wiring
            space_id_state = gr.State()
            tree_structure_state = gr.State()
            app_py_content_lines = gr.State()

            analyze_button.click(
                analyze_space,
                inputs=[url_input],
                outputs=[
                    app_py_content,
                    tree_view_output,
                    tree_structure_state,
                    space_id_state,
                    summary_output,
                    analysis_output,
                    usage_output,
                    app_py_content_lines
                ]
            ).then(
                lambda space_id: get_file_content(space_id, "requirements.txt"),
                inputs=[space_id_state],
                outputs=[requirements_content]
            ).then(
                lambda lines: gr.update(lines=lines),
                inputs=[app_py_content_lines],
                outputs=[app_py_content]
            )

        return demo

    except Exception as e:
        print(f"Error in create_ui: {str(e)}")
        print(traceback.format_exc())
        raise

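# Event-chain sketch: analyze_button.click() fills the eight analysis outputs,
# the first .then() fetches requirements.txt for the analyzed Space, and the
# final .then() resizes the app.py viewer to the line count computed by
# adjust_lines_for_code().
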
if __name__ == "__main__":
    try:
        print("Starting HuggingFace Space Analyzer...")
        demo = create_ui()
        print("UI created successfully.")
        print("Configuring Gradio queue...")
        demo.queue()
        print("Gradio queue configured.")
        print("Launching Gradio app...")
        demo.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,
            debug=True,
            show_api=False
        )
        print("Gradio app launched successfully.")
    except Exception as e:
        print(f"Error in main: {str(e)}")
        print("Detailed error information:")
        print(traceback.format_exc())
        raise
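
# Assumed local invocation (both secrets must already be set, since HfApi() and
# genai.configure() run at import time):
#   HF_TOKEN=<hf-token> GEMINI_API_KEY=<gemini-key> python app-backup.py
# The app then listens on http://0.0.0.0:7860.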