ginipick committed
Commit 10e03de · verified · 1 Parent(s): 5c58372

Update app.py

Files changed (1)
  1. app.py +55 -66
app.py CHANGED
@@ -8,11 +8,11 @@ import requests
 import re
 import traceback
 
-# HuggingFace 관련 API 키 (스페이스 분석 용)
+# HuggingFace API key for space analysis
 HF_TOKEN = os.getenv("HF_TOKEN")
 hf_api = HfApi(token=HF_TOKEN)
 
-# Gemini 2.0 Flash Thinking 모델 관련 API 키 및 클라이언트 (LLM 용)
+# Gemini 2.0 Flash Thinking model API key and client (for LLM)
 GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
 genai.configure(api_key=GEMINI_API_KEY)
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
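The configuration above is the standard `google-generativeai` setup: configure the SDK once with a key, then create a model handle. A minimal sketch assuming the same environment variable and model ID (the experimental model may require gated access):

```python
import os

import google.generativeai as genai

# One-time SDK configuration with the API key from the environment.
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

# Single-turn call; response.text joins the returned text parts.
response = model.generate_content("Describe a Gradio app in one sentence.")
print(response.text)
```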
@@ -41,7 +41,7 @@ def get_space_structure(space_id: str) -> Dict:
         path_parts = file.split('/')
         current = tree
         for i, part in enumerate(path_parts):
-            if i == len(path_parts) - 1:  # 파일
+            if i == len(path_parts) - 1:  # file
                 current["children"].append({"type": "file", "path": file, "name": part})
             else:
                 found = False
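This hunk shows only a fragment of the tree-building loop in `get_space_structure`. A self-contained sketch of the same idea, with a hypothetical `build_tree` wrapper and sample paths; the directory-lookup branch after `found = False` is reconstructed, since the diff does not show it:

```python
from typing import Dict, List

def build_tree(files: List[str]) -> Dict:
    """Fold slash-separated paths into a nested directory/file tree."""
    tree = {"type": "directory", "path": "", "name": "root", "children": []}
    for file in files:
        path_parts = file.split('/')
        current = tree
        for i, part in enumerate(path_parts):
            if i == len(path_parts) - 1:  # last segment: a file
                current["children"].append({"type": "file", "path": file, "name": part})
            else:  # intermediate segment: find or create the directory node
                found = False
                for child in current["children"]:
                    if child["type"] == "directory" and child["name"] == part:
                        current, found = child, True
                        break
                if not found:
                    new_dir = {"type": "directory",
                               "path": "/".join(path_parts[:i + 1]),
                               "name": part, "children": []}
                    current["children"].append(new_dir)
                    current = new_dir
    return tree

# Hypothetical sample input:
print(build_tree(["app.py", "assets/logo.png", "assets/css/style.css"]))
```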
@@ -78,55 +78,52 @@ def analyze_space(url: str, progress=gr.Progress()):
         if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
             raise ValueError(f"Invalid Space ID format: {space_id}")
 
-        progress(0.1, desc="📁 파일 구조 분석 중...")
+        progress(0.1, desc="Analyzing file structure...")
         tree_structure = get_space_structure(space_id)
         if "error" in tree_structure:
             raise ValueError(tree_structure["error"])
         tree_view = format_tree_structure(tree_structure)
 
-        progress(0.3, desc="📄 app.py 내용 가져오는 중...")
+        progress(0.3, desc="Fetching app.py content...")
         app_content = get_file_content(space_id, "app.py")
 
-        progress(0.5, desc="✏️ 코드 요약 중...")
+        progress(0.5, desc="Summarizing code...")
         summary = summarize_code(app_content)
 
-        progress(0.7, desc="🔍 코드 분석 중...")
+        progress(0.7, desc="Analyzing code...")
         analysis = analyze_code(app_content)
 
-        progress(0.9, desc="📚 사용법 설명 생성 중...")
+        progress(0.9, desc="Generating usage instructions...")
         usage = explain_usage(app_content)
 
         lines_for_app_py = adjust_lines_for_code(app_content)
-        progress(1.0, desc="✅ 완료")
+        progress(1.0, desc="Complete")
 
         return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, lines_for_app_py
 
     except Exception as e:
         print(f"Error in analyze_space: {str(e)}")
         print(traceback.format_exc())
-        return f"오류가 발생했습니다: {str(e)}", "", None, "", "", "", "", 10
-
+        return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10
 
 # --------------------------------------------------
-# Gemini 2.0 Flash Thinking 모델 (LLM) 함수들
+# Gemini 2.0 Flash Thinking model (LLM) functions
 # --------------------------------------------------
 from gradio import ChatMessage
 
 def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
     """
-    ChatMessage 목록을 Gemini 모델이 이해할 수 있는 형식으로 변환
-    (Thinking 메타데이터가 있는 메시지는 무시)
+    Convert a list of ChatMessages to a format that the Gemini model can understand.
+    (Skip messages with 'Thinking' metadata)
     """
     formatted = []
     for m in messages:
-        if hasattr(m, "metadata") and m.metadata:  # 'Thinking' 메시지는 무시
+        if hasattr(m, "metadata") and m.metadata:  # Skip 'Thinking' messages
             continue
         role = "assistant" if m.role == "assistant" else "user"
         formatted.append({"role": role, "parts": [m.content or ""]})
     return formatted
 
-import google.generativeai as genai
-
 def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
     init_msgs = [
         ChatMessage(role="system", content=system_message),
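The diff truncates the body of `gemini_chat_completion`. A hedged sketch of one way to implement the same signature with the `google-generativeai` SDK, folding the system message into the prompt (the SDK's chat history accepts only user/model roles) and mirroring the error-string convention used elsewhere in the file:

```python
import os

import google.generativeai as genai

genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

def gemini_chat_completion(system_message: str, user_message: str,
                           max_tokens: int = 200, temperature: float = 0.7) -> str:
    """One-shot completion that returns plain text, or an error string on failure."""
    try:
        response = model.generate_content(
            f"{system_message}\n\n{user_message}",
            generation_config=genai.types.GenerationConfig(
                max_output_tokens=max_tokens,
                temperature=temperature,
            ),
        )
        return response.text.strip()
    except Exception as e:
        return f"Error calling LLM: {str(e)}"
```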
@@ -144,60 +141,52 @@ def gemini_chat_completion(system_message: str, user_message: str, max_tokens: i
                 final += parts[0].text
         return final.strip()
     except Exception as e:
-        return f"LLM 호출 중 오류 발생: {str(e)}"
-
+        return f"Error calling LLM: {str(e)}"
 
 def summarize_code(app_content: str):
-    system_msg = "당신은 Python 코드를 분석하고 요약하는 AI 조수입니다. 주어진 코드를 3줄 이내로 간결하게 요약해주세요."
-    user_msg = f"다음 Python 코드를 3줄 이내로 요약해주세요:\n\n{app_content}"
+    system_msg = "You are an AI assistant that analyzes and summarizes Python code. Please summarize the provided code in no more than 3 lines."
+    user_msg = f"Please summarize the following Python code in no more than 3 lines:\n\n{app_content}"
     try:
         return gemini_chat_completion(system_msg, user_msg, max_tokens=200, temperature=0.7)
     except Exception as e:
-        return f"요약 생성 중 오류 발생: {str(e)}"
+        return f"Error generating summary: {str(e)}"
 
 def analyze_code(app_content: str):
     system_msg = (
-        "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
-        "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
-        "You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. "
-        "당신은 Python 코드를 분석하는 AI 조수입니다. 주어진 코드를 분석하여 서비스의 효용성과 활용 측면에서 다음 항목에 대해 설명해주세요:\n"
-        "A. 배경 및 필요성\n"
-        "B. 기능적 효용성 및 가치\n"
-        "C. 특장점\n"
-        "D. 적용 대상 및 타겟\n"
-        "E. 기대효과\n"
-        "기존 및 유사 프로젝트와 비교하여 분석해주세요. Markdown 형식으로 출력하세요."
+        "You are an AI assistant that analyzes Python code. Please analyze the provided code in terms of its service utility and application with respect to the following aspects:\n"
+        "A. Background and Necessity\n"
+        "B. Functional Utility and Value\n"
+        "C. Key Features\n"
+        "D. Target Audience\n"
+        "E. Expected Impact\n"
+        "Please also compare with existing and similar projects. Output in Markdown format."
     )
-    user_msg = f"다음 Python 코드를 분석해주세요:\n\n{app_content}"
+    user_msg = f"Please analyze the following Python code:\n\n{app_content}"
     try:
         return gemini_chat_completion(system_msg, user_msg, max_tokens=1000, temperature=0.7)
     except Exception as e:
-        return f"분석 생성 중 오류 발생: {str(e)}"
+        return f"Error generating analysis: {str(e)}"
 
 def explain_usage(app_content: str):
     system_msg = (
-        "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
-        "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
-        "You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. "
-        "당신은 Python 코드를 분석하여 사용법을 설명하는 AI 조수입니다. 주어진 코드를 바탕으로 마치 화면을 보는 것처럼 사용법을 상세히 설명해주세요. Markdown 형식으로 출력하세요."
+        "You are an AI assistant that analyzes Python code to explain its usage. Based on the provided code, please describe how to use it as if you were viewing the interface. Output in Markdown format."
    )
-    user_msg = f"다음 Python 코드를 사용법을 설명해주세요:\n\n{app_content}"
+    user_msg = f"Please explain how to use the following Python code:\n\n{app_content}"
     try:
         return gemini_chat_completion(system_msg, user_msg, max_tokens=800, temperature=0.7)
     except Exception as e:
-        return f"사용법 설명 생성 중 오류 발생: {str(e)}"
+        return f"Error generating usage instructions: {str(e)}"
 
 def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
     """
-    Gemini에 스트리밍 요청.
-    - user_message가 비어 있으면, 최소한의 안내 메시지를 assistant로 추가하고 yield 후 종료
+    Send a streaming request to Gemini.
+    If the user_message is empty, append a minimal guidance message from the assistant and yield.
     """
     if not user_message.strip():
-        # 빈 입력 처리: 안내 메시지 표시
         conversation_state.append(
             ChatMessage(
                 role="assistant",
-                content="입력이 없습니다. 질문을 작성해주세요!"
+                content="No input provided. Please enter a question!"
             )
        )
        yield conversation_state
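`stream_gemini_response` is a generator: every `yield` pushes the updated message list to the UI, which is how Gradio streams partial answers. A minimal sketch of the same pattern with a messages-type `gr.Chatbot`; the handler and token list are illustrative:

```python
import time

import gradio as gr

def stream_reply(message, history):
    # Each yielded value replaces the Chatbot content, producing a live stream.
    history = history + [{"role": "user", "content": message}]
    partial = ""
    for token in ["Analyzing", " the", " Space", "..."]:
        time.sleep(0.1)
        partial += token
        yield history + [{"role": "assistant", "content": partial}]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages", height=400)
    msg = gr.Textbox(label="Message")
    msg.submit(stream_reply, inputs=[msg, chatbot], outputs=chatbot)

demo.launch()
```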
@@ -273,7 +262,7 @@ def stream_gemini_response(user_message: str, conversation_state: List[ChatMessa
 
 def convert_for_messages_format(messages: List[ChatMessage]) -> List[Dict[str, str]]:
     """
-    ChatMessage 리스트 -> [{"role":"assistant"/"user", "content":"..."}]
+    Convert a list of ChatMessages to the format [{"role": "assistant"/"user", "content": "..."}].
     """
     output = []
     for msg in messages:
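For reference, the target shape is the `messages` format that `gr.Chatbot(type="messages")` expects; a tiny illustration with made-up values:

```python
from gradio import ChatMessage

msgs = [
    ChatMessage(role="user", content="What does this Space do?"),
    ChatMessage(role="assistant", content="It analyzes HuggingFace Spaces."),
]

# Equivalent of convert_for_messages_format: plain role/content dicts.
print([{"role": m.role, "content": m.content} for m in msgs])
# [{'role': 'user', 'content': 'What does this Space do?'},
#  {'role': 'assistant', 'content': 'It analyzes HuggingFace Spaces.'}]
```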
@@ -285,14 +274,14 @@ def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
     return "", conversation_state
 
 def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
-    # 마지막 사용자 메시지 가져오기
+    # Get the last user message
     last_user_message = ""
     for msg in reversed(conversation_state):
         if msg.role == "user":
             last_user_message = msg.content
             break
 
-    # 마지막 사용자 메시지로 응답 생성
+    # Generate response based on the last user message
     for updated_messages in stream_gemini_response(last_user_message, conversation_state):
         yield "", convert_for_messages_format(updated_messages)
 
@@ -320,16 +309,16 @@ def create_ui():
     gr.Markdown("# 🚀 MOUSE: Space Research Thinking")
 
     with gr.Tabs():
-        with gr.TabItem("🔍 분석"):
+        with gr.TabItem("🔍 Analysis"):
             with gr.Row():
                 with gr.Column():
-                    url_input = gr.Textbox(label="🔗 HuggingFace Space URL", placeholder="예: https://huggingface.co/spaces/username/space-name")
-                    analyze_button = gr.Button("분석 시작 🚀", variant="primary")
+                    url_input = gr.Textbox(label="🔗 HuggingFace Space URL", placeholder="e.g.: https://huggingface.co/spaces/username/space-name")
+                    analyze_button = gr.Button("Start Analysis 🚀", variant="primary")
 
-                    summary_output = gr.Markdown(label="📝 코드 요약")
-                    analysis_output = gr.Markdown(label="🔍 코드 분석")
-                    usage_output = gr.Markdown(label="📚 사용법 안내")
-                    tree_view_output = gr.Textbox(label="📁 파일 구조", lines=20)
+                    summary_output = gr.Markdown(label="📝 Code Summary")
+                    analysis_output = gr.Markdown(label="🔍 Code Analysis")
+                    usage_output = gr.Markdown(label="📚 Usage Instructions")
+                    tree_view_output = gr.Textbox(label="📁 File Structure", lines=20)
 
                 with gr.Column():
                     code_tabs = gr.Tabs()
@@ -346,16 +335,16 @@ def create_ui():
                         lines=50
                     )
 
-        with gr.TabItem("🤖 AI 코드챗"):
-            gr.Markdown("## 💬 예제를 입력하거나 소스 코드를 붙여넣고 질문해보세요!")
+        with gr.TabItem("🤖 AI Code Chat"):
+            gr.Markdown("## 💬 Enter an example or paste your source code and ask your question!")
             chatbot = gr.Chatbot(
-                label="대화창",
+                label="Chat Window",
                 height=400,
                 type="messages"
             )
             msg = gr.Textbox(
-                label="메시지 입력",
-                placeholder="메시지를 입력하세요..."
+                label="Enter your message",
+                placeholder="Type your message here..."
            )
             max_tokens = gr.Slider(
                 minimum=1, maximum=8000,
@@ -374,12 +363,12 @@ def create_ui():
             )
 
             examples = [
-                ["상세한 사용 방법을 4000 토큰 이상 상세히 설명"],
-                ["FAQ 20건을 4000 토큰 이상 작성"],
-                ["기술 차별점, 강점을 중심으로 4000 토큰 이상 설명"],
-                ["특허 출원에 활용 가능한 혁신 아이디어를 4000 토큰 이상 작성"],
-                ["논문 형식으로 4000 토큰 이상 작성"],
-                ["계속 이어서 답변하라"]
+                ["Explain detailed usage instructions in over 4000 tokens"],
+                ["Generate 20 FAQs in over 4000 tokens"],
+                ["Describe technical differentiators and strengths in over 4000 tokens"],
+                ["Generate innovative ideas for patent applications in over 4000 tokens"],
+                ["Write an academic paper in over 4000 tokens"],
+                ["Continue your answer"]
             ]
             gr.Examples(examples, inputs=msg)
 
@@ -401,7 +390,7 @@ def create_ui():
         "Discover recommended HuggingFace Spaces [here](https://huggingface.co/spaces/openfree/Korean-Leaderboard)."
     )
 
-    # 분석 탭 로직
+    # Analysis tab logic
     space_id_state = gr.State()
     tree_structure_state = gr.State()
     app_py_content_lines = gr.State()
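The `gr.State` components declared here carry server-side values (space ID, tree structure, app.py line count) between event handlers. A minimal sketch of that wiring with an illustrative handler:

```python
import gradio as gr

with gr.Blocks() as demo:
    url_input = gr.Textbox(label="Space URL")
    space_id_state = gr.State()  # holds a per-session value between events

    def analyze(url):
        # Derive the "user/space" ID and stash it for later events.
        return url.rstrip("/").split("spaces/")[-1]

    gr.Button("Analyze").click(analyze, inputs=url_input, outputs=space_id_state)

demo.launch()
```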
 