Update supplemental.py

supplemental.py  CHANGED  (+120 -202)
Old version (removed lines marked "-"):

@@ -1,12 +1,12 @@
  import os
  import json
  import logging
- from typing import List, Dict,
-
- from
- from huggingface_hub import HfApi, InferenceApi
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

  @dataclass
  class ProjectConfig:
      name: str
@@ -59,6 +59,25 @@ class JavaScriptGenerator(WebDevelopmentTool):
          js += "}\n\n"
          return js

  class EnhancedAIAgent:
      def __init__(self, name: str, description: str, skills: List[str], model_name: str):
          self.name = name
@@ -69,41 +88,28 @@ class EnhancedAIAgent:
          self.css_gen_tool = CSSGenerator()
          self.js_gen_tool = JavaScriptGenerator()
          self.hf_api = HfApi()
-         self.
-         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
-         self.model = AutoModelForCausalLM.from_pretrained(model_name)
-         self.text_generation = pipeline("text-generation", model=self.model, tokenizer=self.tokenizer)
          self.logger = logging.getLogger(__name__)

      def generate_agent_response(self, prompt: str) -> str:
          try:
-             response = self.
-
          except Exception as e:
-             self.logger.error(f"Error generating response: {str(e)}")
              return f"Error: Unable to generate response. {str(e)}"

-     def
-
-         for directory, files in project_config.structure.items():
-             for file in files:
-                 file_path = os.path.join(directory, file)
-                 if file.endswith('.html'):
-                     content = self.html_gen_tool.generate_code({"body": f"<h1>{project_config.name}</h1>"})
-                 elif file.endswith('.css'):
-                     content = self.css_gen_tool.generate_code({"body": {"font-family": "Arial, sans-serif"}})
-                 elif file.endswith('.js'):
-                     content = self.js_gen_tool.generate_code([{"name": "init", "params": [], "body": "console.log('Initialized');"}])
-                 else:
-                     content = f"// TODO: Implement {file}"
-                 project_files[file_path] = content
-         return project_files
-
-     import json
-     from json.decoder import JSONDecodeError
-
-     def generate_project_config(self, project_description: str) -> Optional[ProjectConfig]:
-         prompt = f"""
          Based on the following project description, generate a ProjectConfig object:

          Description: {project_description}
@@ -116,185 +122,97 @@ The ProjectConfig should include:

          Respond with a JSON object representing the ProjectConfig.
          """
-

-         try:
-             # Try to find and extract a JSON object from the response
-             json_start = response.find('{')
-             json_end = response.rfind('}') + 1
-             if json_start != -1 and json_end != -1:
-                 json_str = response[json_start:json_end]
-                 config_dict = json.loads(json_str)
-                 return ProjectConfig(**config_dict)
-             else:
-                 raise ValueError("No JSON object found in the response")
-         except (JSONDecodeError, ValueError) as e:
-             self.logger.error(f"Error parsing JSON from response: {str(e)}")
-             self.logger.error(f"Full response from model: {response}")
-
-             # Attempt to salvage partial information
              try:
-
-
-
-
-
-

          return None

-     def
-
-
-         description = self.extract_field(response, "description")
-         technologies = self.extract_list(response, "technologies")
-         structure = self.extract_dict(response, "structure")
-
-         if name and description:
-             return ProjectConfig(
-                 name=name,
-                 description=description,
-                 technologies=technologies or [],
-                 structure=structure or {}
-             )
-         return None
-
-     def extract_field(self, text: str, field: str) -> Optional[str]:
-         """Extract a simple field value from text."""
-         match = re.search(rf'"{field}"\s*:\s*"([^"]*)"', text)
-         return match.group(1) if match else None
-
-     def extract_list(self, text: str, field: str) -> Optional[List[str]]:
-         """Extract a list from text."""
-         match = re.search(rf'"{field}"\s*:\s*\[(.*?)\]', text, re.DOTALL)
-         if match:
-             items = re.findall(r'"([^"]*)"', match.group(1))
-             return items
-         return None
-
-     def extract_dict(self, text: str, field: str) -> Optional[Dict[str, List[str]]]:
-         """Extract a dictionary from text."""
-         match = re.search(rf'"{field}"\s*:\s*\{{(.*?)\}}', text, re.DOTALL)
-         if match:
-             dict_str = match.group(1)
-             result = {}
-             for item in re.finditer(r'"([^"]*)"\s*:\s*\[(.*?)\]', dict_str, re.DOTALL):
-                 key = item.group(1)
-                 values = re.findall(r'"([^"]*)"', item.group(2))
-                 result[key] = values
-             return result
-         return None
-
-     def implement_feature(self, feature_description: str, existing_code: Optional[str] = None) -> str:
-         prompt = f"""
-         Feature to implement: {feature_description}
-
-         Existing code:
-         ```
-         {existing_code if existing_code else 'No existing code provided.'}
-         ```
-
-         Please implement the described feature, modifying the existing code if provided.
-         Respond with only the code, no explanations.
-         """
-         return self.generate_agent_response(prompt)
-
-     def review_code(self, code: str) -> str:
-         prompt = f"""
-         Please review the following code and provide feedback:
-
-         ```
-         {code}
-         ```
-
-         Consider the following aspects in your review:
-         1. Code quality and readability
-         2. Potential bugs or errors
-         3. Adherence to best practices
-         4. Suggestions for improvement
-         Provide your feedback in a structured format.
-         """
-         return self.generate_agent_response(prompt)
-
-     def optimize_code(self, code: str, optimization_goal: str) -> str:
-         prompt = f"""
-         Please optimize the following code with the goal of improving {optimization_goal}:
-
-         ```
-         {code}
-         ```
-
-         Provide only the optimized code in your response, no explanations.
-         """
-         return self.generate_agent_response(prompt)
-
-     def generate_documentation(self, code: str) -> str:
-         prompt = f"""
-         Please generate comprehensive documentation for the following code:
-
-         ```
-         {code}
-         ```
-
-         Include the following in your documentation:
-         1. Overview of the code's purpose
-         2. Description of functions/classes and their parameters
-         3. Usage examples
-         4. Any important notes or considerations
-
-         Provide the documentation in Markdown format.
-         """
-         return self.generate_agent_response(prompt)
-
-     def suggest_tests(self, code: str) -> str:
-         prompt = f"""
-         Please suggest unit tests for the following code:
-
-         ```
-         {code}
-         ```
-
-         For each function or class, provide:
-         1. Test case description
-         2. Input values
-         3. Expected output or behavior

-
-     ""
-
-
-
-
-         Please provide a detailed explanation of the following code:
-
-         ```
-         {code}
-         ```

-
-
-
-
-

-
-
-         return self.generate_agent_response(prompt)

-     def
-
-         Please suggest refactoring improvements for the following code:

-
-
-         ```

-
-
-
-
-

-
-
-
New version (added lines marked "+"):

  import os
  import json
  import logging
+ from typing import List, Dict, Optional
+ import re
+ from huggingface_hub import HfApi, InferenceClient
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

+
  @dataclass
  class ProjectConfig:
      name: str
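The import block swaps the older InferenceApi for huggingface_hub's InferenceClient and adds re and Optional, which the regex-based fallback parsing further down relies on. Only the first field of the ProjectConfig dataclass is visible in this hunk; the sketch below infers its full shape from the plain-class constructor added later in the diff, and is not part of the commit itself.

# Assumed shape of the ProjectConfig dataclass; only "name: str" appears in
# this hunk, the remaining fields are inferred from the constructor added below.
from dataclasses import dataclass, field
from typing import Dict, List

@dataclass
class ProjectConfig:
    name: str
    description: str = ""
    technologies: List[str] = field(default_factory=list)
    structure: Dict[str, List[str]] = field(default_factory=dict)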
          js += "}\n\n"
          return js

+ class ProjectConfig:
+     def __init__(self, name: str, description: str, technologies: List[str], structure: Dict[str, List[str]]):
+         self.name = name
+         self.description = description
+         self.technologies = technologies
+         self.structure = structure
+
+ class HTMLGenerator:
+     def generate(self, content: str) -> str:
+         return f"<html><body>{content}</body></html>"
+
+ class CSSGenerator:
+     def generate(self, styles: Dict[str, str]) -> str:
+         return "\n".join([f"{selector} {{ {'; '.join([f'{prop}: {value}' for prop, value in properties.items()])} }}" for selector, properties in styles.items()])
+
+ class JavaScriptGenerator:
+     def generate(self, functionality: str) -> str:
+         return f"function main() {{ {functionality} }}"
+
  class EnhancedAIAgent:
      def __init__(self, name: str, description: str, skills: List[str], model_name: str):
          self.name = name
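This hunk adds standalone HTMLGenerator, CSSGenerator and JavaScriptGenerator classes (plus a second, non-dataclass ProjectConfig) after the existing WebDevelopmentTool-based generators, which they shadow at import time. A short usage sketch with illustrative inputs; note that CSSGenerator.generate iterates properties.items(), so despite its Dict[str, str] annotation it expects a nested selector-to-properties mapping.

# Usage sketch for the generator classes added in this hunk (illustrative values only).
html = HTMLGenerator().generate("<h1>Demo</h1>")
# -> "<html><body><h1>Demo</h1></body></html>"

# The annotation says Dict[str, str], but the implementation expects nested dicts:
css = CSSGenerator().generate({"body": {"font-family": "Arial, sans-serif", "margin": "0"}})
# -> "body { font-family: Arial, sans-serif; margin: 0 }"

js = JavaScriptGenerator().generate("console.log('Initialized');")
# -> "function main() { console.log('Initialized'); }"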
          self.css_gen_tool = CSSGenerator()
          self.js_gen_tool = JavaScriptGenerator()
          self.hf_api = HfApi()
+         self.inference_client = InferenceClient(model=model_name, token=os.environ.get("HF_API_TOKEN"))
+         self.tokenizer = AutoTokenizer.from_pretrained(model_name, clean_up_tokenization_spaces=True)
+         self.model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
+         self.text_generation = pipeline("text-generation", model=self.model, tokenizer=self.tokenizer, clean_up_tokenization_spaces=True)
          self.logger = logging.getLogger(__name__)
+         self.logger.setLevel(logging.INFO)
+         handler = logging.StreamHandler()
+         formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+         handler.setFormatter(formatter)
+         self.logger.addHandler(handler)

      def generate_agent_response(self, prompt: str) -> str:
          try:
+             response = self.inference_client.text_generation(prompt, max_new_tokens=100)
+             self.logger.info(f"Generated response for prompt: {prompt[:50]}...")
+             return response.generated_text
          except Exception as e:
+             self.logger.error(f"Error generating response: {str(e)}", exc_info=True)
              return f"Error: Unable to generate response. {str(e)}"

+     def generate_project_config(self, project_description: str) -> Optional[ProjectConfig]:
+         prompt = f"""
          Based on the following project description, generate a ProjectConfig object:

          Description: {project_description}
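The constructor now builds an InferenceClient (reading HF_API_TOKEN from the environment) alongside the locally loaded tokenizer, model and pipeline, and generate_agent_response routes generation through the remote client. A hedged sketch of that call path: per the huggingface_hub documentation, text_generation returns a plain string unless details=True is passed, so the .generated_text access in the new code only works on the detailed output object, as requested below.

# Sketch of the remote generation call; the model id is a placeholder and
# HF_API_TOKEN must be set in the environment for the request to succeed.
import os
from huggingface_hub import InferenceClient

client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2", token=os.environ.get("HF_API_TOKEN"))
# details=True makes the client return an object exposing .generated_text;
# with the default details=False the call returns the generated string directly.
out = client.text_generation("Write a one-line summary of Python.", max_new_tokens=100, details=True)
print(out.generated_text)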

          Respond with a JSON object representing the ProjectConfig.
          """
+         response = self.generate_agent_response(prompt)

          try:
+             json_start = response.find('{')
+             json_end = response.rfind('}') + 1
+             if json_start != -1 and json_end != -1:
+                 json_str = response[json_start:json_end]
+                 config_dict = json.loads(json_str)
+                 return ProjectConfig(**config_dict)
+             else:
+                 raise ValueError("No JSON object found in the response")
+         except (json.JSONDecodeError, ValueError) as e:
+             self.logger.error(f"Error parsing JSON from response: {str(e)}")
+             self.logger.error(f"Full response from model: {response}")
+
+             try:
+                 partial_config = self.extract_partial_config(response)
+                 if partial_config:
+                     self.logger.warning("Extracted partial config from malformed response")
+                     return partial_config
+             except Exception as ex:
+                 self.logger.error(f"Failed to extract partial config: {str(ex)}")
+
+             return None
+
+     def extract_partial_config(self, response: str) -> Optional[ProjectConfig]:
+         name = self.extract_field(response, "name")
+         description = self.extract_field(response, "description")
+         technologies = self.extract_list(response, "technologies")
+         structure = self.extract_dict(response, "structure")

+         if name and description:
+             return ProjectConfig(
+                 name=name,
+                 description=description,
+                 technologies=technologies or [],
+                 structure=structure or {}
+             )
          return None

+     def extract_field(self, text: str, field: str) -> Optional[str]:
+         match = re.search(rf'"{field}"\s*:\s*"([^"]*)"', text)
+         return match.group(1) if match else None

+     def extract_list(self, text: str, field: str) -> Optional[List[str]]:
+         match = re.search(rf'"{field}"\s*:\s*\[(.*?)\]', text, re.DOTALL)
+         if match:
+             items = re.findall(r'"([^"]*)"', match.group(1))
+             return items
+         return None

+     def extract_dict(self, text: str, field: str) -> Optional[Dict[str, List[str]]]:
+         match = re.search(rf'"{field}"\s*:\s*\{{(.*?)\}}', text, re.DOTALL)
+         if match:
+             dict_str = match.group(1)
+             result = {}
+             for item in re.finditer(r'"([^"]*)"\s*:\s*\[(.*?)\]', dict_str, re.DOTALL):
+                 key = item.group(1)
+                 values = re.findall(r'"([^"]*)"', item.group(2))
+                 result[key] = values
+             return result
+         return None

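generate_project_config first slices the outermost {...} span out of the model's reply and feeds it to json.loads; only if that fails do the regex helpers above salvage individual fields. A small illustration with a made-up model reply:

# Illustration of the two-stage parsing added above, using a made-up model reply.
import json
import re

reply = ('Here is the config: {"name": "todo-app", "description": "A to-do list", '
         '"technologies": ["HTML", "CSS"], "structure": {"src": ["index.html", "app.js"]}} Hope it helps!')

start, end = reply.find('{'), reply.rfind('}') + 1
config_dict = json.loads(reply[start:end])            # stage 1: slice and parse the outermost JSON object
assert config_dict["technologies"] == ["HTML", "CSS"]

# Stage 2 (fallback): the regex helpers still recover fields from non-JSON replies.
name = re.search(r'"name"\s*:\s*"([^"]*)"', reply).group(1)   # -> "todo-app"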
+     def generate_html(self, content: str) -> str:
+         return self.html_gen_tool.generate(content)

+     def generate_css(self, styles: Dict[str, str]) -> str:
+         return self.css_gen_tool.generate(styles)

+     def generate_javascript(self, functionality: str) -> str:
+         return self.js_gen_tool.generate(functionality)

+     def create_project_files(self, config: ProjectConfig) -> Dict[str, str]:
+         files = {}
+         for directory, file_list in config.structure.items():
+             for file in file_list:
+                 file_path = os.path.join(directory, file)
+                 if file.endswith('.html'):
+                     files[file_path] = self.generate_html(f"Content for {file}")
+                 elif file.endswith('.css'):
+                     files[file_path] = self.generate_css({"body": {"font-family": "Arial, sans-serif"}})
+                 elif file.endswith('.js'):
+                     files[file_path] = self.generate_javascript(f"console.log('Script for {file}');")
+                 else:
+                     files[file_path] = f"Content for {file}"
+         return files

+     def execute_project(self, project_description: str) -> Dict[str, str]:
+         config = self.generate_project_config(project_description)
+         if config:
+             return self.create_project_files(config)
+         else:
+             self.logger.error("Failed to generate project configuration")
+             return {}
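Taken together, the additions give the agent a single entry point: execute_project generates a ProjectConfig from a natural-language description and then materializes its file structure with the generator helpers. A hedged end-to-end sketch; the agent arguments and model id are placeholders, and the constructor will download the model weights and expects HF_API_TOKEN to be set.

# End-to-end sketch of the new workflow (placeholder names and model id).
agent = EnhancedAIAgent(
    name="WebBuilder",
    description="Generates small web projects",
    skills=["html", "css", "javascript"],
    model_name="mistralai/Mistral-7B-Instruct-v0.2",   # placeholder model id
)

project_files = agent.execute_project("A single-page portfolio site with a contact form")
for path, content in project_files.items():
    print(path)   # e.g. "src/index.html"
    # To write the files out:
    # os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    # with open(path, "w") as fh:
    #     fh.write(content)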