acecalisto3 committed on
Commit
80dbb6b
·
verified ·
1 Parent(s): f9f73dd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +169 -179
app.py CHANGED
@@ -1,43 +1,13 @@
 
1
  import os
2
- import sys
3
- import subprocess
4
- import streamlit as st
5
  from huggingface_hub import InferenceClient
6
- import gradio as gr
7
- import random
8
- import prompts
9
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
10
- import black
11
- from pylint import lint
12
- from io import StringIO
13
-
14
- # Initialize the InferenceClient for Mixtral-8x7B-Instruct-v0.1
15
- client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
16
-
17
- # Initialize the pipeline for Llama-3-8B-Instruct-Coder-GGUF
18
- pipe = pipeline("text-generation", model="bartowski/Llama-3-8B-Instruct-Coder-GGUF")
19
-
20
- HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
21
- PROJECT_ROOT = "projects"
22
- AGENT_DIRECTORY = "agents"
23
-
24
- # Global state management
25
- if 'chat_history' not in st.session_state:
26
- st.session_state.chat_history = []
27
- if 'terminal_history' not in st.session_state:
28
- st.session_state.terminal_history = []
29
- if 'workspace_projects' not in st.session_state:
30
- st.session_state.workspace_projects = {}
31
- if 'available_agents' not in st.session_state:
32
- st.session_state.available_agents = []
33
- if 'current_state' not in st.session_state:
34
- st.session_state.current_state = {
35
- 'toolbox': {},
36
- 'workspace_chat': {}
37
- }
38
-
39
- # Define the agents
40
- agents = [
41
  "WEB_DEV",
42
  "AI_SYSTEM_PROMPT",
43
  "PYTHON_CODE_DEV",
@@ -47,177 +17,197 @@ agents = [
47
  "HUGGINGFACE_FILE_DEV",
48
  ]
49
 
 
 
 
 
50
  class AIAgent:
51
- def __init__(self, name, description, skills):
52
  self.name = name
53
  self.description = description
54
  self.skills = skills
55
 
56
- def create_agent_prompt(self):
57
  skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
58
- agent_prompt = f"""
59
- As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
60
- {skills_str}
61
- I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
62
- """
63
- return agent_prompt
64
-
65
- def autonomous_build(self, chat_history, workspace_projects):
 
66
  summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
67
  summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
68
  next_step = "Based on the current state, the next logical step is to implement the main application logic."
69
  return summary, next_step
70
 
71
- def format_prompt(message, history):
 
 
 
 
 
72
  prompt = "<s>"
73
  for user_prompt, bot_response in history:
74
- prompt += f"[INST] {user_prompt} [/INST]"
75
- prompt += f" {bot_response}</s> "
76
  prompt += f"[INST] {message} [/INST]"
77
  return prompt
78
 
79
- def generate(prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
 
80
  seed = random.randint(1, 1111111111111111)
81
- agent = getattr(prompts, agent_name, prompts.WEB_DEV_SYSTEM_PROMPT)
82
- system_prompt = agent
83
-
84
  generate_kwargs = dict(
85
- temperature=float(temperature),
86
  max_new_tokens=max_new_tokens,
87
- top_p=float(top_p),
88
  repetition_penalty=repetition_penalty,
89
  do_sample=True,
90
  seed=seed,
91
  )
92
-
93
- formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
94
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
95
  output = ""
96
-
97
  for response in stream:
98
  output += response.token.text
99
  yield output
100
  return output
101
 
102
- def chat_interface(chat_input):
103
- response = generate(chat_input, st.session_state.chat_history)
104
- return response
105
-
106
- def chat_interface_with_agent(chat_input, agent_name):
107
- agent_prompt = getattr(prompts, agent_name, prompts.WEB_DEV_SYSTEM_PROMPT)
108
- response = generate(chat_input, st.session_state.chat_history, agent_name=agent_name, sys_prompt=agent_prompt)
109
- return response
110
-
111
- def terminal_interface(command, project_name):
112
- # Implement terminal functionality here
113
- return f"Executed command: {command} in project: {project_name}"
114
-
115
- def add_code_to_workspace(project_name, code, file_name):
116
- if project_name not in st.session_state.workspace_projects:
117
- st.session_state.workspace_projects[project_name] = {'files': []}
118
- st.session_state.workspace_projects[project_name]['files'].append(file_name)
119
  return f"Added {file_name} to {project_name}"
120
 
121
- # Streamlit UI
122
- st.title("DevToolKit: AI-Powered Development Environment")
123
-
124
- # Project Management
125
- st.header("Project Management")
126
- project_name = st.text_input("Enter project name:")
127
- if st.button("Create Project"):
128
- if project_name not in st.session_state.workspace_projects:
129
- st.session_state.workspace_projects[project_name] = {'files': []}
130
- st.success(f"Created project: {project_name}")
131
- else:
132
- st.warning(f"Project {project_name} already exists")
133
-
134
- # Code Addition
135
- st.subheader("Add Code to Workspace")
136
- code_to_add = st.text_area("Enter code to add to workspace:")
137
- file_name = st.text_input("Enter file name (e.g. 'app.py'):")
138
- if st.button("Add Code"):
139
- add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
140
- st.success(add_code_status)
141
-
142
- # Terminal Interface
143
- st.subheader("Terminal (Workspace Context)")
144
- terminal_input = st.text_input("Enter a command within the workspace:")
145
- if st.button("Run Command"):
146
- terminal_output = terminal_interface(terminal_input, project_name)
147
- st.code(terminal_output, language="bash")
148
-
149
- # Chat Interface
150
- st.subheader("Chat with DevToolKit for Guidance")
151
- chat_input = st.text_area("Enter your message for guidance:")
152
- if st.button("Get Guidance"):
153
- chat_response = chat_interface(chat_input)
154
- st.session_state.chat_history.append((chat_input, chat_response))
155
- st.write(f"DevToolKit: {chat_response}")
156
-
157
- # Display Chat History
158
- st.subheader("Chat History")
159
- for user_input, response in st.session_state.chat_history:
160
- st.write(f"User: {user_input}")
161
- st.write(f"DevToolKit: {response}")
162
-
163
- # Display Terminal History
164
- st.subheader("Terminal History")
165
- for command, output in st.session_state.terminal_history:
166
- st.write(f"Command: {command}")
167
- st.code(output, language="bash")
168
-
169
- # Display Projects and Files
170
- st.subheader("Workspace Projects")
171
- for project, details in st.session_state.workspace_projects.items():
172
- st.write(f"Project: {project}")
173
- for file in details['files']:
174
- st.write(f" - {file}")
175
-
176
- # Chat with AI Agents
177
- st.subheader("Chat with AI Agents")
178
- selected_agent = st.selectbox("Select an AI agent", agents)
179
- agent_chat_input = st.text_area("Enter your message for the agent:")
180
- if st.button("Send to Agent"):
181
- agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
182
- st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
183
- st.write(f"{selected_agent}: {agent_chat_response}")
184
-
185
- # Automate Build Process
186
- st.subheader("Automate Build Process")
187
- if st.button("Automate"):
188
- agent = AIAgent(selected_agent, "", []) # Load the agent without skills for now
189
- summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
190
- st.write("Autonomous Build Summary:")
191
- st.write(summary)
192
- st.write("Next Step:")
193
- st.write(next_step)
194
-
195
- # Display current state for debugging
196
- st.sidebar.subheader("Current State")
197
- st.sidebar.json(st.session_state.current_state)
198
-
199
- # Gradio Interface
200
- additional_inputs = [
201
- gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True),
202
- gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
203
- gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
204
- gr.Slider(label="Max new tokens", value=1048*10, minimum=0, maximum=1000*10, step=64, interactive=True, info="The maximum numbers of new tokens"),
205
- gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
206
- gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
207
- ]
208
 
209
- examples = [
210
- ["Create a simple web application using Flask", agents[0], None, None, None, None, ],
211
- ["Generate a Python script to perform a linear regression analysis", agents[2], None, None, None, None, ],
212
- ["Create a Dockerfile for a Node.js application", agents[1], None, None, None, None, ],
213
- # Add more examples as needed
214
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
215
 
216
- gr.ChatInterface(
217
- fn=generate,
218
- chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
219
- additional_inputs=additional_inputs,
220
- title="DevToolKit AI Assistant",
221
- examples=examples,
222
- concurrency_limit=20,
223
- ).launch(show_api=True)
 
1
# config.py
import os

from huggingface_hub import InferenceClient
from transformers import pipeline

# Initialize clients and models.
# NOTE(review): both objects are created eagerly at import time, so importing
# this module triggers network access and a large model load -- confirm that
# is acceptable for the deployment target.
MIXTRAL_CLIENT = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
# NOTE(review): this repo holds GGUF weights; transformers' pipeline() does
# not load GGUF checkpoints without extra arguments, and LLAMA_PIPELINE is
# never referenced elsewhere in this change -- verify it is needed at all.
LLAMA_PIPELINE = pipeline("text-generation", model="bartowski/Llama-3-8B-Instruct-Coder-GGUF")
9
+
10
+ AGENTS = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  "WEB_DEV",
12
  "AI_SYSTEM_PROMPT",
13
  "PYTHON_CODE_DEV",
 
17
  "HUGGINGFACE_FILE_DEV",
18
  ]
19
 
20
# ai_agent.py
import random  # NOTE: unused in this module; kept because other modules may rely on it
from typing import List, Dict, Tuple

class AIAgent:
    """A named development agent described by a free-text blurb and a skill list."""

    def __init__(self, name: str, description: str, skills: List[str]):
        self.name = name
        self.description = description
        self.skills = skills

    def create_agent_prompt(self) -> str:
        """Return this agent's self-introduction system prompt."""
        bullet_list = '\n'.join(f"* {skill}" for skill in self.skills)
        return f"""
As an elite expert developer, my name is {self.name}.
I possess a comprehensive understanding of the following areas:
{bullet_list}
I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications.
Please feel free to ask any questions or present any challenges you may encounter.
"""

    def autonomous_build(self, chat_history: List[Tuple[str, str]], workspace_projects: Dict[str, Dict]) -> Tuple[str, str]:
        """Summarize the chat history and workspace, and suggest the next build step."""
        turns = "\n".join(f"User: {asked}\nAgent: {answered}" for asked, answered in chat_history)
        projects = "\n".join(f"{project}: {details}" for project, details in workspace_projects.items())
        summary = f"Chat History:\n{turns}\n\nWorkspace Projects:\n{projects}"
        next_step = "Based on the current state, the next logical step is to implement the main application logic."
        return summary, next_step
45
 
46
# utils.py
import os
import subprocess
from typing import List, Tuple

def format_prompt(message: str, history: List[Tuple[str, str]]) -> str:
    """Encode a chat history plus the new message in Mixtral's [INST] chat format."""
    pieces = ["<s>"]
    for asked, answered in history:
        pieces.append(f"[INST] {asked} [/INST] {answered}</s> ")
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
57
 
58
def generate(prompt: str, history: List[Tuple[str, str]], agent_name: str = "WEB_DEV", sys_prompt: str = "",
             temperature: float = 0.9, max_new_tokens: int = 256, top_p: float = 0.95, repetition_penalty: float = 1.0):
    """Stream a Mixtral completion for `prompt` given a chat `history`.

    Yields the accumulated output string after each generated token; the final
    yielded value is the complete response.

    BUG FIXES:
    - `random` was used but never imported in utils.py (NameError on call);
      imported locally below.
    - the default `agent_name=AGENTS[0]` referenced `AGENTS`, which utils.py
      never imports (NameError at definition time); replaced with the literal
      "WEB_DEV", which is the same value.
    NOTE(review): `agent_name` is accepted for interface compatibility but no
    longer selects a system prompt (the pre-refactor version looked it up in
    the `prompts` module) -- confirm this is intended.
    """
    import random  # local import: utils.py has no module-level `import random`

    # A fresh seed per call so identical requests can produce varied outputs.
    seed = random.randint(1, 1111111111111111)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=seed,
    )
    # Only prepend the system prompt when one was supplied; the previous
    # unconditional f"{sys_prompt}, {prompt}" produced a stray leading ", ".
    full_prompt = f"{sys_prompt}, {prompt}" if sys_prompt else prompt
    formatted_prompt = format_prompt(full_prompt, history)
    stream = MIXTRAL_CLIENT.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output  # exposed via StopIteration.value; kept for compatibility
76
 
77
def terminal_interface(command: str, project_name: str) -> str:
    """Run a shell command with the project directory as working directory.

    Returns stdout when the command exits with code 0, stderr otherwise; any
    exception raised while launching (e.g. the project directory not existing)
    is returned as its string form instead of propagating.
    NOTE(review): shell=True executes arbitrary user input -- acceptable only
    because this deliberately implements a user-facing terminal feature.
    """
    try:
        completed = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_name)
        if completed.returncode == 0:
            return completed.stdout
        return completed.stderr
    except Exception as e:
        return str(e)
83
+
84
def add_code_to_workspace(project_name: str, code: str, file_name: str) -> str:
    """Write `code` to <cwd>/<project_name>/<file_name>, creating the directory.

    Returns a human-readable status string.

    NOTE(review): unlike the pre-refactor version, this no longer records the
    file in st.session_state.workspace_projects, so the "Workspace Projects"
    listing in the UI will not show it -- confirm this is intended.
    NOTE(review): `file_name` is not sanitized; a value like "../x" escapes
    the project directory.
    """
    project_path = os.path.join(os.getcwd(), project_name)
    os.makedirs(project_path, exist_ok=True)
    file_path = os.path.join(project_path, file_name)
    # Explicit encoding: the default is platform-dependent (e.g. cp1252 on
    # Windows), which would corrupt non-ASCII source code.
    with open(file_path, 'w', encoding='utf-8') as file:
        file.write(code)
    return f"Added {file_name} to {project_name}"
91
 
92
+ # main.py
93
+ import streamlit as st
94
+ import gradio as gr
95
+ from config import AGENTS
96
+ from ai_agent import AIAgent
97
+ from utils import generate, terminal_interface, add_code_to_workspace
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
# Streamlit UI
def _init_session_state() -> None:
    """Create every st.session_state key main() reads, if missing.

    BUG FIX: the refactor dropped the old module-level initialization, so the
    first access to st.session_state.workspace_projects (etc.) crashed.
    """
    defaults = {
        'chat_history': [],
        'terminal_history': [],
        'workspace_projects': {},
        'available_agents': [],
        'current_state': {'toolbox': {}, 'workspace_chat': {}},
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value


def _final_output(stream) -> str:
    """Drain a generator yielding progressively longer outputs; return the last.

    BUG FIX: the previous code used next(generate(...)), which returned only
    the FIRST streamed token instead of the complete response.
    """
    output = ""
    for output in stream:
        pass
    return output


def main():
    """Render the DevToolKit Streamlit UI."""
    _init_session_state()

    st.title("DevToolKit: AI-Powered Development Environment")

    # Project Management
    st.header("Project Management")
    project_name = st.text_input("Enter project name:")
    if st.button("Create Project"):
        if project_name not in st.session_state.workspace_projects:
            st.session_state.workspace_projects[project_name] = {'files': []}
            st.success(f"Created project: {project_name}")
        else:
            st.warning(f"Project {project_name} already exists")

    # Code Addition
    st.subheader("Add Code to Workspace")
    code_to_add = st.text_area("Enter code to add to workspace:")
    file_name = st.text_input("Enter file name (e.g. 'app.py'):")
    if st.button("Add Code"):
        add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
        st.success(add_code_status)

    # Terminal Interface
    st.subheader("Terminal (Workspace Context)")
    terminal_input = st.text_input("Enter a command within the workspace:")
    if st.button("Run Command"):
        terminal_output = terminal_interface(terminal_input, project_name)
        # NOTE(review): the output is shown but never appended to
        # st.session_state.terminal_history, so the history section below
        # stays empty -- confirm intended.
        st.code(terminal_output, language="bash")

    # Chat Interface
    st.subheader("Chat with DevToolKit for Guidance")
    chat_input = st.text_area("Enter your message for guidance:")
    if st.button("Get Guidance"):
        chat_response = _final_output(generate(chat_input, st.session_state.chat_history))
        st.session_state.chat_history.append((chat_input, chat_response))
        st.write(f"DevToolKit: {chat_response}")

    # Display Chat History
    st.subheader("Chat History")
    for user_input, response in st.session_state.chat_history:
        st.write(f"User: {user_input}")
        st.write(f"DevToolKit: {response}")

    # Display Terminal History
    st.subheader("Terminal History")
    for command, output in st.session_state.terminal_history:
        st.write(f"Command: {command}")
        st.code(output, language="bash")

    # Display Projects and Files
    st.subheader("Workspace Projects")
    for project, details in st.session_state.workspace_projects.items():
        st.write(f"Project: {project}")
        for file in details['files']:
            st.write(f" - {file}")

    # Chat with AI Agents
    st.subheader("Chat with AI Agents")
    selected_agent = st.selectbox("Select an AI agent", AGENTS)
    agent_chat_input = st.text_area("Enter your message for the agent:")
    if st.button("Send to Agent"):
        agent_chat_response = _final_output(generate(agent_chat_input, st.session_state.chat_history, agent_name=selected_agent))
        st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
        st.write(f"{selected_agent}: {agent_chat_response}")

    # Automate Build Process
    st.subheader("Automate Build Process")
    if st.button("Automate"):
        agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
        summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
        st.write("Autonomous Build Summary:")
        st.write(summary)
        st.write("Next Step:")
        st.write(next_step)

    # Display current state for debugging
    st.sidebar.subheader("Current State")
    st.sidebar.json(st.session_state.current_state)


if __name__ == "__main__":
    main()
180
+
181
# gradio_interface.py
import gradio as gr
from config import AGENTS
from utils import generate

def create_gradio_interface():
    """Build the Gradio ChatInterface wired to `generate`.

    Returns the (unlaunched) gr.ChatInterface so the caller decides how and
    where to run it.
    """
    additional_inputs = [
        gr.Dropdown(label="Agents", choices=list(AGENTS), value=AGENTS[0], interactive=True),
        gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
        gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
        # BUG FIX: the default value (1048*10 = 10480) exceeded the previous
        # maximum (1000*10 = 10000); the maximum now accommodates the default.
        gr.Slider(label="Max new tokens", value=1048*10, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum numbers of new tokens"),
        gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
        gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
    ]

    # Example rows: (message, agent, sys_prompt, temperature, max_new_tokens, top_p)
    examples = [
        ["Create a simple web application using Flask", AGENTS[0], None, None, None, None],
        ["Generate a Python script to perform a linear regression analysis", AGENTS[2], None, None, None, None],
        ["Create a Dockerfile for a Node.js application", AGENTS[1], None, None, None, None],
    ]

    return gr.ChatInterface(
        fn=generate,
        chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
        additional_inputs=additional_inputs,
        title="DevToolKit AI Assistant",
        examples=examples,
        concurrency_limit=20,
    )

if __name__ == "__main__":
    interface = create_gradio_interface()
    interface.launch(show_api=True)