saksornr committed on
Commit 891c45c · verified · 1 Parent(s): 42d7d63

Upload 3 files

Files changed (3)
  1. app.py +280 -0
  2. data/agent_bank.json +64 -0
  3. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,280 @@
+ import asyncio
+ import json
+ import gradio as gr
+ from openai import AsyncOpenAI, OpenAI
+ from dotenv import load_dotenv
+ import os
+
+ # Load environment variables
+ load_dotenv()
+
+ # Configuration
+ XAI_API_KEY = os.getenv("XAI_API_KEY")
+ client = AsyncOpenAI(
+     api_key=XAI_API_KEY,
+     base_url="https://api.x.ai/v1",
+ )
+
+ simple_client = OpenAI(
+     api_key=XAI_API_KEY,
+     base_url="https://api.x.ai/v1",
+ )
+
+ # Load agent personalities
+ with open('data/agent_bank.json', 'r') as f:
+     AGENT_BANK = json.load(f)['agents']
+
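+ # Coordinates the agent bank: runs the first-stage policy analyses in parallel
+ # and keeps per-agent conversation histories for follow-up questions.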
+ class MultiAgentConversationalSystem:
+     def __init__(self, api_client):
+         self.client = api_client
+         self.agents = AGENT_BANK
+         self.first_stage_results = []
+         self.conversation_histories = {}
+         self.manager_agent = {
+             "first_name": "Alex",
+             "last_name": "Policymaker",
+             "expertise": "Policy Strategy and Synthesis",
+             "personality": "Strategic, analytical, and focused on comprehensive understanding"
+         }
+
+     async def first_stage_analysis(self, policy):
+         """First stage: Agents analyze policy and provide reasoning with yes/no answer"""
+         async def agent_policy_analysis(agent):
+             agent_context = "\n".join([
+                 f"{key}: {value}" for key, value in agent.items()
+             ])
+
+             prompt = f"""
+             Agent Profile:
+             {agent_context}
+
+             Policy/Topic: {policy}
+
+             Task:
+             1. Carefully analyze the policy/topic using ALL aspects of your defined personality and expertise.
+             2. Provide a clear YES or NO answer.
+             3. Explain your reasoning in 2-3 detailed paragraphs.
+             4. Leverage every aspect of your defined characteristics to provide a comprehensive analysis.
+
+             Format your response as:
+             - Agent: {agent['first_name']} {agent['last_name']}
+             - Answer: YES/NO
+             - Reasoning: [Detailed explanation drawing from ALL your defined attributes]
+             """
+
+             try:
+                 response = await self.client.chat.completions.create(
+                     model="grok-beta",
+                     messages=[{"role": "user", "content": prompt}]
+                 )
+                 agent_response = {
+                     "full_name": f"{agent['first_name']} {agent['last_name']}",
+                     "expertise": agent['expertise'],
+                     "full_agent_context": agent,
+                     "full_response": response.choices[0].message.content
+                 }
+
+                 return agent_response
+             except Exception as e:
+                 return {
+                     "full_name": f"{agent['first_name']} {agent['last_name']}",
+                     "full_agent_context": agent,
+                     "full_response": f"Error: {str(e)}"
+                 }
+
+         tasks = [agent_policy_analysis(agent) for agent in self.agents]
+         self.first_stage_results = await asyncio.gather(*tasks)
+
+         # {chr(10).join([f"- {result['full_name']}: {result['full_response'].split('Reasoning:')[1].strip()}" for result in self.first_stage_results])}
+
+         summary_prompt = f"""
+         Policy/Topic: {policy}
+
+         Agent Analyses Summary:
+         {self.first_stage_results}
+
+         Your Task:
+         1. Synthesize the diverse agent perspectives into a comprehensive policy overview.
+         2. Identify key insights, potential challenges, and strategic recommendations.
+         3. Provide a balanced and strategic assessment of the policy.
+         """
+
+         manager_name = f"{self.manager_agent['first_name']} {self.manager_agent['last_name']}"
+         self.conversation_histories[manager_name] = [
+             {"role": "system", "content": f"""
+             You are {manager_name}, a strategic policy analyst with expertise in {self.manager_agent['expertise']}.
+             You synthesize complex perspectives and provide strategic policy insights.
+
+             Initial Policy Summary:
+             {summary_prompt}
+             """}
+         ]
+
+         return self.first_stage_results
+
+     async def manager_summary(self, policy):
+         try:
+             response = await self.client.chat.completions.create(
+                 model="grok-beta",
+                 messages=[{"role": "user", "content": f"""Summarize this.\n\n{policy}"""}],
+                 stream=False
+             )
+
+             manager_summary = response.choices[0].message.content
+             return manager_summary
+
+         except Exception as e:
+             return f"Summary generation error: {str(e)}"
+
+     async def agent_conversation(self, agent_name, message, history):
+         if agent_name not in self.conversation_histories:
+             agent_context = next((agent for agent in self.first_stage_results
+                                   if f"{agent['full_agent_context']['first_name']} {agent['full_agent_context']['last_name']}" == agent_name),
+                                  None)
+             if not agent_context:
+                 return "Agent not found."
+
+             self.conversation_histories[agent_name] = [
+                 {"role": "system", "content": f"""
+                 You are {agent_name}, an agent with the following profile:
+                 Expertise: {agent_context['expertise']}
+
+                 Approach the conversation from your unique perspective,
+                 drawing on your expertise and personality.
+                 """}
+             ]
+
+         conversation_history = self.conversation_histories[agent_name].copy()
+         conversation_history.append({"role": "user", "content": message})
+
+         try:
+             response = await self.client.chat.completions.create(
+                 model="grok-beta",
+                 messages=conversation_history,
+                 stream=False  # the full message is read below, so streaming must stay disabled
+             )
+
+             agent_response = response.choices[0].message.content
+             self.conversation_histories[agent_name].append(
+                 {"role": "user", "content": message}
+             )
+             self.conversation_histories[agent_name].append(
+                 {"role": "assistant", "content": agent_response}
+             )
+
+             return agent_response
+
+         except Exception as e:
+             return f"Conversation error: {str(e)}"
+
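+ # Second stage: a streaming chat with the policymaker persona, seeded with the
+ # first-stage policy summary as context.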
+ # Chat
+ def predict(message, history, policy_summary):
+
+     system_prompt = """\
+ You are an assistant that works as a policymaker, with expertise in Policy Strategy and Synthesis.
+ Your personality is strategic, analytical, and focused on comprehensive understanding.
+ """
+
+     policy_summary_prompt = f"""\
+ Here is the policy summary from the professional roles in the country.
+ {policy_summary}
+ """
+
+     history_openai_format = [{"role": "system", "content": system_prompt}]
+     history_openai_format.append({"role": "user", "content": policy_summary_prompt})
+
+     for human, assistant in history:
+         if isinstance(human, str) and human.strip():
+             history_openai_format.append({"role": "user", "content": human})
+         if isinstance(assistant, str) and assistant.strip():
+             history_openai_format.append({"role": "assistant", "content": assistant})
+
+     history_openai_format.append({"role": "user", "content": message})
+
+     print("history_openai_format:", history_openai_format)
+
+     response = simple_client.chat.completions.create(
+         model='grok-beta',
+         messages=history_openai_format,
+         temperature=0.6,
+         stream=True
+     )
+
+     partial_message = ""
+     for chunk in response:
+         if chunk.choices[0].delta.content is not None:
+             partial_message += chunk.choices[0].delta.content
+             yield partial_message
+
+ def chat_bot(user_input, history, policy_summary):
+     bot_response_generator = predict(user_input, history, policy_summary)
+     history.append((user_input, ""))
+
+     for bot_response in bot_response_generator:
+         history[-1] = (user_input, bot_response)
+         yield "", history
+
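+ # Gradio UI: tab 1 runs the parallel agent analysis, tab 2 chats with the
+ # policymaker persona using that analysis as the policy summary.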
+ def create_gradio_interface():
+     multi_agent_system = MultiAgentConversationalSystem(client)
+
+     def get_manager_summary(policy):
+         summary = asyncio.run(multi_agent_system.manager_summary(policy))
+         return summary
+
+     def agent_chat(agent_name, message, history, summary_policy):
+         response = asyncio.run(multi_agent_system.agent_conversation(agent_name, message, history))  # agent_conversation only accepts (agent_name, message, history)
+         history.append((message, response))
+         return "", history
+
+     def first_stage_process(policy):
+         gr.Info("Running agents in parallel, please wait...")
+         results = asyncio.run(multi_agent_system.first_stage_analysis(policy))
+         formatted_output = "🔍 First Stage: Agent Policy Analyses\n\n"
+         for result in results:
+             formatted_output += f"**{result['full_name']}:**\n{result['full_response']}\n\n{'='*50}\n\n"
+         gr.Info("Agent analysis done!")
+
+         return formatted_output
+
+     with gr.Blocks() as demo:
+         gr.Markdown("# 🌐 Two-Stage Multi-Agent Policy Analysis")
+
+         with gr.Tab("First Stage: Policy Analysis"):
+             policy_input = gr.Textbox(label="Policy/Topic")
+             first_stage_btn = gr.Button("Analyze Policy")
+             policy_summary = gr.Markdown(label="Agent Perspectives")
+
+             first_stage_btn.click(
+                 fn=first_stage_process,
+                 inputs=policy_input,
+                 outputs=[policy_summary]
+             )
+
+         with gr.Tab("Second Stage: Chat with Policy Maker"):
+             chatbot = gr.Chatbot(elem_id="chatbot")
+             msg = gr.Textbox(placeholder="Put your message here...")
+
+             with gr.Row():
+                 clear = gr.Button("Clear History")
+                 send = gr.Button("Send Message", variant="primary")
+
+             gr.Examples(
+                 examples=[
+                     "Should I implement this?",
+                     "Can you recommend what I should do?",
+                 ],
+                 inputs=msg,
+             )
+
+             clear.click(lambda: [], [], chatbot)
+             msg.submit(chat_bot, [msg, chatbot, policy_summary], [msg, chatbot])
+             send.click(chat_bot, [msg, chatbot, policy_summary], [msg, chatbot])
+
+     return demo
+
+ def main():
+     app = create_gradio_interface()
+     app.launch()
+
+ if __name__ == "__main__":
+     main()
data/agent_bank.json ADDED
@@ -0,0 +1,64 @@
+ {
+     "agents": [
+         {
+             "first_name": "Alex",
+             "last_name": "Chen",
+             "personality": "Enthusiastic about technological advancements, believes AI and technology can solve most global challenges",
+             "expertise": "Technology and Innovation",
+             "core_values": [
+                 "Technological progress",
+                 "Innovation",
+                 "Transformative potential of AI"
+             ],
+             "communication_style": "Excited, forward-looking, solution-oriented",
+             "biases": [
+                 "Tendency to overestimate technological solutions",
+                 "Potential underestimation of implementation challenges"
+             ],
+             "key_motivations": [
+                 "Pushing technological boundaries",
+                 "Solving complex problems through innovation"
+             ]
+         },
+         {
+             "first_name": "Elena",
+             "last_name": "Rodriguez",
+             "personality": "Deeply concerned with ethical implications of technological developments, prioritizes human welfare",
+             "expertise": "Ethics and Policy",
+             "core_values": [
+                 "Human rights",
+                 "Ethical considerations",
+                 "Long-term societal impact"
+             ],
+             "communication_style": "Measured, principled, critically analytical",
+             "biases": [
+                 "Potential overcautiousness",
+                 "Risk-averse approach to innovation"
+             ],
+             "key_motivations": [
+                 "Protecting human interests",
+                 "Ensuring responsible technological development"
+             ]
+         },
+         {
+             "first_name": "David",
+             "last_name": "Goldman",
+             "personality": "Focuses on economic impact, cost-benefit analysis, and market potential of innovations",
+             "expertise": "Economics and Finance",
+             "core_values": [
+                 "Economic efficiency",
+                 "Market dynamics",
+                 "Financial sustainability"
+             ],
+             "communication_style": "Quantitative, data-driven, pragmatic",
+             "biases": [
+                 "Potential prioritization of financial metrics",
+                 "Risk of overlooking non-economic factors"
+             ],
+             "key_motivations": [
+                 "Understanding economic implications",
+                 "Identifying potential market opportunities"
+             ]
+         }
+     ]
+ }
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ openai==1.56.2
+ python-dotenv==1.0.1
+ gradio
+ ipython