Update app.py
app.py CHANGED
@@ -3,50 +3,66 @@ import datetime
 import requests
 import pytz
 import yaml
+import io
+import base64
+import schemdraw
+import schemdraw.elements as elm
 from tools.final_answer import FinalAnswerTool
-
 from Gradio_UI import GradioUI
 
 @tool
-def
+def text_to_flowchart(steps_text: str) -> str:
     """
-
+    Generates a flowchart diagram from pre-processed text that lists sequential process steps.
+    The input should be a text with each step on a new line (optionally prefixed by bullet markers).
 
     Args:
-
-        rounds: Number of exchange rounds between the two AI models.
+        steps_text: A string containing the process steps.
 
     Returns:
-        A
+        A data URL for a PNG image of the generated flowchart.
     """
-
+    # Parse steps from the input text
+    parsed_steps = []
+    for line in steps_text.splitlines():
+        line = line.strip()
+        if line.startswith(("-", "*", "•")):
+            line = line[1:].strip()
+        elif line and line[0].isdigit():
+            dot_index = line.find('.')
+            if dot_index != -1:
+                line = line[dot_index+1:].strip()
+        if line:
+            parsed_steps.append(line)
+    if not parsed_steps:
+        parsed_steps = ["No steps provided."]
+
+    # Create the flowchart using SchemDraw
+    d = schemdraw.Drawing()
+    d.add(elm.Start(label="Start"))
+    for step in parsed_steps:
+        d.add(elm.Arrow())
+        d.add(elm.Process().label(step))
+    d.add(elm.Arrow())
+    d.add(elm.End(label="End"))
 
-
-
-
-
-
-
-
-        transcript += f"AI Alpha: {alpha_response}\n\n"
-
-        # AI Beta's turn
-        beta_prompt = (
-            f"Round {i+1} - You are AI Beta. Provide your counterargument on the topic '{topic}' "
-            f"given the following discussion transcript:\n\n{transcript}\nYour response:"
-        )
-        beta_response = model.generate(prompt=beta_prompt)
-        transcript += f"AI Beta: {beta_response}\n\n"
+    # Render diagram to PNG and encode as a data URL
+    buf = io.BytesIO()
+    d.draw()
+    d.fig.savefig(buf, format='png', bbox_inches='tight')
+    buf.seek(0)
+    encoded_image = base64.b64encode(buf.getvalue()).decode('utf-8')
+    data_url = f"data:image/png;base64,{encoded_image}"
 
-    return
+    return data_url
 
 @tool
 def get_current_time_in_timezone(timezone: str) -> str:
     """
-
+    Fetches the current local time for a specified timezone.
 
     Args:
-        timezone: A
+        timezone: A valid timezone string (e.g., 'America/New_York').
 
     Returns:
         A string with the current local time or an error message.

@@ -71,16 +87,16 @@ image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_co
 
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
-
+
 agent = CodeAgent(
     model=model,
-    tools=[
+    tools=[text_to_flowchart, final_answer],
     max_steps=6,
     verbosity_level=1,
     grammar=None,
     planning_interval=None,
-    name=
-    description=
+    name="Flowchart Generator",
+    description="Generates a flowchart diagram from pre-processed sequential steps using SchemDraw.",
     prompt_templates=prompt_templates
 )
 
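
A note for anyone reusing the new tool: SchemDraw's flowchart symbols (start/end terminals, process boxes) live in the schemdraw.flow module rather than in schemdraw.elements, so the drawing section of the committed code may need adjusting depending on the SchemDraw version installed in the Space. Below is a minimal sketch of the drawing-and-encoding step written against schemdraw.flow and Drawing.get_imagedata; the helper name and the step list are illustrative, not part of the commit.

import base64
import schemdraw
from schemdraw import flow

def build_flowchart_data_url(steps):
    """Sketch: render `steps` top-to-bottom and return the PNG as a data URL."""
    d = schemdraw.Drawing()
    d += flow.Start().label("Start")        # rounded start terminal
    for step in steps:
        d += flow.Arrow().down()            # connector to the next node
        d += flow.Box().label(step)         # rectangular process box
    d += flow.Arrow().down()
    d += flow.Start().label("End")          # reuse the terminal shape for the end node

    # get_imagedata renders the drawing and returns the raw image bytes.
    png_bytes = d.get_imagedata("png")
    encoded = base64.b64encode(png_bytes).decode("utf-8")
    return f"data:image/png;base64,{encoded}"

# e.g. build_flowchart_data_url(["Collect input", "Parse steps", "Render diagram"])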
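
The tool returns the chart as a data-URL string, so whatever consumes the agent's answer has to turn that string back into an image. A small usage sketch follows; the sample step text and the direct call to the wrapped tool are assumptions on my part, and the Space's own Gradio_UI may surface the result differently.

import gradio as gr

steps_text = "1. Collect requirements\n2. Draft the design\n3. Review\n4. Ship"
data_url = text_to_flowchart(steps_text)  # @tool-wrapped functions remain callable in smolagents

with gr.Blocks() as demo:
    # A data URL can be dropped straight into an <img> tag.
    gr.HTML(f'<img src="{data_url}" alt="Generated flowchart">')

demo.launch()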