Spaces:
Running
Running
File size: 2,589 Bytes
959296e c92c3bf 1d70844 0e607c7 c92c3bf 9b5b26a c19d193 4c2a00d d4578f1 4db54a2 0e607c7 6aae614 e38aeb8 d11beff 95de6d7 9b5b26a d4578f1 9b5b26a 453152e 6e73783 9b5b26a 32b8e07 bac848f 9b5b26a 453e1a8 9b5b26a 8c01ffb 6aae614 5dfe22c e38aeb8 d11beff be94a9b 1d70844 be94a9b aa945ae 1d70844 aa945ae 1d70844 aa945ae 8c01ffb 9b5b26a 8c01ffb 85f8fc6 861422e 85f8fc6 1d70844 776a454 8c01ffb 8fe992b 453152e 8c01ffb 1795753 8c01ffb 3d41af7 d4578f1 6e73783 8fe992b 9b5b26a d1e3daa |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 |
from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
# Google Gemini
# from litellm import LiteLLMModel, RateLimitError
import datetime
import requests
import yaml
import os
import pytz # Had to give it permission in Code agent
# Tools
from tools.final_answer import FinalAnswerTool
from tools.polite_guard import PoliteGuardTool
from tools.web_search import DuckDuckGoSearchTool
from Gradio_UI import GradioUI
@tool
def get_the_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    Returns:
        string: A sentence that provides the time using the 12-hour clock including AM/PM
    """
    try:
        # Resolve the zone name, then format "now" in that zone on a
        # 12-hour clock ("%I:%M %p" -> e.g. "03:07 PM").
        zone = pytz.timezone(timezone)
        clock_reading = datetime.datetime.now(zone).strftime("%I:%M %p")
        return f"The current local time in {timezone} is: {clock_reading}"
    except Exception as e:
        # Any failure (most commonly an unknown timezone name) is reported
        # back as a string so the calling agent can recover gracefully.
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
# Instantiate the tools that will be handed to the agent below.
final_answer = FinalAnswerTool()  # terminal tool the agent uses to return its result
polite_guard = PoliteGuardTool()  # classifies text as polite/impolite
# NOTE(review): DuckDuckGoSearchTool is imported both from smolagents (L7) and
# tools.web_search (L18); the later import shadows the former, so this is the
# local tools.web_search version — confirm that is intended.
web_search = DuckDuckGoSearchTool()
# Switch to Gemini if this model gets overloaded
model = HfApiModel(
    max_tokens=2096,  # generation cap per call
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # served via the HF Inference API
    custom_role_conversions=None,
)
# Alternative backend kept for quick failover (see "Switch to Gemini" above);
# requires uncommenting the litellm import at the top of the file.
#model = LiteLLMModel(
#    model_id="gemini/gemini-2.0-flash-exp",
#    max_tokens=2096,
#    temperature=0.6,
#    api_key=os.getenv("LITELLM_API_KEY")
#)
# Import tool from Hub
# SECURITY NOTE: trust_remote_code=True executes code downloaded from the Hub;
# acceptable here only because the repo is the known agents-course space.
# NOTE(review): image_generation_tool is loaded but never added to the agent's
# tool list below — confirm whether that is intentional.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
# Load prompts.yaml
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
# Load contentprompts.yml
with open("contentprompts.yml", 'r') as stream:
    content_prompts = yaml.safe_load(stream)
# Merge the two prompt sets; keys in content_prompts override prompt_templates.
combined_prompts = {**prompt_templates, **content_prompts}
# Assemble the agent and serve it through the Gradio UI.
agent = CodeAgent(
    model=model,
    tools=[final_answer, polite_guard, web_search, get_the_current_time_in_timezone],  ## add your tools here (don't remove final answer)
    max_steps=6,             # cap on think/act iterations per query
    verbosity_level=3,       # log every step in detail
    grammar=None,
    planning_interval=None,  # no periodic re-planning pass
    name="Content Agent",
    description="Evaluates whether text is polite or impolite. ",
    prompt_templates=combined_prompts,
    additional_authorized_imports=["pytz"],  # let generated code import pytz (used by the time tool)
)
# Blocks and serves the web UI. (Fixed: removed a stray trailing "|" scrape
# artifact that made this line a SyntaxError.)
GradioUI(agent).launch()