from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import os
import requests
import pytz
import yaml

from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI

# Hugging Face API token used by generate_key_results below; this assumes the token
# is provided via a HUGGINGFACE_API_KEY environment variable (e.g. a Space secret).
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")


@tool
def generate_key_results(objective: str) -> list[str]:
    """
    Queries the Qwen2.5-Coder-32B-Instruct model to generate key results for a given objective.

    Args:
        objective (str): The objective for which key results should be generated.

    Returns:
        list[str]: A list of key results generated by the model.
    """
    url = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
    headers = {"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"}
    # return_full_text=False asks the Inference API to omit the prompt from the output.
    payload = {
        "inputs": f"Objective: {objective}\nGenerate three key results for this objective.",
        "parameters": {"return_full_text": False},
    }

    response = requests.post(url, headers=headers, json=payload)

    if response.status_code != 200:
        raise Exception(f"API request failed: {response.text}")

    # The Inference API returns a list of generations; take the text of the first one.
    output_text = response.json()[0]["generated_text"]

    # Split the generation into lines and strip bullet markers to get individual key results.
    key_results = output_text.split("\n")
    return [kr.strip("- ") for kr in key_results if kr]


@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"


# Tool the agent calls to return its final answer to the user.
final_answer = FinalAnswerTool()

model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)
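# Note: this is the same Qwen2.5-Coder-32B-Instruct checkpoint that generate_key_results
# calls directly over the Inference API.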
# Load a text-to-image tool from the Hub (trust_remote_code=True runs the tool's own code).
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
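# prompt_templates now holds the YAML-defined prompts that are passed to the CodeAgent below.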
agent = CodeAgent(
    model=model,
    # Only the final-answer tool is registered; the other tools defined above
    # (generate_key_results, get_current_time_in_timezone, image_generation_tool,
    # DuckDuckGoSearchTool()) can be added to this list to make them available to the agent.
    tools=[final_answer],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
# Launch the Gradio web interface for the agent.
GradioUI(agent).launch()
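# For quick testing without the web UI, the agent can also be invoked directly, e.g.:
#   agent.run("What tools do you have available?")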