# NOTE: the lines above this file's imports in the original capture were
# Hugging Face Spaces web-view residue (Space status, file size, commit
# hashes, and a column of line numbers) — not part of the program.
from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool, LiteLLMModel
from transformers import pipeline
import datetime
import requests
import pytz
import yaml
import os
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# Custom tools for the agent (template comment from the course starter removed).
# Tool to classify text into politeness categories.
@tool
def ask_polite_guard(input_text: str) -> dict:
    """Tool that classifies text into four categories: polite, somewhat polite, neutral, and impolite.

    Args:
        input_text: The text to classify.

    Returns:
        On success, a dict with keys 'label' (predicted politeness category)
        and 'score' (model confidence). On failure, an error-message string
        (note: callers must tolerate this str fallback).
    """
    try:
        classifier = pipeline("text-classification", "Intel/polite-guard")
        # BUG FIX: the original never called the classifier, so `result` was
        # an undefined name and every invocation fell into the except branch.
        # A text-classification pipeline returns a list of {label, score}
        # dicts; take the top (only) prediction for a single input string.
        result = classifier(input_text)[0]
        return {
            "label": result['label'],
            "score": result['score']
        }
    except Exception as e:
        return f"Error fetching classification for text '{input_text}': {str(e)}"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').

    Returns:
        A human-readable sentence with the current local time, or an
        error-message string if the timezone name is invalid.
    """
    try:
        # Use the stdlib zoneinfo (Python 3.9+) instead of third-party pytz;
        # ZoneInfoNotFoundError for an unknown zone name is caught by the
        # broad handler below and turned into the same error string.
        from zoneinfo import ZoneInfo

        tz = ZoneInfo(timezone)
        # Get the current time in that timezone, formatted for readability.
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
# The final-answer tool is mandatory: the CodeAgent uses it to terminate.
final_answer = FinalAnswerTool()
# Alternative model config kept for reference: a Hub-hosted inference model.
# model = HfApiModel(
# max_tokens=2096,
# temperature=0.5,
# model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
# it is possible that this model may be overloaded
# custom_role_conversions=None,
# )
# Active model: Gemini via LiteLLM; the API key is read from the environment
# (set LITELLM_API_KEY as a Space secret), never hard-coded.
model = LiteLLMModel(
    model_id="gemini/gemini-2.0-flash-exp",
    max_tokens=2096,
    temperature=0.6,
    api_key=os.getenv("LITELLM_API_KEY")
)
# Import tool from Hub
# NOTE(review): trust_remote_code=True executes code from the Hub repo —
# acceptable for the course tool, but keep it pinned to a trusted source.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
# Load the agent's prompt templates from the Space-local YAML file.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
# Assemble the agent. The tools list must always include final_answer;
# image_generation_tool is loaded above but intentionally not wired in here.
agent = CodeAgent(
    model=model,
    tools=[final_answer, ask_polite_guard, get_current_time_in_timezone],  ## add your tools here (don't remove final answer)
    max_steps=6,  # hard cap on reasoning/tool-call iterations per query
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

# Launch the Gradio web UI wrapping the agent.
# (Removed a stray trailing "|" that was a copy/extraction artifact and
# would be a syntax error.)
GradioUI(agent).launch()