# ContentAgent/tools/polite_guard.py
from typing import Any, Optional
from smolagents.tools import Tool
from transformers import pipeline


class PoliteGuardTool(Tool):
    """
    Evaluates user-supplied text with the Polite Guard classifier and returns
    information about whether the content is polite.

    Args:
        input_text: The text entered by the user that should be evaluated.

    Returns:
        A classification label indicating whether the content is polite,
        somewhat polite, neutral, or impolite.
    """
name = "polite_guard"
description = "Uses Polite guard to classify input text from polite to impolite name and it provides a score as well. Anything over .95 should be considered a significant threshold."
inputs = {'input_text': {'type': 'any', 'description': 'Enter text for assessing whether it is respectful'}}
output_type = "any"
    def forward(self, input_text: Any) -> Any:
        str_return_value = self.ask_polite_guard(input_text)
        print(f"forward sets the following: {str_return_value}")
        return str_return_value

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_initialized = False
        self.label = None
        self.score = None
        # The classifier is loaded lazily on first use so constructing the tool stays cheap.
        self._classifier = None

    def ask_polite_guard(self, input_text: str) -> str:
        """
        Classify the input text with the Intel/polite-guard model.

        Args:
            input_text: The text to classify.

        Returns:
            A string containing the classification label and its confidence score.
        """
        try:
            # Load the text-classification pipeline once and reuse it on later calls.
            if self._classifier is None:
                self._classifier = pipeline("text-classification", model="Intel/polite-guard")
            result = self._classifier(input_text)
            print(f"classifier returned: {result}")
            # The pipeline returns a list of dicts, e.g. [{'label': 'polite', 'score': 0.97}].
            output = result[0]
            label = output['label']
            score = output['score']
            return f"label is {label} with a score of {score}"
        except Exception as e:
            return f"Error fetching classification for text '{input_text}': {str(e)}"