from typing import Any

from smolagents.tools import Tool
from transformers import pipeline
class PoliteGuardTool(Tool):
    """
    Takes the input text from the user and evaluates it against Polite Guard, returning
    information about whether the content is polite.

    Args:
        input_text: Text that the user inputs into the agent and that should be evaluated.

    Returns:
        A classification label stating whether the content is polite, somewhat polite,
        neutral, or impolite.
    """

    name = "polite_guard"
    description = (
        "Uses Polite Guard to classify input text from polite to impolite, and it also "
        "provides a confidence score. Anything over 0.95 should be considered a "
        "significant threshold."
    )
    inputs = {"input_text": {"type": "any", "description": "Enter text for assessing whether it is respectful"}}
    output_type = "any"
    def forward(self, input_text: Any) -> Any:
        # Delegate classification to Polite Guard and return its string summary.
        str_return_value = self.ask_polite_guard(input_text)
        print(f"forward sets the following: {str_return_value}")
        return str_return_value
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_initialized = False
        self.label = None
        self.score = None
    def ask_polite_guard(self, input_text: str) -> str:
        """
        Classify the politeness of a piece of text with the Intel/polite-guard model.

        Args:
            input_text: The text to classify.

        Returns:
            str: A summary containing the classification label and its score.
        """
        try:
            # Note: the pipeline is rebuilt on every call; caching it (e.g. in __init__)
            # would avoid reloading the model for repeated classifications.
            classifier = pipeline("text-classification", "Intel/polite-guard")
            result = classifier(input_text)
            print(f"return {str(result)}")
            # The pipeline returns a list with one dict per input,
            # e.g. [{'label': 'polite', 'score': ...}].
            output = result[0]
            label = output["label"]
            score = output["score"]
            return f"label is {label} with a score of {score}"
        except Exception as e:
            return f"Error fetching classification for text '{input_text}': {str(e)}"