|
import os |
|
from huggingface_hub import login |
|
import torch |
|
from transformers import pipeline |
|
import gradio as gr |
|
|
|
|
|
# Read the Hugging Face access token from the environment and fail fast with an
# actionable message if it is absent, so the app never starts half-configured.
hf_token = os.getenv("HF_TOKEN")

# Guard with truthiness, not `is None`: an exported-but-empty HF_TOKEN would
# pass an `is None` check and then fail later with a confusing auth error.
if not hf_token:
    raise ValueError("Hugging Face token is not set: export HF_TOKEN before running.")

# Authenticate with the Hub up front (required for gated repos such as Llama).
login(token=hf_token)
|
|
|
|
|
# Gated Llama model repurposed as a 3-way sequence classifier.
model_id = "meta-llama/Llama-3.2-1B-Instruct"

# Label schema for the classifier head.
# NOTE(review): Llama-3.2-1B-Instruct is a causal LM with no trained
# classification head; transformers attaches a freshly initialized head here,
# so predictions are effectively random until the head is fine-tuned.
id2label = {0: 'greeting', 1: 'farewell', 2: 'other'}
label2id = {label: idx for idx, label in id2label.items()}

pipe = pipeline(
    "text-classification",
    model=model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    # Pass the label schema at load time so the head is sized for 3 classes.
    # The original code mutated `pipe.model.config.id2label` *after* loading,
    # which left the default 2-logit head in place — index 2 ('other') could
    # never actually be predicted.
    model_kwargs={"num_labels": 3, "id2label": id2label, "label2id": label2id},
)
|
|
|
|
|
def classify_text(text):
    """Run the classification pipeline on *text* and return the top label."""
    prediction = pipe(text)[0]
    return prediction['label']
|
|
|
|
|
# Assemble the Gradio UI: one text box in, one label widget out.
text_input = gr.Textbox(label="Enter Text")
label_output = gr.Label(label="Classification")

iface = gr.Interface(
    fn=classify_text,
    inputs=text_input,
    outputs=label_output,
    title="Text Classifier",
    description="Classify your text as 'greeting', 'farewell', or 'other'.",
)

# Start the local web server (blocks until the app is stopped).
iface.launch()
|
|