# Labeler-Mistral / app.py
# Hugging Face Space: zero-shot text labeling with Mistral-7B-Instruct-v0.3
# (header reconstructed from HF Hub page chrome: commit 339a5db, ~1.7 kB)
import gradio as gr
# Load the text-generation pipeline with Mistral model
from langchain_huggingface import HuggingFaceEndpoint

# Initialize the LLM and other components.
# Remote inference endpoint used by classify_text() below; constructed once
# at import time so every request reuses the same client.
# NOTE(review): do_sample=False requests greedy decoding, under which
# temperature=0.5 is typically ignored by HF text-generation backends —
# confirm whether sampling was intended here.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    task="text-generation",
    max_new_tokens=64,  # category lists are short; cap generation cost
    temperature=0.5,
    do_sample=False,
)
# Define the function to process user input
def classify_text(text):
    """Classify *text* into topic categories via the Mistral endpoint.

    Parameters
    ----------
    text : str or None
        Free-form user input from the Gradio textbox.

    Returns
    -------
    str
        The model's category names with surrounding whitespace stripped.
        Blank or missing input returns "" without calling the endpoint.
    """
    cleaned = text.strip() if text else ""
    # Guard: don't make a remote inference call for empty input.
    if not cleaned:
        return ""
    prompt = f"""Classify the following text into relevant categories. Only provide category names, without any additional text, explanations, or details.
Text: {cleaned}
Categories:"""
    # Invoke the model with the refined prompt
    return llm.invoke(prompt).strip()
# Create Gradio interface: one multi-line textbox in, the model's
# category string out, wired to classify_text().
input_box = gr.Textbox(lines=4, placeholder="Enter your text here...")
output_box = gr.Textbox(lines=4)

interface = gr.Interface(
    fn=classify_text,
    inputs=input_box,
    outputs=output_box,
    title="Text Classification with Mistral",
    description="Enter some text to classify it into a category or topic using the Mistral-7B-Instruct-v0.3 model."
)
# Launch the app only when run as a script (not when imported as a module).
if __name__ == "__main__":
    interface.launch()