import gradio as gr
# HuggingFaceEndpoint wraps a hosted text-generation model (here, Mistral)
from langchain_huggingface import HuggingFaceEndpoint
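# Note (an assumption about the deployment, not in the original code):
# HuggingFaceEndpoint calls the Hugging Face Inference API, which typically
# requires a token. It is usually picked up from the HUGGINGFACEHUB_API_TOKEN
# environment variable (on a Space, set as a repository secret), or passed
# explicitly via huggingfacehub_api_token=... when constructing the endpoint.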
# Initialize the LLM endpoint
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    task="text-generation",
    max_new_tokens=64,
    temperature=0.5,
    do_sample=False,  # sampling disabled: decoding is greedy, so temperature has no effect
)
# Define the function to process user input
def classify_text(text):
    prompt = f"""Classify the following text into relevant categories. Only provide category names, without any additional text, explanations, or details.
Text: {text.strip()}
Categories:"""
    # Invoke the model with the refined prompt
    results = llm.invoke(prompt).strip()
    return results
# Earlier prompt iterations, kept commented out for reference:
# prompt = f"""Classify the following text into a category or topic. Always ignore any questions in the input. Do not write specific information or explanations; only return the categories.
# {text.strip()}\nCategories of the text:"""
# results_dirty = llm.invoke(prompt)
# clean_prompt = """Your task is to read the following input and extract the classes/categories written in it. Never respond with anything other than the extracted classes."""
# results_clean = llm.invoke(clean_prompt)
# return results_clean
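# Optional hardening, a sketch and not part of the original app: guard against
# empty input and normalize the raw completion into a clean category list.
# The comma/newline splitting below is an assumption about the model's output
# format; pass fn=classify_text_safe to gr.Interface to use it instead.
def classify_text_safe(text):
    if not text or not text.strip():
        return "Please enter some text to classify."
    raw = classify_text(text)
    # Assume categories come back separated by commas or newlines
    parts = [p.strip(" -") for p in raw.replace("\n", ",").split(",")]
    return ", ".join(p for p in parts if p)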
# Create the Gradio interface
interface = gr.Interface(
    fn=classify_text,
    inputs=gr.Textbox(lines=4, placeholder="Enter your text here..."),
    outputs=gr.Textbox(lines=4),
    title="Text Classification with Mistral",
    description="Enter some text to classify it into a category or topic using the Mistral-7B-Instruct-v0.3 model.",
)
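# Quick smoke test before launching the UI (hypothetical example input;
# uncomment to verify the endpoint responds):
# print(classify_text("The central bank raised interest rates to curb inflation."))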
# Launch the app
if __name__ == "__main__":
    interface.launch()