# app.py
import os
from huggingface_hub import login, create_repo, upload_folder
from autotrain import AutoTrainAdvanced
from datasets import load_dataset, Dataset
from dotenv import load_dotenv
import pandas as pd
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
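
# Load local environment variables (.env is expected to define HF_TOKEN and
# HUGGINGFACE_PROFILE) and authenticate against the Hugging Face Hub.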
load_dotenv()
hf_token = os.getenv('HF_TOKEN')
profile_name = os.getenv('HUGGINGFACE_PROFILE')
login(token=hf_token)
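
# Download two public Spanish corpora and merge them into a single training
# dataset via pandas.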
dataset1 = load_dataset("daqc/wikipedia-txt-spanish", split='train')
dataset2 = load_dataset("jorgeortizfuentes/universal_spanish_chilean_corpus", split='train')
df1 = pd.DataFrame(dataset1)
df2 = pd.DataFrame(dataset2)
combined_df = pd.concat([df1, df2], ignore_index=True)
combined_dataset = Dataset.from_pandas(combined_df)
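
# Fine-tuning configuration: a short causal language modelling run on the
# combined corpus, using mixed precision and gradient accumulation.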
task_type = "text-generation"
model_name = "meta-llama/Llama-3.2-1B"
config = {
    "task": task_type,
    "model": model_name,
    "train_data": combined_dataset,
    "output_dir": "./output_model_llama",  # must match the folder uploaded below
    "epochs": 1,
    "learning_rate": 5e-5,
    "batch_size": 32,
    "fp16": True,
    "gradient_accumulation_steps": 4,
    "max_steps": 1,
}
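
# Create (or reuse) the target model repository on the Hub under the profile
# taken from the environment.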
model_repo_name = f"{profile_name}/llama-3-2-1b-text-generation"
create_repo(repo_id=model_repo_name, exist_ok=True, token=hf_token)
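
# Continuous training loop: each pass runs one AutoTrain job and uploads the
# resulting checkpoint. As written, the loop runs indefinitely, so the
# inference section below is only reached if the loop is interrupted.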
while True:
    # Run one AutoTrain job with the configuration above.
    trainer = AutoTrainAdvanced(config=config)
    trainer.train()
    # Push the newly produced checkpoint to the Hub model repository.
    upload_folder(
        folder_path="./output_model_llama",
        repo_id=model_repo_name,
        token=hf_token,
        repo_type="model",
    )
    print(f"Model uploaded successfully to: https://huggingface.co./{model_repo_name}")
    print("Training iteration completed. Continuing with the next one...")
tokenizer = AutoTokenizer.from_pretrained(model_repo_name)
model = AutoModelForCausalLM.from_pretrained(model_repo_name)
def generate_text(input_text):
    # Tokenize the prompt, generate a continuation, and decode it back to text.
    inputs = tokenizer.encode(input_text, return_tensors='pt')
    with torch.no_grad():
        outputs = model.generate(inputs, max_length=50, num_return_sequences=1)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", title="Interacting with Llama 3.2",
                     description="Write a text and generate a response.")
iface.launch()