|
import gradio as gr |
|
from llama_cpp import Llama |
|
|
|
|
|
# Hub coordinates for the quantized Gemma checkpoint. The GGUF file is
# downloaded from the Hugging Face Hub on first run and cached locally;
# later launches reuse the cached copy.
_REPO_ID = "google/gemma-2b-it-GGUF"
_GGUF_FILENAME = "gemma-2b-it.gguf"

# Module-level model instance shared by every request handled by the app.
llm = Llama.from_pretrained(repo_id=_REPO_ID, filename=_GGUF_FILENAME)
|
|
|
|
|
def process_prompt(prompt, max_tokens=512):
    """Run *prompt* through the model and return the completion text.

    The prompt is passed to the model verbatim (no chat template is
    applied here).

    Parameters
    ----------
    prompt : str
        Raw text forwarded to the model.
    max_tokens : int, optional
        Generation budget. Defaults to 512, matching the original
        hard-coded value, so existing callers are unaffected.

    Returns
    -------
    str
        The completion text. Note that because ``echo=True`` is passed,
        the returned string contains the original prompt followed by the
        model's continuation.
    """
    # `llm` is the module-level Llama instance created at import time.
    output = llm(
        prompt,
        max_tokens=max_tokens,
        echo=True,  # include the prompt in the output (original behavior)
    )
    # llama-cpp-python returns an OpenAI-style completion dict.
    return output['choices'][0]['text']
|
|
|
|
|
# UI copy kept byte-identical to the original deployment strings.
_TITLE = "Hugging Face Space API - Gemma-2B-it"
_DESCRIPTION = "Modelo basado en Gemma-2B-it para probar vulnerabilidades con RedTeamer."

# Minimal text-in / text-out Gradio UI wired to process_prompt.
interface = gr.Interface(
    fn=process_prompt,
    inputs="text",
    outputs="text",
    title=_TITLE,
    description=_DESCRIPTION,
)
|
|
|
|
|
# Launch the web app only when run as a script, not when imported.
if __name__ == "__main__":

    # share=True additionally exposes a public Gradio tunnel URL
    # alongside the local server.
    interface.launch(share=True)
|
|