# Import necessary libraries
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Load the model and tokenizer.
# Note: meta-llama/Llama-2-7b-chat-hf is a gated repository; accept the license
# on Hugging Face and authenticate (e.g. `huggingface-cli login`) before loading.
model_name = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Define the chat function
def chat_with_llama2(input_text):
    # Wrap the user message in the Llama-2 chat instruction format.
    prompt = f"[INST] {input_text} [/INST]"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=512, do_sample=True, top_p=0.95, top_k=60)
    # Decode only the newly generated tokens so the prompt is not echoed back in the reply.
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
    return response.strip()

# Create the Gradio interface
interface = gr.Interface(
    fn=chat_with_llama2,
    inputs="text",
    outputs="text",
    title="LLaMa 2 Chat HF",
    description="Chat with LLaMa 2 model using Hugging Face Transformers and Gradio.",
    examples=[
        ["Hello, LLaMa 2! How are you today?"],
        ["Can you tell me a joke?"],
        ["What is the capital of France?"]
    ]
)

# Launch the interface
if __name__ == "__main__":
    interface.launch()