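# Streamlit app: a Russian-language medical Q&A assistant built on the
# quantized Mykes/med_gemma7b_gguf model, served through llama-cpp-python.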
import streamlit as st
from llama_cpp import Llama

# Fetch the Q4_K_M-quantized GGUF weights from the Hugging Face Hub and load them
llm = Llama.from_pretrained(
    repo_id="Mykes/med_gemma7b_gguf",
    filename="*Q4_K_M.gguf",
    verbose=False,
)
# Prompt template (kept verbatim, including its wording, to match the model's expected format).
# The Russian context reads: user: "Answer a question about my health."
# assistant: "Of course! What is your question?"
basic_prompt = "Below is the context which is your conversation history and the last user question. Write a response according the context and question. ### Context: user: Ответь мне на вопрос о моем здоровье. assistant: Конечно! Какой у Вас вопрос? ### Question: {question} ### Response:"
def generate_response(question):
    if question:
        # Fill the template with the user's question
        model_input = basic_prompt.format(question=question)
        # Generate a completion; calling llm(...) is equivalent to llm.create_completion(...)
        output = llm(
            model_input,             # prompt
            max_tokens=32,           # generate up to 32 tokens; set to None to generate up to the end of the context window
            stop=["<end_of_turn>"],  # Gemma's end-of-turn marker
            echo=False,              # do not echo the prompt back in the output
        )
        st.write(output["choices"][0]["text"])
    else:
        st.write("Please enter a question to get a response.")
# Streamlit text input widget ("Задайте мне медицинский вопрос..." = "Ask me a medical question...")
input_text = st.text_input('Задайте мне медицинский вопрос...')

# Button to trigger response generation
if st.button('Generate Response'):
    generate_response(input_text)
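
# A minimal way to run this locally (assuming the file is saved as app.py;
# huggingface_hub is needed for Llama.from_pretrained to download the weights):
#   pip install streamlit llama-cpp-python huggingface_hub
#   streamlit run app.py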