from llama_cpp import Llama

# Download the quantized GGUF model from the Hugging Face Hub and load it once.
model = Llama.from_pretrained(
    repo_id="Arpit-Bansal/counsellor_model_q5_k_m",
    filename="counsellor_model_q5_k_m-unsloth.Q5_K_M.gguf",
    verbose=False,
)

def prompt_for_chat(content: str):
    # Pair a fixed system prompt with the user's message.
    system_prompt = (
        "You are an excellent counselor who assists users with their mental "
        "health, educational challenges, and everyday life issues, and you "
        "provide thoughtful answers to their questions."
    )
    return [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": content},
    ]

def response_return(response):
    # Accumulate the streamed chunks into a single reply string.
    res = ""
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        # Skip chunks without text (e.g., the initial role-only delta).
        if "content" not in delta:
            continue
        res += delta["content"]
    return res

def llm_function(user_input: str):
    # Stream a chat completion and return the fully assembled reply.
    llm_response = model.create_chat_completion(
        messages=prompt_for_chat(content=user_input),
        stream=True,
        temperature=0.6,
        max_tokens=256,
    )
    return response_return(llm_response)
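
A quick way to exercise the pipeline end to end is to call llm_function directly. This is a minimal smoke test, not part of the original listing; the example question is a hypothetical placeholder, and the exact wording of the reply will vary from run to run since sampling is enabled (temperature=0.6).

if __name__ == "__main__":
    # Hypothetical example input; any user question works here.
    answer = llm_function("How can I manage exam stress better?")
    print(answer)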