# Hugging Face Space: GGUF model demo (page-scrape residue "Spaces: Running"
# replaced with this comment so the file parses as Python).
import os

import streamlit as st
from huggingface_hub import snapshot_download
from llama_cpp import Llama

st.title("GGUF Model Streamlit App")

repo_id = "Divyansh12/check"
model_dir = "model"

# Download the model snapshot only when the local cache directory is absent,
# and only announce the download when it actually happens.
if not os.path.exists(model_dir):
    st.write("Downloading the model...")
    snapshot_download(repo_id=repo_id, cache_dir=model_dir)
    st.write("Model downloaded successfully!")

# snapshot_download(cache_dir=...) nests files under
# models--<org>--<repo>/snapshots/<revision>/, so a hard-coded path that
# omits the snapshots/<revision> layer never matches. Walk the cache (which
# also serves as the debugging listing) and take the first .gguf file found.
model_path = None
for root, _dirs, files in os.walk(model_dir):
    st.write(f"Files in {root}: {files}")
    for fname in files:
        if model_path is None and fname.endswith(".gguf"):
            model_path = os.path.join(root, fname)

if model_path is None:
    st.error(f"No .gguf model file found under {model_dir}")
else:
    st.write(f"Found model file at {model_path}")
    # Llama.from_pretrained() expects repo_id/filename and fetches from the
    # Hub; a local GGUF file is loaded with the Llama(model_path=...) ctor.
    llm = Llama(model_path=model_path)
    st.write("Model loaded successfully!")

    # Example query to confirm the model responds.
    response = llm.create_chat_completion(
        messages=[
            {"role": "user", "content": "What is the capital of France?"}
        ]
    )
    st.write("Response:", response)