Spaces:
Running
Running
File size: 1,100 Bytes
ad7aaa9 9446222 ad7aaa9 9518de8 ad7aaa9 9446222 ad7aaa9 9446222 9518de8 ad7aaa9 9518de8 ad7aaa9 9446222 ad7aaa9 9446222 ad7aaa9 9518de8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
import streamlit as st
from llama_cpp import Llama
from huggingface_hub import snapshot_download
import os
st.title("GGUF Model Streamlit App")
st.write("Downloading the model...")
repo_id = "Divyansh12/check"
model_dir = "model"
# Download the model snapshot only once. snapshot_download(cache_dir=...)
# stores files under model_dir/models--<org>--<repo>/snapshots/<revision>/...,
# where <revision> is a commit hash that cannot be hard-coded reliably.
if not os.path.exists(model_dir):
    snapshot_download(repo_id=repo_id, cache_dir=model_dir)
    st.write("Model downloaded successfully!")
# Locate the .gguf weights inside the cache tree instead of hard-coding the
# snapshot path (the revision directory name is unpredictable). Also list the
# downloaded files for debugging.
model_path = None
for root, dirs, files in os.walk(model_dir):
    st.write(f"Files in {root}: {files}")
    for fname in files:
        if model_path is None and fname.endswith(".gguf"):
            model_path = os.path.join(root, fname)
if model_path is None or not os.path.exists(model_path):
    st.error(f"Model file not found at {model_path}")
else:
    st.write(f"Found model file at {model_path}")
    # Llama.from_pretrained() expects repo_id/filename and downloads from the
    # Hub; loading an already-downloaded local GGUF file requires the plain
    # constructor with model_path.
    llm = Llama(model_path=model_path)
    st.write("Model loaded successfully!")
    # Example query to verify the model responds.
    response = llm.create_chat_completion(
        messages=[
            {"role": "user", "content": "What is the capital of France?"}
        ]
    )
    st.write("Response:", response)
|