Divyansh12 committed
Commit ad7aaa9
Parent(s): 9cef45c

Create app.py

Files changed (1):
  app.py +50 -0
app.py ADDED
@@ -0,0 +1,50 @@
+ import streamlit as st
+ from llama_cpp import Llama
+ from huggingface_hub import snapshot_download
+ import os
+
+ # Define the model repository and filename
+ repo_id = "Divyansh12/check"
+ filename = "unsloth.F16.gguf"
+
+ # Download the model if not already present
+ model_dir = "model"
+ if not os.path.exists(model_dir):
+     st.write("Downloading the model...")
+     # local_dir places the repo files directly inside model_dir
+     snapshot_download(repo_id=repo_id, local_dir=model_dir)
+     st.write("Model downloaded successfully!")
+
+ # The model file sits at the top level of the downloaded directory,
+ # so build its exact path
+ model_path = os.path.join(model_dir, filename)
+
+ # Check if the file exists at the specified location
+ if not os.path.exists(model_path):
+     st.error(f"Model file not found at {model_path}")
+ else:
+     st.write(f"Found model file at {model_path}")
+
+     # Load the local GGUF model using llama-cpp
+     st.write("Loading the model...")
+     llm = Llama(model_path=model_path)
+     st.write("Model loaded successfully!")
+
+     # Streamlit input for the user to enter a prompt
+     prompt = st.text_input("Enter your prompt:", "What is the capital of France?")
+
+     # Generate the response when a prompt is given
+     if st.button("Generate Response"):
+         with st.spinner("Generating..."):
+             response = llm.create_chat_completion(
+                 messages=[
+                     {
+                         "role": "user",
+                         "content": prompt
+                     }
+                 ]
+             )
+
+         # Extract the message content from the response and display it
+         st.write("Response:")
+         st.write(response['choices'][0]['message']['content'])
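
Streamlit re-executes the whole script on every widget interaction, so the app above reloads the GGUF file each time the button is pressed. A minimal sketch of caching the loaded model across reruns with Streamlit's `st.cache_resource` decorator (assuming a Streamlit release that ships it; the `load_model` helper is a hypothetical name introduced here for illustration):

```python
import streamlit as st
from llama_cpp import Llama

@st.cache_resource  # runs once per session; later reruns reuse the same Llama instance
def load_model(model_path: str) -> Llama:
    # load_model is a hypothetical helper wrapping the same constructor used in app.py
    return Llama(model_path=model_path)

llm = load_model("model/unsloth.F16.gguf")
```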
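Recent llama-cpp-python releases also expose `Llama.from_pretrained`, which takes a Hub repo id and filename rather than a local path, downloads the GGUF through huggingface_hub, and loads it in one call; assuming such a release is installed, a sketch using the repo and filename from this commit could replace the manual download-and-check logic:

```python
from llama_cpp import Llama

# Fetches unsloth.F16.gguf into the local Hub cache (if missing) and loads it directly
llm = Llama.from_pretrained(
    repo_id="Divyansh12/check",
    filename="unsloth.F16.gguf",
)
```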