Divyansh12 committed on
Commit 9446222
1 Parent(s): 90788c5

Update app.py

Files changed (1)
  1. app.py +17 -31
app.py CHANGED
@@ -3,48 +3,34 @@ from llama_cpp import Llama
 from huggingface_hub import snapshot_download
 import os

-# Define the model repository and filename
+st.title("GGUF Model Streamlit App")
+
+st.write("Downloading the model...")
 repo_id = "Divyansh12/check"
-filename = "unsloth.F16.gguf"
+model_dir = "model/Divyansh12/check"

-# Download the model if not already present
-model_dir = "model"
+# Download the model
 if not os.path.exists(model_dir):
-    st.write("Downloading the model...")
     snapshot_download(repo_id=repo_id, cache_dir=model_dir)
     st.write("Model downloaded successfully!")

-# Since the model file is directly inside the downloaded directory
-# Find the exact file path
-model_path = os.path.join(model_dir, filename)
+# List downloaded files for debugging
+downloaded_files = os.listdir(model_dir)
+st.write("Downloaded model files:", downloaded_files)

-# Check if the file exists at the specified location
+model_path = os.path.join(model_dir, "unsloth.F16.gguf")
 if not os.path.exists(model_path):
     st.error(f"Model file not found at {model_path}")
 else:
     st.write(f"Found model file at {model_path}")
-
-    # Load the GGUF model using llama-cpp
-    st.write("Loading the model...")
-    llm = Llama.from_pretrained(
-        model_path=model_path
-    )
+    llm = Llama.from_pretrained(model_path=model_path)
     st.write("Model loaded successfully!")

-# Streamlit input for the user to enter a prompt
-prompt = st.text_input("Enter your prompt:", "What is the capital of France?")
+    # Example query
+    response = llm.create_chat_completion(
+        messages=[
+            {"role": "user", "content": "What is the capital of France?"}
+        ]
+    )
+    st.write("Response:", response)

-# Generate the response when a prompt is given
-if st.button("Generate Response"):
-    with st.spinner("Generating..."):
-        response = llm.create_chat_completion(
-            messages=[
-                {
-                    "role": "user",
-                    "content": prompt
-                }
-            ]
-        )
-        # Extract the message content from the response and display it
-        st.write("Response:")
-        st.write(response['choices'][0]['message']['content'])
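
A caveat on the download step: snapshot_download(..., cache_dir=...) stores files in the Hub cache layout (model/models--Divyansh12--check/snapshots/<revision>/...), so the hand-built os.path.join(model_dir, "unsloth.F16.gguf") will usually not exist, which is likely what the added os.listdir debugging is probing. A minimal sketch of one way around this, assuming only the repo_id and filename taken from the diff above:

# Sketch (not from the commit): let huggingface_hub report where the file
# actually landed instead of guessing the path by hand.
from huggingface_hub import hf_hub_download

# hf_hub_download returns the resolved path of the single downloaded file,
# even when it sits inside the models--org--repo/snapshots/<revision>/ layout.
model_path = hf_hub_download(
    repo_id="Divyansh12/check",   # repo used in app.py
    filename="unsloth.F16.gguf",  # GGUF file used in app.py
    cache_dir="model",            # same cache root as the pre-commit code
)
print(model_path)  # exact on-disk location of unsloth.F16.gguf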
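
Separately, Llama.from_pretrained in llama-cpp-python is a Hub helper whose first argument is repo_id (with a filename pattern), so calling it with only model_path= will most likely raise a TypeError; a local GGUF file is normally opened with the plain constructor. A hedged sketch, reusing model_path from the snippet above and restoring the answer-text extraction the pre-commit code had:

# Sketch (not from the commit): open the already-downloaded GGUF directly.
from llama_cpp import Llama

llm = Llama(model_path=model_path)  # the constructor takes a local file path

response = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What is the capital of France?"}]
)
# The result is an OpenAI-style completion dict; print only the answer text,
# as the pre-commit version of app.py did.
print(response["choices"][0]["message"]["content"])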