Rename rag_with_mircosoftphi2_and_hf_embeddings.py to app.py
rag_with_mircosoftphi2_and_hf_embeddings.py → app.py (RENAMED)
@@ -1,7 +1,7 @@
 from llama_index.core import VectorStoreIndex,SimpleDirectoryReader,ServiceContext
 import torch
 
-documents = SimpleDirectoryReader("SansarChat").load_data()
+documents = SimpleDirectoryReader("/SansarChat").load_data()
 
 """New sectiond"""
 
@@ -18,7 +18,7 @@ model_url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/
 
 llm = LlamaCPP(
     # optionally, you can set the path to a pre-downloaded model instead of model_url
-    model_path="
+    model_path="LLM.gguf",
     temperature=0.1,
     max_new_tokens=256,
     # llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room
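For context, here is a minimal sketch of how the two edited lines likely fit into the rest of app.py, assuming the usual LlamaIndex flow for a local GGUF model with Hugging Face embeddings: load the documents, wrap the model in LlamaCPP, build a VectorStoreIndex, and query it. Only the imports, the SimpleDirectoryReader call, and the LlamaCPP arguments visible in the diff come from the file itself; the embedding model name, the context_window value, and the example query are illustrative assumptions.

# Minimal sketch (assumptions noted inline); not the repo's exact app.py.
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.llms.llama_cpp import LlamaCPP
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Load the source documents from the directory referenced in the diff.
documents = SimpleDirectoryReader("/SansarChat").load_data()

# Local GGUF model, mirroring the model_path="LLM.gguf" change in this commit.
llm = LlamaCPP(
    model_path="LLM.gguf",
    temperature=0.1,
    max_new_tokens=256,
    # llama2 has a 4096-token context window; 3900 leaves wiggle room (assumed value).
    context_window=3900,
    verbose=True,
)

# Hugging Face embedding model (the model name is an assumption for illustration).
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

# ServiceContext matches the import at the top of the file; newer LlamaIndex
# releases use the global Settings object instead.
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)

index = VectorStoreIndex.from_documents(documents, service_context=service_context)
query_engine = index.as_query_engine()
print(query_engine.query("What topics come up in the SansarChat logs?"))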