mitulagr2 committed on
Commit
acf6d8f
·
1 Parent(s): 2eec007

Update to qwen:1.8b

Browse files
Files changed (2) hide show
  1. app/rag.py +5 -4
  2. start_service.sh +1 -1
app/rag.py CHANGED
@@ -15,14 +15,15 @@ class ChatPDF:
15
  chain = None
16
 
17
  def __init__(self):
18
- self.model = ChatOllama(model="llama3")
19
  self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=64)
20
  self.prompt = PromptTemplate.from_template(
21
  """
22
- <s> [INST] Use the context to answer the question.[/INST] </s>
23
- [INST] Question: {question}
 
24
  Context: {context}
25
- Answer: [/INST]
26
  """
27
  )
28
 
 
15
  chain = None
16
 
17
  def __init__(self):
18
+ self.model = ChatOllama(model="qwen:1.8b")
19
  self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=64)
20
  self.prompt = PromptTemplate.from_template(
21
  """
22
+ You are an assistant for question-answering tasks. Use the following pieces of context
23
+ to answer the question. If you don't know the answer, just say that you don't know.
24
+ Question: {question}
25
  Context: {context}
26
+ Answer:
27
  """
28
  )
29
 
start_service.sh CHANGED
@@ -7,7 +7,7 @@ ollama serve &
7
  sleep 5
8
 
9
  # Pull and run <YOUR_MODEL_NAME>
10
- ollama pull llama3
11
 
12
  #
13
  fastapi run /code/app/main.py --port 7860
 
7
  sleep 5
8
 
9
  # Pull and run <YOUR_MODEL_NAME>
10
+ ollama pull qwen:1.8b
11
 
12
  #
13
  fastapi run /code/app/main.py --port 7860