Omar Solano committed
Commit 1281058
1 Parent(s): 069addf
Files changed (2)
  1. scripts/custom_retriever.py +1 -3
  2. scripts/gradio-ui.py +16 -17
scripts/custom_retriever.py CHANGED
@@ -26,13 +26,11 @@ class CustomRetriever(BaseRetriever):
     def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
         """Retrieve nodes given query."""

-        logger.info(f"Retrieving nodes for query: {query_bundle}")
-
         # LlamaIndex adds "\ninput is " to the query string
         query_bundle.query_str = query_bundle.query_str.replace("\ninput is ", "")
         query_bundle.query_str = query_bundle.query_str.rstrip()

-        logger.info(f"Query: {query_bundle.query_str}")
+        logger.info(f"Retrieving nodes for query: {query_bundle}")

         nodes = self._vector_retriever.retrieve(query_bundle)
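For context, this hunk consolidates two logger.info calls into one placed after the query-string cleanup, so the log reflects the query actually sent to the vector retriever. Below is a minimal self-contained sketch of the resulting class; the constructor and the trailing return are assumptions for illustration (the diff only shows the _retrieve body), and the import paths assume llama_index >= 0.10:

import logging
from typing import List

from llama_index.core import QueryBundle
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.schema import NodeWithScore

logger = logging.getLogger(__name__)


class CustomRetriever(BaseRetriever):
    """Wraps a vector retriever and normalizes the incoming query string."""

    # Assumed constructor: the diff does not show how the wrapped retriever
    # is stored, only that _retrieve reads self._vector_retriever.
    def __init__(self, vector_retriever: BaseRetriever) -> None:
        self._vector_retriever = vector_retriever
        super().__init__()

    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        """Retrieve nodes given query."""
        # LlamaIndex adds "\ninput is " to the query string; strip it out
        query_bundle.query_str = query_bundle.query_str.replace("\ninput is ", "")
        query_bundle.query_str = query_bundle.query_str.rstrip()

        # Log after cleanup so the recorded query matches what is retrieved
        logger.info(f"Retrieving nodes for query: {query_bundle}")

        nodes = self._vector_retriever.retrieve(query_bundle)
        return nodes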
scripts/gradio-ui.py CHANGED
@@ -1,4 +1,3 @@
-import json
 import logging
 import os
 import pickle
@@ -12,11 +11,12 @@ from dotenv import load_dotenv
 from llama_index.agent.openai import OpenAIAgent
 from llama_index.core import VectorStoreIndex, get_response_synthesizer
 from llama_index.core.agent import AgentRunner, ReActAgent
-from llama_index.core.chat_engine import (
-    CondensePlusContextChatEngine,
-    CondenseQuestionChatEngine,
-    ContextChatEngine,
-)
+
+# from llama_index.core.chat_engine import (
+#     CondensePlusContextChatEngine,
+#     CondenseQuestionChatEngine,
+#     ContextChatEngine,
+# )
 from llama_index.core.data_structs import Node
 from llama_index.core.memory import ChatMemoryBuffer
 from llama_index.core.node_parser import SentenceSplitter
@@ -228,18 +228,17 @@ def generate_completion(
     else:
         llm = OpenAI(temperature=1, model=model, max_tokens=None)

-    response_synthesizer = get_response_synthesizer(
-        llm=llm,
-        response_mode="simple_summarize",
-        text_qa_template=TEXT_QA_TEMPLATE,
-        streaming=True,
-    )
+    # response_synthesizer = get_response_synthesizer(
+    #     llm=llm,
+    #     response_mode="simple_summarize",
+    #     text_qa_template=TEXT_QA_TEMPLATE,
+    #     streaming=True,
+    # )

-    # completion = response_synthesizer.synthesize(query, nodes=nodes_context)
-    custom_query_engine = RetrieverQueryEngine(
-        retriever=custom_retriever,
-        response_synthesizer=response_synthesizer,
-    )
+    # custom_query_engine = RetrieverQueryEngine(
+    #     retriever=custom_retriever,
+    #     response_synthesizer=response_synthesizer,
+    # )

     # agent = CondensePlusContextChatEngine.from_defaults(
     # agent = CondenseQuestionChatEngine.from_defaults(
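For reference, the block disabled in the last hunk is what previously wired the custom retriever into a query engine. A minimal sketch of that wiring follows, assuming llm, TEXT_QA_TEMPLATE, and custom_retriever are defined earlier in gradio-ui.py as in the removed code:

from llama_index.core import get_response_synthesizer
from llama_index.core.query_engine import RetrieverQueryEngine

# Synthesizer that streams a simple summary over the retrieved nodes,
# using the script's custom QA prompt template.
response_synthesizer = get_response_synthesizer(
    llm=llm,
    response_mode="simple_summarize",
    text_qa_template=TEXT_QA_TEMPLATE,
    streaming=True,
)

# Query engine that runs the CustomRetriever first, then hands the
# retrieved nodes to the synthesizer to produce the final answer.
custom_query_engine = RetrieverQueryEngine(
    retriever=custom_retriever,
    response_synthesizer=response_synthesizer,
)

# Example call; with streaming=True this returns a streaming response
# whose tokens can be forwarded to the Gradio UI as they arrive.
response = custom_query_engine.query("What is a vector retriever?")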