import os
from langchain_huggingface import HuggingFaceEndpoint
from langchain_core.runnables import RunnablePassthrough
import app.schemas as schemas
from app.prompts import (
    raw_prompt,
    raw_prompt_formatted,
    history_prompt_formatted,
    standalone_prompt_formatted,  # assumed to be defined in app.prompts alongside the other prompts
    rag_prompt_formatted,         # assumed to be defined in app.prompts alongside the other prompts
    format_context,
    tokenizer,
)
from app.data_indexing import DataIndexer

data_indexer = DataIndexer()

llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
    huggingfacehub_api_token=os.environ['HF_TOKEN'],
    max_new_tokens=512,
    stop_sequences=[tokenizer.eos_token],
    streaming=True,
)

# Simple chain: the raw user question goes straight to the LLM.
simple_chain = (raw_prompt | llm).with_types(input_type=schemas.UserQuestion)

# Formatted chain: the same question, wrapped in the model's chat template.
formatted_chain = (raw_prompt_formatted | llm).with_types(input_type=schemas.UserQuestion)

# History chain: answers the question using the provided chat history.
history_chain = (history_prompt_formatted | llm).with_types(input_type=schemas.HistoryInput)

# Standalone chain: condenses the chat history and the latest question into a
# single standalone question (input type assumed to be HistoryInput, since the
# prompt needs both the history and the question).
standalone_chain = (standalone_prompt_formatted | llm).with_types(input_type=schemas.HistoryInput)

# Build the RAG input: rewrite the question, then retrieve and format the
# matching context from the index.
input_1 = RunnablePassthrough.assign(new_question=standalone_chain)
input_2 = {
    'context': lambda x: format_context(data_indexer.search(x['new_question'])),
    'standalone_question': lambda x: x['new_question']
}
input_to_rag_chain = input_1 | input_2

# RAG chain: rewritten question -> retrieved context -> RAG prompt -> LLM.
rag_chain = (input_to_rag_chain | rag_prompt_formatted | llm).with_types(input_type=schemas.HistoryInput)

# Filtered RAG chain: same as rag_chain, but retrieval uses hybrid search
# (assuming DataIndexer.search accepts a hybrid_search keyword argument).
filtered_input_to_rag_chain = RunnablePassthrough.assign(new_question=standalone_chain) | {
    'context': lambda x: format_context(
        data_indexer.search(x['new_question'], hybrid_search=True)
    ),
    'standalone_question': lambda x: x['new_question']
}
filtered_rag_chain = (
    filtered_input_to_rag_chain | rag_prompt_formatted | llm
).with_types(input_type=schemas.HistoryInput)
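
# Usage sketch (assumptions: HF_TOKEN is set, and schemas.UserQuestion exposes
# a `question` field; adjust to the actual schema definitions).
if __name__ == "__main__":
    # Stream an answer from the simplest chain.
    for chunk in simple_chain.stream({"question": "What is a RAG pipeline?"}):
        print(chunk, end="", flush=True)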