from langchain_community.llms import DeepSparse
llm = DeepSparse(
    model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none"
)
from langchain.prompts import PromptTemplate
prompt = (
PromptTemplate.from_template("Tell me a joke about {topic}")
+ ", make it funny"
+ "\n\nand in {language}"
)
prompt
prompt.format(topic="sports", language="spanish")
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
chain = LLMChain(llm=model, prompt=prompt)
chain.run(topic="sports", language="spanish")
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
prompt = SystemMessage(content="You are a nice pirate")
new_prompt = (
prompt + HumanMessage(content="hi") + AIMessage(content="what?") + "{input}"
)
new_prompt.format_messages(input="i said hi")
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
from langchain.chains import HypotheticalDocumentEmbedder, LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI, OpenAIEmbeddings
base_embeddings = OpenAIEmbeddings()
llm = OpenAI()
embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")
result = embeddings.embed_query("Where is the Taj Mahal?")
multi_llm = OpenAI(n=4, best_of=4)
from langchain.evaluation import load_evaluator
evaluator = | load_evaluator("embedding_distance") | langchain.evaluation.load_evaluator |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2')
import os
from langchain_community.llms import HuggingFaceTextGenInference
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
llm = HuggingFaceTextGenInference(
inference_server_url=ENDPOINT_URL,
max_new_tokens=512,
top_k=50,
temperature=0.1,
repetition_penalty=1.03,
server_kwargs={
"headers": {
"Authorization": f"Bearer {HF_TOKEN}",
"Content-Type": "application/json",
}
},
)
from langchain_community.llms import HuggingFaceEndpoint
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
llm = HuggingFaceEndpoint(
endpoint_url=ENDPOINT_URL,
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"top_k": 50,
"temperature": 0.1,
"repetition_penalty": 1.03,
},
)
from langchain_community.llms import HuggingFaceHub
llm = HuggingFaceHub(
repo_id="HuggingFaceH4/zephyr-7b-beta",
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"top_k": 30,
"temperature": 0.1,
"repetition_penalty": 1.03,
},
)
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models.huggingface import ChatHuggingFace
messages = [
SystemMessage(content="You're a helpful assistant"),
HumanMessage(
content="What happens when an unstoppable force meets an immovable object?"
),
]
chat_model = ChatHuggingFace(llm=llm)
chat_model.model_id
chat_model._to_chat_prompt(messages)
res = chat_model.invoke(messages)
print(res.content)
from langchain import hub
from langchain.agents import AgentExecutor, load_tools
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import (
ReActJsonSingleInputOutputParser,
)
from langchain.tools.render import render_text_description
from langchain_community.utilities import SerpAPIWrapper
tools = load_tools(["serpapi", "llm-math"], llm=llm)
get_ipython().run_line_magic('pip', 'install --quiet pypdf chromadb tiktoken openai')
get_ipython().run_line_magic('pip', 'uninstall -y langchain-fireworks')
get_ipython().run_line_magic('pip', 'install --editable /mnt/disks/data/langchain/libs/partners/fireworks')
import fireworks
print(fireworks)
import fireworks.client
import requests
from langchain_community.document_loaders import PyPDFLoader
url = "https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf"
response = requests.get(url, stream=True)
file_name = "temp_file.pdf"
with open(file_name, "wb") as pdf:
pdf.write(response.content)
loader = PyPDFLoader(file_name)
data = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
from langchain_community.vectorstores import Chroma
from langchain_fireworks.embeddings import FireworksEmbeddings
vectorstore = Chroma.from_documents(
documents=all_splits,
collection_name="rag-chroma",
embedding=FireworksEmbeddings(),
)
retriever = vectorstore.as_retriever()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
from langchain_together import Together
llm = Together(
model="mistralai/Mixtral-8x7B-Instruct-v0.1",
temperature=0.0,
max_tokens=2000,
top_k=1,
)
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | llm
    | StrOutputParser()
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings
embedder = SpacyEmbeddings(model_name="en_core_web_sm")
get_ipython().run_line_magic('pip', "install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken")
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
activeloop_token = getpass.getpass("activeloop token:")
embeddings = OpenAIEmbeddings()
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, overwrite=True)
db.add_documents(docs)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, read_only=True)
docs = db.similarity_search(query)
from langchain.chains import RetrievalQA
from langchain_openai import ChatOpenAI
qa = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model="gpt-3.5-turbo"),
chain_type="stuff",
retriever=db.as_retriever(),
)
query = "What did the president say about Ketanji Brown Jackson"
qa.run(query)
import random
for d in docs:
d.metadata["year"] = random.randint(2012, 2014)
db = DeepLake.from_documents(
docs, embeddings, dataset_path="./my_deeplake/", overwrite=True
)
db.similarity_search(
"What did the president say about Ketanji Brown Jackson",
filter={"metadata": {"year": 2013}},
)
db.similarity_search(
"What did the president say about Ketanji Brown Jackson?", distance_metric="cos"
)
db.max_marginal_relevance_search(
"What did the president say about Ketanji Brown Jackson?"
)
db.delete_dataset()
DeepLake.force_delete_by_path("./my_deeplake")
os.environ["ACTIVELOOP_TOKEN"] = activeloop_token
username = "<USERNAME_OR_ORG>" # your username on app.activeloop.ai
dataset_path = f"hub://{username}/langchain_testing_python" # could be also ./local/path (much faster locally), s3://bucket/path/to/dataset, gcs://path/to/dataset, etc.
docs = text_splitter.split_documents(documents)
embedding = OpenAIEmbeddings()
db = DeepLake(dataset_path=dataset_path, embedding=embeddings, overwrite=True)
ids = db.add_documents(docs)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
username = "<USERNAME_OR_ORG>" # your username on app.activeloop.ai
dataset_path = f"hub://{username}/langchain_testing"
docs = text_splitter.split_documents(documents)
embedding = OpenAIEmbeddings()
from typing import List
from langchain.output_parsers import PydanticOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
class Actor(BaseModel):
    name: str = Field(description="name of an actor")
    film_names: List[str] = Field(description="list of names of films they starred in")
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorter than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True)
(llm_with_tool | output_parser).invoke(example_q)
def format_docs_with_id(docs: List[Document]) -> str:
formatted = [
f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for i, doc in enumerate(docs)
]
return "\n\n" + "\n\n".join(formatted)
format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_1 = prompt | llm_with_tool | output_parser
chain_1 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_1)
.assign(cited_answer=answer_1)
.pick(["cited_answer", "docs"])
)
chain_1.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class quoted_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
output_parser_2 = JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True)
llm_with_tool_2 = llm.bind_tools(
[quoted_answer],
tool_choice="quoted_answer",
)
format_2 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from typing import Iterator, List
from langchain.prompts.chat import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_template(
"Write a comma-separated list of 5 animals similar to: {animal}"
)
model = ChatOpenAI(temperature=0.0)
str_chain = prompt | model | StrOutputParser()
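# A small usage sketch, assuming an input of "bear": because StrOutputParser
# emits plain strings, the chain can be streamed chunk by chunk.
for chunk in str_chain.stream({"animal": "bear"}):
    print(chunk, end="", flush=True)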
get_ipython().run_line_magic('pip', 'install --upgrade --quiet clearml')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet textstat')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
get_ipython().system('python -m spacy download en_core_web_sm')
import os
os.environ["CLEARML_API_ACCESS_KEY"] = ""
os.environ["CLEARML_API_SECRET_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
os.environ["SERPAPI_API_KEY"] = ""
from langchain.callbacks import ClearMLCallbackHandler
from langchain.callbacks import StdOutCallbackHandler
from langchain_openai import OpenAI
clearml_callback = ClearMLCallbackHandler(
task_type="inference",
project_name="langchain_callback_demo",
task_name="llm",
tags=["test"],
visualize=True,
complexity_metrics=True,
stream_logs=True,
)
callbacks = [StdOutCallbackHandler(), clearml_callback]
llm = OpenAI(temperature=0, callbacks=callbacks)
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
import dspy
colbertv2 = dspy.ColBERTv2(url="http://20.102.90.50:2017/wiki17_abstracts")
from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache
from langchain_openai import OpenAI
set_llm_cache(SQLiteCache(database_path="cache.db"))
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)
def retrieve(inputs):
return [doc["text"] for doc in colbertv2(inputs["question"], k=5)]
colbertv2("cycling")
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnablePassthrough
prompt = PromptTemplate.from_template(
"Given {context}, answer the question `{question}` as a tweet."
)
vanilla_chain = (
    RunnablePassthrough.assign(context=retrieve) | prompt | llm | StrOutputParser()
)
from langchain.evaluation import ExactMatchStringEvaluator
evaluator = ExactMatchStringEvaluator()
from langchain.evaluation import load_evaluator
evaluator = | load_evaluator("exact_match") | langchain.evaluation.load_evaluator |
get_ipython().system(' docker run -d -p 8123:8123 -p9000:9000 --name langchain-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:23.4.2.11')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet clickhouse-connect')
import getpass
import os
if "OPENAI_API_KEY" not in os.environ:
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
for d in docs:
d.metadata = {"some": "metadata"}
settings = ClickhouseSettings(table="clickhouse_vector_search_example")
docsearch = Clickhouse.from_documents(docs, embeddings, config=settings)
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query)
print(docs[0].page_content)
print(str(docsearch))
print(f"Clickhouse Table DDL:\n\n{docsearch.schema}")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
print(response["response"])
for _ in range(5):
    try:
        response = chain.run(
            meal=rl_chain.ToSelectFrom(meals),
            user=rl_chain.BasedOn("Tom"),
            preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
            text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!",
        )
        print(response["response"])
    except Exception as e:
        print(e)
from langchain_community.llms.fake import FakeListLLM
from langchain.agents import AgentType, initialize_agent, load_tools
tools = load_tools(["python_repl"])
responses = ["Action: Python REPL\nAction Input: print(2 + 2)", "Final Answer: 4"]
llm = FakeListLLM(responses=responses)
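# An assumed continuation: wire the fake LLM into a ReAct agent so it replays
# the canned responses, "running" print(2 + 2) and answering 4.
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("what is 2 + 2")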
get_ipython().run_line_magic('pip', 'install --upgrade --quiet opaqueprompts langchain')
import os
os.environ["OPAQUEPROMPTS_API_KEY"] = "<OPAQUEPROMPTS_API_KEY>"
os.environ["OPENAI_API_KEY"] = "<OPENAI_API_KEY>"
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.globals import set_debug, set_verbose
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import OpaquePrompts
from langchain_openai import OpenAI
set_debug(True)
set_verbose(True)
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is [email protected]
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he noted
down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided his website
as https://johndoeportfolio.com. John also discussed some of his US-specific details.
He said his bank account number is 1234567890123456 and his driver's license is Y12345678.
His ITIN is 987-65-4321, and he recently renewed his passport, the number for which is
123456789. He emphasized not to share his SSN, which is 123-45-6789. Furthermore, he
mentioned that he accesses his work files remotely through the IP 192.168.1.1 and has
a medical license number MED-123456. ```
Question: ```{question}```
"""
chain = LLMChain(
prompt=PromptTemplate.from_template(prompt_template),
llm=OpaquePrompts(base_llm=OpenAI()),
memory=ConversationBufferWindowMemory(k=2),
verbose=True,
)
print(
chain.run(
{
"question": """Write a message to remind John to do password reset for his website to stay secure."""
},
callbacks=[StdOutCallbackHandler()],
)
)
import langchain_community.utilities.opaqueprompts as op
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
prompt = PromptTemplate.from_template(prompt_template)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnablePassthrough.assign(
response=(lambda x: x["sanitized_input"]) | prompt | llm | StrOutputParser(),
)
    | (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet GitPython')
from git import Repo
repo = Repo.clone_from(
"https://github.com/langchain-ai/langchain", to_path="./example_data/test_repo1"
)
branch = repo.head.reference
from langchain_community.document_loaders import GitLoader
loader = GitLoader(repo_path="./example_data/test_repo1/", branch=branch)
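# An assumed follow-up: materialize the checked-out files as Documents.
data = loader.load()
len(data)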
get_ipython().run_line_magic('pip', 'install --upgrade --quiet apify-client langchain-openai langchain chromadb tiktoken')
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders.base import Document
from langchain_community.utilities import ApifyWrapper
import os
os.environ["OPENAI_API_KEY"] = "Your OpenAI API key"
os.environ["APIFY_API_TOKEN"] = "Your Apify API token"
apify = ApifyWrapper()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai argilla')
import os
os.environ["ARGILLA_API_URL"] = "..."
os.environ["ARGILLA_API_KEY"] = "..."
os.environ["OPENAI_API_KEY"] = "..."
import argilla as rg
from packaging.version import parse as parse_version
if parse_version(rg.__version__) < parse_version("1.8.0"):
raise RuntimeError(
"`FeedbackDataset` is only available in Argilla v1.8.0 or higher, please "
"upgrade `argilla` as `pip install argilla --upgrade`."
)
dataset = rg.FeedbackDataset(
fields=[
rg.TextField(name="prompt"),
rg.TextField(name="response"),
],
questions=[
rg.RatingQuestion(
name="response-rating",
description="How would you rate the quality of the response?",
values=[1, 2, 3, 4, 5],
required=True,
),
rg.TextQuestion(
name="response-feedback",
description="What feedback do you have for the response?",
required=False,
),
],
guidelines="You're asked to rate the quality of the response and provide feedback.",
)
rg.init(
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
dataset.push_to_argilla("langchain-dataset")
from langchain.callbacks import ArgillaCallbackHandler
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
callbacks = [StdOutCallbackHandler(), argilla_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
llm.generate(["Tell me a joke", "Tell me a poem"] * 3)
from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
callbacks = [StdOutCallbackHandler(), argilla_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)
test_prompts = [{"title": "Documentary about Bigfoot in Paris"}]
synopsis_chain.apply(test_prompts)
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
callbacks = [StdOutCallbackHandler(), argilla_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
get_ipython().system('pip3 install tcvectordb')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.fake import FakeEmbeddings
from langchain_community.vectorstores import TencentVectorDB
from langchain_community.vectorstores.tencentvectordb import ConnectionParams
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = FakeEmbeddings(size=128)
conn_params = ConnectionParams(
url="http://10.0.X.X",
key="eC4bLRy2va******************************",
username="root",
timeout=20,
)
vector_db = TencentVectorDB.from_documents(
docs,
embeddings,
connection_params=conn_params,
)
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_db.similarity_search(query)
docs[0].page_content
vector_db = TencentVectorDB(embeddings, conn_params)
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import RedisChatMessageHistory
from langchain_community.utilities import GoogleSearchAPIWrapper
from langchain_openai import OpenAI
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
)
]
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
message_history = RedisChatMessageHistory(
url="redis://localhost:6379/0", ttl=600, session_id="my-session"
)
memory = ConversationBufferMemory(
memory_key="chat_history", chat_memory=message_history
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, memory=memory
)
agent_chain.run(input="How many people live in canada?")
agent_chain.run(input="what is their national anthem called?")
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools, prefix=prefix, suffix=suffix, input_variables=["input", "agent_scratchpad"]
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pygithub')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit
from langchain_community.utilities.github import GitHubAPIWrapper
from langchain_openai import ChatOpenAI
os.environ["GITHUB_APP_ID"] = "123456"
os.environ["GITHUB_APP_PRIVATE_KEY"] = "path/to/your/private-key.pem"
os.environ["GITHUB_REPOSITORY"] = "username/repo-name"
os.environ["GITHUB_BRANCH"] = "bot-branch-name"
os.environ["GITHUB_BASE_BRANCH"] = "main"
os.environ["OPENAI_API_KEY"] = ""
llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
github = GitHubAPIWrapper()
toolkit = GitHubToolkit.from_github_api_wrapper(github)
tools = toolkit.get_tools()
agent = initialize_agent(
tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
print("Available tools:")
for tool in tools:
print("\t" + tool.name)
agent.run(
"You have the software engineering capabilities of a Google Principle engineer. You are tasked with completing issues on a github repository. Please look at the existing issues and complete them."
)
from langchain import hub
gh_issue_prompt_template = hub.pull("kastanday/new-github-issue")
print(gh_issue_prompt_template.template)
def format_issue(issue):
title = f"Title: {issue.get('title')}."
opened_by = f"Opened by user: {issue.get('opened_by')}"
body = f"Body: {issue.get('body')}"
comments = issue.get("comments") # often too long
return "\n".join([title, opened_by, body])
issue = github.get_issue(33) # task to implement a RNA-seq pipeline (bioinformatics)
final_gh_issue_prompt = gh_issue_prompt_template.format(
issue_description=format_issue(issue)
)
print(final_gh_issue_prompt)
from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
from langchain_core.prompts.chat import MessagesPlaceholder
summarizer_llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # type: ignore
chat_history = MessagesPlaceholder(variable_name="chat_history")
memory = ConversationSummaryBufferMemory(
memory_key="chat_history",
return_messages=True,
llm=summarizer_llm,
max_token_limit=2_000,
)
agent = initialize_agent(
tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True, # or pass a function that accepts the error and returns a string
max_iterations=30,
max_execution_time=None,
early_stopping_method="generate",
memory=memory,
agent_kwargs={
"memory_prompts": [chat_history],
"input_variables": ["input", "agent_scratchpad", "chat_history"],
"prefix": final_gh_issue_prompt,
},
)
from langchain_core.tracers.context import tracing_v2_enabled
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "ls__......"
os.environ["LANGCHAIN_PROJECT"] = "Github_Demo_PR"
os.environ["LANGCHAIN_WANDB_TRACING"] = "false"
with tracing_v2_enabled(project_name="Github_Demo_PR", tags=["PR_bot"]):
    agent.run(final_gh_issue_prompt)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet semanticscholar')
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI
instructions = """You are an expert researcher."""
base_prompt = hub.pull("langchain-ai/openai-functions-template")
prompt = base_prompt.partial(instructions=instructions)
llm = ChatOpenAI(temperature=0)
from langchain_community.tools.semanticscholar.tool import SemanticScholarQueryRun
tools = [SemanticScholarQueryRun()]
agent = create_openai_functions_agent(llm, tools, prompt)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet yfinance')
import os
os.environ["OPENAI_API_KEY"] = "..."
from langchain.agents import AgentType, initialize_agent
from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.0)
tools = [YahooFinanceNewsTool()]
agent_chain = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
agent_chain.run(
"What happens today with Microsoft stocks?",
)
agent_chain.run(
"How does Microsoft feels today comparing with Nvidia?",
)
tool = YahooFinanceNewsTool()
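# A usage sketch (the NVDA ticker is an illustrative assumption): the tool can
# also be invoked directly with a ticker symbol.
res = tool.run("NVDA")
print(res)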
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymysql')
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import (
DirectoryLoader,
UnstructuredMarkdownLoader,
)
from langchain_community.vectorstores import StarRocks
from langchain_community.vectorstores.starrocks import StarRocksSettings
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import TokenTextSplitter
update_vectordb = False
loader = DirectoryLoader(
"./docs", glob="**/*.md", loader_cls=UnstructuredMarkdownLoader
)
documents = loader.load()
text_splitter = TokenTextSplitter(chunk_size=400, chunk_overlap=50)
split_docs = text_splitter.split_documents(documents)
update_vectordb = True
split_docs[-20]
print("# docs = %d, # splits = %d" % (len(documents), len(split_docs)))
def gen_starrocks(update_vectordb, embeddings, settings):
    if update_vectordb:
        docsearch = StarRocks.from_documents(split_docs, embeddings, config=settings)
    else:
        docsearch = StarRocks(embeddings, settings)
    return docsearch
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-translate')
from langchain_community.document_transformers import GoogleTranslateTransformer
from langchain_core.documents import Document
sample_text = """[Generated with Google Bard]
Subject: Key Business Process Updates
Date: Friday, 27 October 2023
Dear team,
I am writing to provide an update on some of our key business processes.
Sales process
We have recently implemented a new sales process that is designed to help us close more deals and grow our revenue. The new process includes a more rigorous qualification process, a more streamlined proposal process, and a more effective customer relationship management (CRM) system.
Marketing process
We have also revamped our marketing process to focus on creating more targeted and engaging content. We are also using more social media and paid advertising to reach a wider audience.
Customer service process
We have also made some improvements to our customer service process. We have implemented a new customer support system that makes it easier for customers to get help with their problems. We have also hired more customer support representatives to reduce wait times.
Overall, we are very pleased with the progress we have made on improving our key business processes. We believe that these changes will help us to achieve our goals of growing our business and providing our customers with the best possible experience.
If you have any questions or feedback about any of these changes, please feel free to contact me directly.
Thank you,
Lewis Cymbal
CEO, Cymbal Bank
"""
documents = [Document(page_content=sample_text)]
from langchain_community.document_loaders import NewsURLLoader
urls = [
"https://www.bbc.com/news/world-us-canada-66388172",
"https://www.bbc.com/news/entertainment-arts-66384971",
]
loader = NewsURLLoader(urls=urls)
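# An assumed follow-up: fetch and parse the articles, then inspect the first.
documents = loader.load()
print("First article: ", documents[0])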
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt", encoding="utf-8")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, text in enumerate(texts):
text.metadata["source"] = f"{i}-pl"
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings)
from langchain.chains import create_qa_with_sources_chain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
qa_chain = create_qa_with_sources_chain(llm)
doc_prompt = PromptTemplate(
template="Content: {page_content}\nSource: {source}",
input_variables=["page_content", "source"],
)
final_qa_chain = StuffDocumentsChain(
llm_chain=qa_chain,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain
)
query = "What did the president say about russia"
retrieval_qa.run(query)
qa_chain_pydantic = create_qa_with_sources_chain(llm, output_parser="pydantic")
final_qa_chain_pydantic = StuffDocumentsChain(
llm_chain=qa_chain_pydantic,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa_pydantic = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain_pydantic
)
retrieval_qa_pydantic.run(query)
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\
Make sure to avoid using any unclear pronouns.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
condense_question_chain = LLMChain(
llm=llm,
prompt=CONDENSE_QUESTION_PROMPT,
)
qa = ConversationalRetrievalChain(
question_generator=condense_question_chain,
retriever=docsearch.as_retriever(),
memory=memory,
combine_docs_chain=final_qa_chain,
)
query = "What did the president say about Ketanji Brown Jackson"
result = qa({"question": query})
result
query = "what did he say about her predecessor?"
result = qa({"question": query})
result
from typing import List
from langchain.chains.openai_functions import create_qa_with_structure_chain
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.messages import HumanMessage, SystemMessage
from pydantic import BaseModel, Field
class CustomResponseSchema(BaseModel):
"""An answer to the question being asked, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
countries_referenced: List[str] = Field(
..., description="All of the countries mentioned in the sources"
)
sources: List[str] = Field(
..., description="List of sources used to answer the question"
)
prompt_messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(
content="Tips: Make sure to answer in the correct format. Return all of the countries mentioned in the sources in uppercase characters."
),
]
chain_prompt = ChatPromptTemplate(messages=prompt_messages)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sagemaker')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
os.environ["OPENAI_API_KEY"] = "<ADD-KEY-HERE>"
os.environ["SERPAPI_API_KEY"] = "<ADD-KEY-HERE>"
from langchain.agents import initialize_agent, load_tools
from langchain.callbacks import SageMakerCallbackHandler
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from sagemaker.analytics import ExperimentAnalytics
from sagemaker.experiments.run import Run
from sagemaker.session import Session
HPARAMS = {
"temperature": 0.1,
"model_name": "gpt-3.5-turbo-instruct",
}
BUCKET_NAME = None
EXPERIMENT_NAME = "langchain-sagemaker-tracker"
session = Session(default_bucket=BUCKET_NAME)
RUN_NAME = "run-scenario-1"
PROMPT_TEMPLATE = "tell me a joke about {topic}"
INPUT_VARIABLES = {"topic": "fish"}
with Run(
experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session
) as run:
sagemaker_callback = SageMakerCallbackHandler(run)
llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS)
    prompt = PromptTemplate.from_template(template=PROMPT_TEMPLATE)
import warnings
warnings.filterwarnings("ignore")
import os
from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
from langchain_community.agent_toolkits import CogniswitchToolkit
from langchain_openai import ChatOpenAI
cs_token = "Your CogniSwitch token"
OAI_token = "Your OpenAI API token"
oauth_token = "Your CogniSwitch authentication token"
os.environ["OPENAI_API_KEY"] = OAI_token
cogniswitch_toolkit = CogniswitchToolkit(
cs_token=cs_token, OAI_token=OAI_token, apiKey=oauth_token
)
tool_lst = cogniswitch_toolkit.get_tools()
llm = ChatOpenAI(
temperature=0,
openai_api_key=OAI_token,
max_tokens=1500,
model_name="gpt-3.5-turbo-0613",
)
agent_executor = create_conversational_retrieval_agent(llm, tool_lst, verbose=False)
import getpass
import os
os.environ["TAVILY_API_KEY"] = getpass.getpass()
from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever
retriever = TavilySearchAPIRetriever(k=3)
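# A minimal usage sketch (the query is an illustrative assumption):
retriever.invoke("what year was breath of the wild released?")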
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0).configurable_fields(
temperature=ConfigurableField(
id="llm_temperature",
name="LLM Temperature",
description="The temperature of the LLM",
)
)
model.invoke("pick a random number")
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number")
prompt = PromptTemplate.from_template("Pick a random number above {x}")
chain = prompt | model
chain.invoke({"x": 0})
chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0})
from langchain.runnables.hub import HubRunnable
prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
owner_repo_commit=ConfigurableField(
id="hub_commit",
name="Hub Commit",
description="The Hub commit to pull from",
)
)
prompt.invoke({"question": "foo", "context": "bar"})
prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke(
{"question": "foo", "context": "bar"}
)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatAnthropic
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
llm = ChatAnthropic(temperature=0).configurable_alternatives(
ConfigurableField(id="llm"),
default_key="anthropic",
openai=ChatOpenAI(),
gpt4=ChatOpenAI(model="gpt-4"),
)
prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | llm
chain.invoke({"topic": "bears"})
chain.with_config(configurable={"llm": "openai"}).invoke({"topic": "bears"})
chain.with_config(configurable={"llm": "anthropic"}).invoke({"topic": "bears"})
llm = ChatAnthropic(temperature=0)
prompt = PromptTemplate.from_template(
"Tell me a joke about {topic}"
).configurable_alternatives(
ConfigurableField(id="prompt"),
default_key="joke",
poem=PromptTemplate.from_template("Write a short poem about {topic}"),
)
chain = prompt | llm
chain.invoke({"topic": "bears"})
chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
llm = ChatAnthropic(temperature=0).configurable_alternatives(
ConfigurableField(id="llm"),
default_key="anthropic",
    openai=ChatOpenAI(),
)
get_ipython().system('pip3 install clickhouse-sqlalchemy InstructorEmbedding sentence_transformers openai langchain-experimental')
import getpass
from os import environ
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain
from langchain_openai import OpenAI
from sqlalchemy import MetaData, create_engine
MYSCALE_HOST = "msc-4a9e710a.us-east-1.aws.staging.myscale.cloud"
MYSCALE_PORT = 443
MYSCALE_USER = "chatdata"
MYSCALE_PASSWORD = "myscale_rocks"
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
engine = create_engine(
f"clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/default?protocol=https"
)
metadata = MetaData(bind=engine)
environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain_experimental.sql.vector_sql import VectorSQLOutputParser
output_parser = VectorSQLOutputParser.from_embeddings(
model=HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
)
from langchain.callbacks import StdOutCallbackHandler
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_experimental.sql.prompt import MYSCALE_PROMPT
from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain
from langchain_openai import OpenAI
chain = VectorSQLDatabaseChain(
llm_chain=LLMChain(
llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),
prompt=MYSCALE_PROMPT,
),
top_k=10,
return_direct=True,
sql_cmd_parser=output_parser,
database=SQLDatabase(engine, None, metadata),
)
import pandas as pd
pd.DataFrame(
chain.run(
"Please give me 10 papers to ask what is PageRank?",
callbacks=[StdOutCallbackHandler()],
)
)
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain_experimental.retrievers.vector_sql_database import (
VectorSQLDatabaseChainRetriever,
)
from langchain_experimental.sql.prompt import MYSCALE_PROMPT
from langchain_experimental.sql.vector_sql import (
VectorSQLDatabaseChain,
VectorSQLRetrieveAllOutputParser,
)
from langchain_openai import ChatOpenAI
output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(
output_parser.model
)
chain = VectorSQLDatabaseChain.from_llm(
llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),
prompt=MYSCALE_PROMPT,
top_k=10,
return_direct=True,
db=SQLDatabase(engine, None, metadata),
sql_cmd_parser=output_parser_retrieve_all,
native_format=True,
)
retriever = VectorSQLDatabaseChainRetriever(
sql_db_chain=chain, page_content_key="abstract"
)
document_with_metadata_prompt = PromptTemplate(
input_variables=["page_content", "id", "title", "authors", "pubdate", "categories"],
template="Content:\n\tTitle: {title}\n\tAbstract: {page_content}\n\tAuthors: {authors}\n\tDate of Publication: {pubdate}\n\tCategories: {categories}\nSOURCE: {id}",
)
chain = RetrievalQAWithSourcesChain.from_chain_type(
ChatOpenAI(
model_name="gpt-3.5-turbo-16k", openai_api_key=OPENAI_API_KEY, temperature=0.6
),
retriever=retriever,
chain_type="stuff",
chain_type_kwargs={
"document_prompt": document_with_metadata_prompt,
},
return_source_documents=True,
)
ans = chain(
"Please give me 10 papers to ask what is PageRank?",
    callbacks=[StdOutCallbackHandler()],
)
import os
import yaml
get_ipython().system('wget https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml -O openai_openapi.yaml')
get_ipython().system('wget https://www.klarna.com/us/shopping/public/openai/v0/api-docs -O klarna_openapi.yaml')
get_ipython().system('wget https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/spotify.com/1.0.0/openapi.yaml -O spotify_openapi.yaml')
from langchain_community.agent_toolkits.openapi.spec import reduce_openapi_spec
with open("openai_openapi.yaml") as f:
raw_openai_api_spec = yaml.load(f, Loader=yaml.Loader)
openai_api_spec = reduce_openapi_spec(raw_openai_api_spec)
with open("klarna_openapi.yaml") as f:
raw_klarna_api_spec = yaml.load(f, Loader=yaml.Loader)
klarna_api_spec = reduce_openapi_spec(raw_klarna_api_spec)
with open("spotify_openapi.yaml") as f:
raw_spotify_api_spec = yaml.load(f, Loader=yaml.Loader)
spotify_api_spec = reduce_openapi_spec(raw_spotify_api_spec)
import spotipy.util as util
from langchain.requests import RequestsWrapper
def construct_spotify_auth_headers(raw_spec: dict):
scopes = list(
raw_spec["components"]["securitySchemes"]["oauth_2_0"]["flows"][
"authorizationCode"
]["scopes"].keys()
)
access_token = util.prompt_for_user_token(scope=",".join(scopes))
return {"Authorization": f"Bearer {access_token}"}
headers = construct_spotify_auth_headers(raw_spotify_api_spec)
requests_wrapper = RequestsWrapper(headers=headers)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiledb-vector-search')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import TileDB
from langchain_text_splitters import CharacterTextSplitter
raw_documents = TextLoader("../../modules/state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
import os
import chromadb
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import DocumentCompressorPipeline
from langchain.retrievers.merger_retriever import MergerRetriever
from langchain_community.document_transformers import (
EmbeddingsClusteringFilter,
EmbeddingsRedundantFilter,
)
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
all_mini = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
multi_qa_mini = HuggingFaceEmbeddings(model_name="multi-qa-MiniLM-L6-dot-v1")
filter_embeddings = OpenAIEmbeddings()
ABS_PATH = os.path.dirname(os.path.abspath(__file__))
DB_DIR = os.path.join(ABS_PATH, "db")
client_settings = chromadb.config.Settings(
is_persistent=True,
persist_directory=DB_DIR,
anonymized_telemetry=False,
)
db_all = Chroma(
collection_name="project_store_all",
persist_directory=DB_DIR,
client_settings=client_settings,
embedding_function=all_mini,
)
db_multi_qa = Chroma(
collection_name="project_store_multi",
persist_directory=DB_DIR,
client_settings=client_settings,
embedding_function=multi_qa_mini,
)
retriever_all = db_all.as_retriever(
search_type="similarity", search_kwargs={"k": 5, "include_metadata": True}
)
retriever_multi_qa = db_multi_qa.as_retriever(
search_type="mmr", search_kwargs={"k": 5, "include_metadata": True}
)
lotr = MergerRetriever(retrievers=[retriever_all, retriever_multi_qa])
filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings)
pipeline = DocumentCompressorPipeline(transformers=[filter])
compression_retriever = ContextualCompressionRetriever(
base_compressor=pipeline, base_retriever=lotr
)
filter_ordered_cluster = EmbeddingsClusteringFilter(
embeddings=filter_embeddings,
num_clusters=10,
num_closest=1,
)
filter_ordered_by_retriever = EmbeddingsClusteringFilter(
embeddings=filter_embeddings,
num_clusters=10,
num_closest=1,
sorted=True,
)
pipeline = DocumentCompressorPipeline(transformers=[filter_ordered_by_retriever])
compression_retriever = ContextualCompressionRetriever(
base_compressor=pipeline, base_retriever=lotr
)
from langchain_community.document_transformers import LongContextReorder
filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings)
reordering = LongContextReorder()
pipeline = DocumentCompressorPipeline(transformers=[filter, reordering])
import pprint
from typing import Any, Dict
import pandas as pd
from langchain.output_parsers import PandasDataFrameOutputParser
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
def format_parser_output(parser_output: Dict[str, Any]) -> None:
for key in parser_output.keys():
parser_output[key] = parser_output[key].to_dict()
return pprint.PrettyPrinter(width=4, compact=True).pprint(parser_output)
df = pd.DataFrame(
{
"num_legs": [2, 4, 8, 0],
"num_wings": [2, 0, 0, 0],
"num_specimen_seen": [10, 2, 1, 8],
}
)
parser = PandasDataFrameOutputParser(dataframe=df)
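# A usage sketch mirroring the parser's intended flow, assuming a simple
# column-retrieval query; format_parser_output pretty-prints the result.
df_query = "Retrieve the num_wings column."
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
format_parser_output(chain.invoke({"query": df_query}))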
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import os
import uuid
uid = uuid.uuid4().hex[:6]
project_name = f"Run Fine-tuning Walkthrough {uid}"
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "YOUR API KEY"
os.environ["LANGCHAIN_PROJECT"] = project_name
from enum import Enum
from langchain_core.pydantic_v1 import BaseModel, Field
class Operation(Enum):
add = "+"
subtract = "-"
multiply = "*"
divide = "/"
class Calculator(BaseModel):
"""A calculator function"""
num1: float
num2: float
    operation: Operation = Field(..., description="+,-,*,/")
from langchain.tools import ShellTool
shell_tool = ShellTool()
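# A quick usage sketch (the commands are illustrative assumptions):
print(shell_tool.run({"commands": ["echo 'Hello World!'", "time"]}))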
from typing import List
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
@validator("setup")
def question_ends_with_question_mark(cls, field):
if field[-1] != "?":
raise ValueError("Badly formed question!")
return field
joke_query = "Tell me a joke."
parser = PydanticOutputParser(pydantic_object=Joke)
prompt = PromptTemplate(
template="Answer the user query.\n{format_instructions}\n{query}\n",
input_variables=["query"],
partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": joke_query})
class Actor(BaseModel):
name: str = Field(description="name of an actor")
    film_names: List[str] = Field(description="list of names of films they starred in")
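# An assumed parsing flow mirroring the Joke example above: bind the Actor
# schema to a PydanticOutputParser and query the model.
actor_query = "Generate the filmography for a random actor."
parser = PydanticOutputParser(pydantic_object=Actor)
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": actor_query})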
get_ipython().run_line_magic('pip', 'install --upgrade --quiet python-gitlab')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit
from langchain_community.utilities.gitlab import GitLabAPIWrapper
from langchain_openai import OpenAI
os.environ["GITLAB_URL"] = "https://gitlab.example.org"
os.environ["GITLAB_PERSONAL_ACCESS_TOKEN"] = ""
os.environ["GITLAB_REPOSITORY"] = "username/repo-name"
os.environ["GITLAB_BRANCH"] = "bot-branch-name"
os.environ["GITLAB_BASE_BRANCH"] = "main"
os.environ["OPENAI_API_KEY"] = ""
llm = OpenAI(temperature=0)
gitlab = GitLabAPIWrapper()
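# An assumed continuation, mirroring the GitHub toolkit pattern above: build
# the toolkit from the wrapper and hand its tools to a ReAct agent.
toolkit = GitLabToolkit.from_gitlab_api_wrapper(gitlab)
agent = initialize_agent(
    toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)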
from typing import List, Optional
from langchain.chains.openai_tools import create_extraction_chain_pydantic
from langchain_core.pydantic_v1 import BaseModel
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo-1106")
class Person(BaseModel):
"""Information about people to extract."""
name: str
age: Optional[int] = None
chain = create_extraction_chain_pydantic(Person, model)
chain.invoke({"input": "jane is 2 and bob is 3"})
class Class(BaseModel):
"""Information about classes to extract."""
teacher: str
students: List[str]
chain = create_extraction_chain_pydantic([Person, Class], model)
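# Illustrative invocation extracting both schemas from one passage.
chain.invoke(
    {"input": "jane is 2 and bob is 3. they are both in mrs sampson's class"}
)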
from langchain.memory import ConversationKGMemory
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
memory = ConversationKGMemory(llm=llm)
memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"})
memory.save_context({"input": "sam is a friend"}, {"output": "okay"})
memory.load_memory_variables({"input": "who is sam"})
memory = ConversationKGMemory(llm=llm, return_messages=True)
memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"})
memory.save_context({"input": "sam is a friend"}, {"output": "okay"})
memory.load_memory_variables({"input": "who is sam"})
memory.get_current_entities("what's Sams favorite color?")
memory.get_knowledge_triplets("her favorite color is red")
llm = OpenAI(temperature=0)
from langchain.chains import ConversationChain
from langchain.prompts.prompt import PromptTemplate
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the "Relevant Information" section and does not hallucinate.
Relevant Information:
{history}
Conversation:
Human: {input}
AI:"""
prompt = PromptTemplate(input_variables=["history", "input"], template=template)
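# Wire the prompt and knowledge-graph memory into a conversation chain.
conversation_with_kg = ConversationChain(
    llm=llm, verbose=True, prompt=prompt, memory=ConversationKGMemory(llm=llm)
)
conversation_with_kg.predict(input="Hi, what's up?")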
get_ipython().run_line_magic('pip', 'install --upgrade --quiet dashvector dashscope')
import getpass
import os
os.environ["DASHVECTOR_API_KEY"] = getpass.getpass("DashVector API Key:")
os.environ["DASHSCOPE_API_KEY"] = getpass.getpass("DashScope API Key:")
from langchain_community.embeddings.dashscope import DashScopeEmbeddings
from langchain_community.vectorstores import DashVector
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = DashScopeEmbeddings()
dashvector = DashVector.from_documents(docs, embeddings)
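# Query the new collection, following the same pattern as the other vector
# store examples in this collection.
query = "What did the president say about Ketanji Brown Jackson"
docs = dashvector.similarity_search(query)
print(docs[0].page_content)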
get_ipython().system('pip3 install clickhouse-sqlalchemy InstructorEmbedding sentence_transformers openai langchain-experimental')
import getpass
from os import environ
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain
from langchain_openai import OpenAI
from sqlalchemy import MetaData, create_engine
MYSCALE_HOST = "msc-4a9e710a.us-east-1.aws.staging.myscale.cloud"
MYSCALE_PORT = 443
MYSCALE_USER = "chatdata"
MYSCALE_PASSWORD = "myscale_rocks"
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
engine = create_engine(
f"clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/default?protocol=https"
)
metadata = MetaData(bind=engine)
environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain_experimental.sql.vector_sql import VectorSQLOutputParser
output_parser = VectorSQLOutputParser.from_embeddings(
model=HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
)
from langchain.callbacks import StdOutCallbackHandler
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_experimental.sql.prompt import MYSCALE_PROMPT
from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain
from langchain_openai import OpenAI
chain = VectorSQLDatabaseChain(
llm_chain=LLMChain(
llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),
prompt=MYSCALE_PROMPT,
),
top_k=10,
return_direct=True,
sql_cmd_parser=output_parser,
database=SQLDatabase(engine, None, metadata),
)
import pandas as pd
pd.DataFrame(
chain.run(
"Please give me 10 papers to ask what is PageRank?",
callbacks=[StdOutCallbackHandler()],
)
)
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain_experimental.retrievers.vector_sql_database import (
VectorSQLDatabaseChainRetriever,
)
from langchain_experimental.sql.prompt import MYSCALE_PROMPT
from langchain_experimental.sql.vector_sql import (
VectorSQLDatabaseChain,
VectorSQLRetrieveAllOutputParser,
)
from langchain_openai import ChatOpenAI
output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(
output_parser.model
)
chain = VectorSQLDatabaseChain.from_llm(
    llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),
    prompt=MYSCALE_PROMPT,
    top_k=10,
    return_direct=True,
    db=SQLDatabase(engine, None, metadata),
    sql_cmd_parser=output_parser_retrieve_all,
)
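# A hedged sketch of the retrieval flow the imports above point at; the
# "abstract" page-content key is an assumption about the underlying table.
retriever = VectorSQLDatabaseChainRetriever(
    sql_db_chain=chain, page_content_key="abstract"
)
ask_chain = RetrievalQAWithSourcesChain.from_chain_type(
    ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.6),
    retriever=retriever,
    return_source_documents=True,
)
ask_chain("What is PageRank?", callbacks=[StdOutCallbackHandler()])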
get_ipython().run_line_magic('pip', 'install --upgrade --quiet yfinance')
import os
os.environ["OPENAI_API_KEY"] = "..."
from langchain.agents import AgentType, initialize_agent
from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.0)
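# Assemble the news tool into a zero-shot agent; the query is illustrative.
tools = [YahooFinanceNewsTool()]
agent_chain = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent_chain.run("What happened today with Microsoft stocks?")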
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyvespa')
from vespa.package import ApplicationPackage, Field, RankProfile
app_package = ApplicationPackage(name="testapp")
app_package.schema.add_fields(
Field(
name="text", type="string", indexing=["index", "summary"], index="enable-bm25"
),
Field(
name="embedding",
type="tensor<float>(x[384])",
indexing=["attribute", "summary"],
attribute=["distance-metric: angular"],
),
)
app_package.schema.add_rank_profile(
RankProfile(
name="default",
first_phase="closeness(field, embedding)",
inputs=[("query(query_embedding)", "tensor<float>(x[384])")],
)
)
from vespa.deployment import VespaDocker
vespa_docker = VespaDocker()
vespa_app = vespa_docker.deploy(application_package=app_package)
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vespa_config = dict(
page_content_field="text",
embedding_field="embedding",
input_field="query_embedding",
)
from langchain_community.vectorstores import VespaStore
db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
query = "What did the president say about Ketanji Brown Jackson"
results = db.similarity_search(query)
print(results[0].page_content)
query = "What did the president say about Ketanji Brown Jackson"
results = db.similarity_search(query)
result = results[0]
result.page_content = "UPDATED: " + result.page_content
db.add_texts([result.page_content], [result.metadata], result.metadata["id"])
results = db.similarity_search(query)
print(results[0].page_content)
result = db.similarity_search(query)
db.delete(["32"])
result = db.similarity_search(query)
results = db.similarity_search_with_score(query)
result = results[0]
db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
retriever = db.as_retriever()
query = "What did the president say about Ketanji Brown Jackson"
results = retriever.get_relevant_documents(query)
app_package.schema.add_fields(
Field(name="date", type="string", indexing=["attribute", "summary"]),
Field(name="rating", type="int", indexing=["attribute", "summary"]),
Field(name="author", type="string", indexing=["attribute", "summary"]),
)
vespa_app = vespa_docker.deploy(application_package=app_package)
for i, doc in enumerate(docs):
doc.metadata["date"] = f"2023-{(i % 12)+1}-{(i % 28)+1}"
doc.metadata["rating"] = range(1, 6)[i % 5]
doc.metadata["author"] = ["Joe Biden", "Unknown"][min(i, 1)]
vespa_config.update(dict(metadata_fields=["date", "rating", "author"]))
db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
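# With metadata fields configured, searches can filter on them; the condition
# below assumes Vespa's YQL-style filter syntax.
query = "What did the president say about Ketanji Brown Jackson"
results = db.similarity_search(query, filter="rating > 3")
print(results[0].metadata)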
get_ipython().run_line_magic('pip', 'install --upgrade --quiet jsonformer > /dev/null')
import logging
logging.basicConfig(level=logging.ERROR)
import json
import os
import requests
from langchain.tools import tool
HF_TOKEN = os.environ.get("HUGGINGFACE_API_KEY")
@tool
def ask_star_coder(query: str, temperature: float = 1.0, max_new_tokens: float = 250):
"""Query the BigCode StarCoder model about coding questions."""
url = "https://api-inference.huggingface.co/models/bigcode/starcoder"
headers = {
"Authorization": f"Bearer {HF_TOKEN}",
"content-type": "application/json",
}
payload = {
"inputs": f"{query}\n\nAnswer:",
"temperature": temperature,
"max_new_tokens": int(max_new_tokens),
}
response = requests.post(url, headers=headers, data=json.dumps(payload))
response.raise_for_status()
return json.loads(response.content.decode("utf-8"))
prompt = """You must respond using JSON format, with a single action and single action input.
You may 'ask_star_coder' for help on coding problems.
{arg_schema}
EXAMPLES
----
Human: "So what's all this about a GIL?"
AI Assistant:{{
"action": "ask_star_coder",
"action_input": {{"query": "What is a GIL?", "temperature": 0.0, "max_new_tokens": 100}}"
}}
Observation: "The GIL is python's Global Interpreter Lock"
Human: "Could you please write a calculator program in LISP?"
AI Assistant:{{
"action": "ask_star_coder",
"action_input": {{"query": "Write a calculator program in LISP", "temperature": 0.0, "max_new_tokens": 250}}
}}
Observation: "(defun add (x y) (+ x y))\n(defun sub (x y) (- x y ))"
Human: "What's the difference between an SVM and an LLM?"
AI Assistant:{{
"action": "ask_star_coder",
"action_input": {{"query": "What's the difference between SGD and an SVM?", "temperature": 1.0, "max_new_tokens": 250}}
}}
Observation: "SGD stands for stochastic gradient descent, while an SVM is a Support Vector Machine."
BEGIN! Answer the Human's question as best as you are able.
------
Human: 'What's the difference between an iterator and an iterable?'
AI Assistant:""".format(arg_schema=ask_star_coder.args)
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline
hf_model = pipeline(
"text-generation", model="cerebras/Cerebras-GPT-590M", max_new_tokens=200
)
original_model = HuggingFacePipeline(pipeline=hf_model)
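# A hedged sketch of constraining decoding with JSONFormer: build a JSON schema
# from the tool's argument schema and wrap the same HF pipeline.
decoder_schema = {
    "title": "Decoding Schema",
    "type": "object",
    "properties": {
        "action": {"type": "string", "default": ask_star_coder.name},
        "action_input": {
            "type": "object",
            "properties": ask_star_coder.args,
        },
    },
}
from langchain_experimental.llms import JsonFormer
json_former = JsonFormer(json_schema=decoder_schema, pipeline=hf_model)
results = json_former.predict(prompt, stop=["Observation:", "Human:"])
print(results)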
from langchain.agents import Tool
from langchain_experimental.utilities import PythonREPL
python_repl = PythonREPL()
python_repl.run("print(1+1)")
repl_tool = Tool(
    name="python_repl",
    description="A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.",
    func=python_repl.run,
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sqlite-vss')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import SQLiteVSS
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
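# Continuation sketch: embed the chunks and load them into SQLite-VSS; the
# table name and db_file path are illustrative.
texts = [doc.page_content for doc in text_splitter.split_documents(documents)]
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = SQLiteVSS.from_texts(
    texts=texts,
    embedding=embedding_function,
    table="state_union",
    db_file="/tmp/vss.db",
)
data = db.similarity_search("What did the president say about Ketanji Brown Jackson")
data[0].page_content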
from langchain_community.utils.openai_functions import (
convert_pydantic_to_openai_function,
)
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
openai_functions = [convert_pydantic_to_openai_function(Joke)]
model = ChatOpenAI(temperature=0)
prompt = ChatPromptTemplate.from_messages(
[("system", "You are helpful assistant"), ("user", "{input}")]
)
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
parser = JsonOutputFunctionsParser()
chain = prompt | model.bind(functions=openai_functions) | parser
chain.invoke({"input": "tell me a joke"})
for s in chain.stream({"input": "tell me a joke"}):
print(s)
from typing import List
from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser
class Jokes(BaseModel):
"""Jokes to tell user."""
joke: List[Joke]
funniness_level: int
parser = JsonKeyOutputFunctionsParser(key_name="joke")
openai_functions = [convert_pydantic_to_openai_function(Jokes)]
chain = prompt | model.bind(functions=openai_functions) | parser
chain.invoke({"input": "tell me two jokes"})
for s in chain.stream({"input": "tell me two jokes"}):
print(s)
from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")
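# Parse straight into the pydantic model, following the JSON-parser pattern above.
parser = PydanticOutputFunctionsParser(pydantic_schema=Joke)
openai_functions = [convert_pydantic_to_openai_function(Joke)]
chain = prompt | model.bind(functions=openai_functions) | parser
chain.invoke({"input": "tell me a joke"})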
import re
from typing import Union
from langchain.agents import (
AgentExecutor,
AgentOutputParser,
LLMSingleActionAgent,
)
from langchain.chains import LLMChain
from langchain.prompts import StringPromptTemplate
from langchain_community.agent_toolkits import NLAToolkit
from langchain_community.tools.plugin import AIPlugin
from langchain_core.agents import AgentAction, AgentFinish
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
urls = [
"https://datasette.io/.well-known/ai-plugin.json",
"https://api.speak.com/.well-known/ai-plugin.json",
"https://www.wolframalpha.com/.well-known/ai-plugin.json",
"https://www.zapier.com/.well-known/ai-plugin.json",
"https://www.klarna.com/.well-known/ai-plugin.json",
"https://www.joinmilo.com/.well-known/ai-plugin.json",
"https://slack.com/.well-known/ai-plugin.json",
"https://schooldigger.com/.well-known/ai-plugin.json",
]
AI_PLUGINS = [AIPlugin.from_url(url) for url in urls]
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
docs = [
Document(
page_content=plugin.description_for_model,
metadata={"plugin_name": plugin.name_for_model},
)
for plugin in AI_PLUGINS
]
vector_store = FAISS.from_documents(docs, embeddings)
toolkits_dict = {
plugin.name_for_model: NLAToolkit.from_llm_and_ai_plugin(llm, plugin)
for plugin in AI_PLUGINS
}
retriever = vector_store.as_retriever()
def get_tools(query):
docs = retriever.get_relevant_documents(query)
tool_kits = [toolkits_dict[d.metadata["plugin_name"]] for d in docs]
tools = []
for tk in tool_kits:
tools.extend(tk.nla_tools)
return tools
tools = get_tools("What could I do today with my kiddo")
[t.name for t in tools]
tools = get_tools("what shirts can i buy?")
[t.name for t in tools]
template = """Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s
Question: {input}
{agent_scratchpad}"""
from typing import Callable
class CustomPromptTemplate(StringPromptTemplate):
template: str
tools_getter: Callable
def format(self, **kwargs) -> str:
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
kwargs["agent_scratchpad"] = thoughts
tools = self.tools_getter(kwargs["input"])
kwargs["tools"] = "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
)
kwargs["tool_names"] = ", ".join([tool.name for tool in tools])
return self.template.format(**kwargs)
prompt = CustomPromptTemplate(
template=template,
tools_getter=get_tools,
input_variables=["input", "intermediate_steps"],
)
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
if "Final Answer:" in llm_output:
return AgentFinish(
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
return AgentAction(
tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output
)
output_parser = CustomOutputParser()
llm = OpenAI(temperature=0)
llm_chain = LLMChain(llm=llm, prompt=prompt)
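# Assemble the custom agent from its pieces; the stop sequence matches the
# ReAct-style prompt above and the question is illustrative.
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
    llm_chain=llm_chain,
    output_parser=output_parser,
    stop=["\nObservation:"],
    allowed_tools=tool_names,
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
agent_executor.run("what shirts can i buy?")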
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores.jaguar import Jaguar
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
"""
Load a text file into a set of documents
"""
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=300)
docs = text_splitter.split_documents(documents)
"""
Instantiate a Jaguar vector store
"""
url = "http://192.168.5.88:8080/fwww/"
embeddings = OpenAIEmbeddings()
pod = "vdb"
store = "langchain_rag_store"
vector_index = "v"
vector_type = "cosine_fraction_float"
vector_dimension = 1536
vectorstore = Jaguar(
pod, store, vector_index, vector_type, vector_dimension, url, embeddings
)
"""
Login must be performed to authorize the client.
The environment variable JAGUAR_API_KEY or file $HOME/.jagrc
should contain the API key for accessing JaguarDB servers.
"""
vectorstore.login()
"""
Create vector store on the JaguarDB database server.
This should be done only once.
"""
metadata = "category char(16)"
text_size = 4096
vectorstore.create(metadata, text_size)
"""
Add the texts from the text splitter to our vectorstore
"""
vectorstore.add_documents(docs)
""" Get the retriever object """
retriever = vectorstore.as_retriever()
template = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
Question: {question}
Context: {context}
Answer:
"""
prompt = ChatPromptTemplate.from_template(template)
""" Obtain a Large Language Model """
LLM = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
""" Create a chain for the RAG flow """
rag_chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| LLM
| StrOutputParser()
)
resp = rag_chain.invoke("What did the president say about Justice Breyer?")
print(resp)
from langchain_community.vectorstores.jaguar import Jaguar
from langchain_openai import OpenAIEmbeddings
url = "http://192.168.3.88:8080/fwww/"
pod = "vdb"
store = "langchain_test_store"
vector_index = "v"
vector_type = "cosine_fraction_float"
vector_dimension = 10
embeddings = OpenAIEmbeddings()
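# Instantiate and authorize the store, mirroring the RAG example above.
vectorstore = Jaguar(
    pod, store, vector_index, vector_type, vector_dimension, url, embeddings
)
vectorstore.login()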
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pypdf pymongo langchain-openai tiktoken')
import getpass
MONGODB_ATLAS_CLUSTER_URI = getpass.getpass("MongoDB Atlas Cluster URI:")
from pymongo import MongoClient
client = MongoClient(MONGODB_ATLAS_CLUSTER_URI)
DB_NAME = "langchain_db"
COLLECTION_NAME = "test"
ATLAS_VECTOR_SEARCH_INDEX_NAME = "index_name"
MONGODB_COLLECTION = client[DB_NAME][COLLECTION_NAME]
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader("https://arxiv.org/pdf/2303.08774.pdf")
data = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
docs = text_splitter.split_documents(data)
print(docs[0])
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_openai import OpenAIEmbeddings
vector_search = MongoDBAtlasVectorSearch.from_documents(
documents=docs,
embedding=OpenAIEmbeddings(disallowed_special=()),
collection=MONGODB_COLLECTION,
index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,
)
query = "What were the compute requirements for training GPT 4"
results = vector_search.similarity_search(query)
print(results[0].page_content)
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_openai import OpenAIEmbeddings
vector_search = MongoDBAtlasVectorSearch.from_connection_string(
MONGODB_ATLAS_CLUSTER_URI,
DB_NAME + "." + COLLECTION_NAME,
    OpenAIEmbeddings(disallowed_special=()),
    index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,
)
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import ModernTreasuryLoader
modern_treasury_loader = ModernTreasuryLoader("payment_orders")
index = VectorstoreIndexCreator().from_loaders([modern_treasury_loader])
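# A hedged usage sketch: expose the index as a retriever.
modern_treasury_doc_retriever = index.vectorstore.as_retriever()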
from langchain_community.embeddings.fake import FakeEmbeddings
from langchain_community.vectorstores import Tair
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = FakeEmbeddings(size=128)
tair_url = "redis://localhost:6379"
Tair.drop_index(tair_url=tair_url)
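# Rebuild the index from the documents and run a query; default index settings
# are assumed.
vector_store = Tair.from_documents(docs, embeddings, tair_url=tair_url)
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_store.similarity_search(query)
docs[0]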
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymilvus')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Milvus
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vector_db = Milvus.from_documents(
docs,
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
)
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_db.similarity_search(query)
docs[0].page_content
vector_db = Milvus.from_documents(
docs,
embeddings,
collection_name="collection_1",
connection_args={"host": "127.0.0.1", "port": "19530"},
)
vector_db = Milvus(
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
collection_name="collection_1",
)
from langchain_core.documents import Document
docs = [
Document(page_content="i worked at kensho", metadata={"namespace": "harrison"}),
Document(page_content="i worked at facebook", metadata={"namespace": "ankush"}),
]
vectorstore = Milvus.from_documents(
docs,
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
drop_old=True,
partition_key_field="namespace", # Use the "namespace" field as the partition key
)
vectorstore.as_retriever(
search_kwargs={"expr": 'namespace == "ankush"'}
).get_relevant_documents("where did i work?")
vectorstore.as_retriever(
search_kwargs={"expr": 'namespace == "harrison"'}
).get_relevant_documents("where did i work?")
from langchain.docstore.document import Document
docs = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
| Document(page_content="baz", metadata={"id": 3}) | langchain.docstore.document.Document |
from IPython.display import SVG
from langchain_experimental.cpal.base import CPALChain
from langchain_experimental.pal_chain import PALChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0, max_tokens=512)
cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
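# Run the chain on a narrative math question and render the causal graph; the
# question and output path are illustrative.
question = (
    "Jan has three times the number of pets as Marcia. "
    "Marcia has two more pets than Cindy. "
    "If Cindy has four pets, how many total pets do the three have?"
)
cpal_chain.run(question)
cpal_chain.draw(path="web.svg")
SVG("web.svg")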
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import OpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/react")
llm = OpenAI(temperature=0)
agent = create_react_agent(llm, tools, prompt)
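# Execute the ReAct agent; the question is illustrative.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "What is Wikipedia?"})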
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymysql')
get_ipython().system('pip install sqlalchemy')
get_ipython().system('pip install langchain')
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import (
DirectoryLoader,
UnstructuredMarkdownLoader,
)
from langchain_community.vectorstores.apache_doris import (
ApacheDoris,
ApacheDorisSettings,
)
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import TokenTextSplitter
update_vectordb = False
loader = DirectoryLoader(
"./docs", glob="**/*.md", loader_cls=UnstructuredMarkdownLoader
)
documents = loader.load()
text_splitter = TokenTextSplitter(chunk_size=400, chunk_overlap=50)
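# Split the markdown docs and flag the vector store for (re)building.
split_docs = text_splitter.split_documents(documents)
update_vectordb = True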
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sqlite-vss')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import SQLiteVSS
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
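# A hedged sketch of reusing an existing SQLite-VSS database file instead of
# re-embedding; the table name and path are illustrative.
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
connection = SQLiteVSS.create_connection(db_file="/tmp/vss.db")
db = SQLiteVSS(table="state_union", embedding=embedding_function, connection=connection)
db.similarity_search("What did the president say about Ketanji Brown Jackson")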
import requests
def download_drive_file(url: str, output_path: str = "chat.db") -> None:
file_id = url.split("/")[-2]
download_url = f"https://drive.google.com/uc?export=download&id={file_id}"
response = requests.get(download_url)
if response.status_code != 200:
print("Failed to download the file.")
return
with open(output_path, "wb") as file:
file.write(response.content)
print(f"File {output_path} downloaded.")
url = (
"https://drive.google.com/file/d/1NebNKqTA2NXApCmeH6mu0unJD2tANZzo/view?usp=sharing"
)
download_drive_file(url)
from langchain_community.chat_loaders.imessage import IMessageChatLoader
loader = IMessageChatLoader(
path="./chat.db",
)
from typing import List
from langchain_community.chat_loaders.base import ChatSession
from langchain_community.chat_loaders.utils import (
map_ai_messages,
merge_chat_runs,
)
raw_messages = loader.lazy_load()
merged_messages = merge_chat_runs(raw_messages)
chat_sessions: List[ChatSession] = list(
map_ai_messages(merged_messages, sender="Tortoise")
)
chat_sessions[0]["messages"][:3]
from langchain.adapters.openai import convert_messages_for_finetuning
training_data = convert_messages_for_finetuning(chat_sessions)
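# A hedged sketch (assumes the openai>=1.0 SDK) of uploading the converted
# messages and starting a fine-tune; the model name is illustrative.
import json
from io import BytesIO
import openai
my_file = BytesIO()
for m in training_data:
    my_file.write((json.dumps({"messages": m}) + "\n").encode("utf-8"))
my_file.seek(0)
training_file = openai.files.create(file=my_file, purpose="fine-tune")
job = openai.fine_tuning.jobs.create(
    training_file=training_file.id,
    model="gpt-3.5-turbo",
)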
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loaders = [
TextLoader("../../paul_graham_essay.txt"),
TextLoader("../../state_of_the_union.txt"),
]
docs = []
for loader in loaders:
docs.extend(loader.load())
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)
docs = text_splitter.split_documents(docs)
vectorstore = Chroma(
collection_name="full_documents", embedding_function=OpenAIEmbeddings()
)
store = InMemoryByteStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
byte_store=store,
id_key=id_key,
)
import uuid
doc_ids = [str(uuid.uuid4()) for _ in docs]
child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
sub_docs = []
for i, doc in enumerate(docs):
_id = doc_ids[i]
_sub_docs = child_text_splitter.split_documents([doc])
for _doc in _sub_docs:
_doc.metadata[id_key] = _id
sub_docs.extend(_sub_docs)
retriever.vectorstore.add_documents(sub_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
retriever.vectorstore.similarity_search("justice breyer")[0]
len(retriever.get_relevant_documents("justice breyer")[0].page_content)
from langchain.retrievers.multi_vector import SearchType
retriever.search_type = SearchType.mmr
len(retriever.get_relevant_documents("justice breyer")[0].page_content)
import uuid
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
chain = (
{"doc": lambda x: x.page_content}
| ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}")
| ChatOpenAI(max_retries=0)
| StrOutputParser()
)
summaries = chain.batch(docs, {"max_concurrency": 5})
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())
store = InMemoryByteStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
byte_store=store,
id_key=id_key,
)
doc_ids = [str(uuid.uuid4()) for _ in docs]
summary_docs = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
sub_docs = vectorstore.similarity_search("justice breyer")
sub_docs[0]
retrieved_docs = retriever.get_relevant_documents("justice breyer")
len(retrieved_docs[0].page_content)
functions = [
{
"name": "hypothetical_questions",
"description": "Generate hypothetical questions",
"parameters": {
"type": "object",
"properties": {
"questions": {
"type": "array",
"items": {"type": "string"},
},
},
"required": ["questions"],
},
}
]
from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser
chain = (
{"doc": lambda x: x.page_content}
| ChatPromptTemplate.from_template(
"Generate a list of exactly 3 hypothetical questions that the below document could be used to answer:\n\n{doc}"
)
    | ChatOpenAI(max_retries=0, model="gpt-4").bind(
        functions=functions, function_call={"name": "hypothetical_questions"}
    )
    | JsonKeyOutputFunctionsParser(key_name="questions")
)
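# Generate question sets for every parent document, reusing the batch pattern
# from the summaries above.
chain.invoke(docs[0])
hypothetical_questions = chain.batch(docs, {"max_concurrency": 5})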
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai wikipedia')
from operator import itemgetter
from langchain.agents import AgentExecutor, load_tools
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_core.prompt_values import ChatPromptValue
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
wiki = WikipediaQueryRun(
api_wrapper=WikipediaAPIWrapper(top_k_results=5, doc_content_chars_max=10_000)
)
tools = [wiki]
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
llm = ChatOpenAI(model="gpt-3.5-turbo")
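# Assemble the OpenAI-functions agent from the pieces imported above.
agent = (
    {
        "input": itemgetter("input"),
        "agent_scratchpad": lambda x: format_to_openai_function_messages(
            x["intermediate_steps"]
        ),
    }
    | prompt
    | llm.bind_functions(tools)
    | OpenAIFunctionsAgentOutputParser()
)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)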
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt", encoding="utf-8")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, text in enumerate(texts):
text.metadata["source"] = f"{i}-pl"
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings)
from langchain.chains import create_qa_with_sources_chain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
qa_chain = create_qa_with_sources_chain(llm)
doc_prompt = PromptTemplate(
template="Content: {page_content}\nSource: {source}",
input_variables=["page_content", "source"],
)
final_qa_chain = StuffDocumentsChain(
llm_chain=qa_chain,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain
)
query = "What did the president say about russia"
retrieval_qa.run(query)
qa_chain_pydantic = create_qa_with_sources_chain(llm, output_parser="pydantic")
final_qa_chain_pydantic = StuffDocumentsChain(
llm_chain=qa_chain_pydantic,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa_pydantic = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain_pydantic
)
retrieval_qa_pydantic.run(query)
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\
Make sure to avoid using any unclear pronouns.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
condense_question_chain = LLMChain(
llm=llm,
prompt=CONDENSE_QUESTION_PROMPT,
)
qa = ConversationalRetrievalChain(
question_generator=condense_question_chain,
retriever=docsearch.as_retriever(),
memory=memory,
combine_docs_chain=final_qa_chain,
)
query = "What did the president say about Ketanji Brown Jackson"
result = qa({"question": query})
result
query = "what did he say about her predecessor?"
result = qa({"question": query})
result
from typing import List
from langchain.chains.openai_functions import create_qa_with_structure_chain
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.messages import HumanMessage, SystemMessage
from pydantic import BaseModel, Field
class CustomResponseSchema(BaseModel):
"""An answer to the question being asked, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
countries_referenced: List[str] = Field(
..., description="All of the countries mentioned in the sources"
)
sources: List[str] = Field(
..., description="List of sources used to answer the question"
)
prompt_messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
    HumanMessagePromptTemplate.from_template("{context}"),
    HumanMessagePromptTemplate.from_template("Question: {question}"),
]
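# A hedged sketch wiring the custom schema into the QA chain, mirroring the
# retrieval flow above.
chain_prompt = ChatPromptTemplate(messages=prompt_messages)
qa_chain_custom = create_qa_with_structure_chain(
    llm, CustomResponseSchema, output_parser="pydantic", prompt=chain_prompt
)
final_qa_chain_custom = StuffDocumentsChain(
    llm_chain=qa_chain_custom,
    document_variable_name="context",
    document_prompt=doc_prompt,
)
retrieval_qa_custom = RetrievalQA(
    retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain_custom
)
retrieval_qa_custom.run("What did the president say about russia")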
from langchain_community.document_loaders.blob_loaders.youtube_audio import (
YoutubeAudioLoader,
)
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import (
OpenAIWhisperParser,
OpenAIWhisperParserLocal,
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet yt_dlp')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pydub')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet librosa')
local = False
urls = ["https://youtu.be/kCc8FmEb1nY", "https://youtu.be/VMj-3S1tku0"]
save_dir = "~/Downloads/YouTube"
if local:
loader = GenericLoader(
YoutubeAudioLoader(urls, save_dir), OpenAIWhisperParserLocal()
)
else:
    loader = GenericLoader(YoutubeAudioLoader(urls, save_dir), OpenAIWhisperParser())
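# Load and inspect the transcribed documents.
docs = loader.load()
docs[0].page_content[0:500]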
from langchain_community.document_loaders import WebBaseLoader
loader_web = WebBaseLoader(
"https://github.com/basecamp/handbook/blob/master/37signals-is-you.md"
)
from langchain_community.document_loaders import PyPDFLoader
loader_pdf = PyPDFLoader("../MachineLearning-Lecture01.pdf")
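# A sketch of merging both loaders into one document stream; assumes the
# MergedDataLoader helper from langchain_community.
from langchain_community.document_loaders.merge import MergedDataLoader
loader_all = MergedDataLoader(loaders=[loader_web, loader_pdf])
docs_all = loader_all.load()
len(docs_all)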
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("trajectory")
import subprocess
from urllib.parse import urlparse
from langchain.agents import AgentType, initialize_agent
from langchain.tools import tool
from langchain_openai import ChatOpenAI
from pydantic import HttpUrl
@tool
def ping(url: HttpUrl, return_error: bool) -> str:
"""Ping the fully specified url. Must include https:// in the url."""
hostname = urlparse(str(url)).netloc
completed_process = subprocess.run(
["ping", "-c", "1", hostname], capture_output=True, text=True
)
output = completed_process.stdout
if return_error and completed_process.returncode != 0:
return completed_process.stderr
return output
@tool
def trace_route(url: HttpUrl, return_error: bool) -> str:
"""Trace the route to the specified url. Must include https:// in the url."""
hostname = urlparse(str(url)).netloc
completed_process = subprocess.run(
["traceroute", hostname], capture_output=True, text=True
)
output = completed_process.stdout
if return_error and completed_process.returncode != 0:
return completed_process.stderr
return output
llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
agent = initialize_agent(
llm=llm,
tools=[ping, trace_route],
agent=AgentType.OPENAI_MULTI_FUNCTIONS,
return_intermediate_steps=True, # IMPORTANT!
)
result = agent("What's the latency like for https://langchain.com?")
evaluation_result = evaluator.evaluate_agent_trajectory(
prediction=result["output"],
input=result["input"],
agent_trajectory=result["intermediate_steps"],
)
evaluation_result
get_ipython().run_line_magic('pip', 'install --upgrade --quiet anthropic')
from langchain_community.chat_models import ChatAnthropic
eval_llm = ChatAnthropic(temperature=0)
evaluator = load_evaluator("trajectory", llm=eval_llm)
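# Re-run the same trajectory evaluation with the Anthropic-backed evaluator.
evaluation_result = evaluator.evaluate_agent_trajectory(
    prediction=result["output"],
    input=result["input"],
    agent_trajectory=result["intermediate_steps"],
)
evaluation_result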
from langchain_community.llms import KoboldApiLLM
llm = KoboldApiLLM(endpoint="http://192.168.1.144:5000", max_length=80)
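# Minimal usage sketch; the instruction-style prompt is illustrative.
response = llm.invoke(
    "### Instruction:\nWhat is the first book of the bible?\n### Response:"
)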
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
conversation = ConversationChain(
llm=llm, verbose=True, memory=ConversationBufferMemory()
)
conversation.predict(input="Hi there!")
conversation.predict(input="What's the weather?")
from langchain.prompts.prompt import PromptTemplate
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI Assistant:"""
PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
conversation = ConversationChain(
prompt=PROMPT,
llm=llm,
verbose=True,
memory=ConversationBufferMemory(ai_prefix="AI Assistant"),
)
conversation.predict(input="Hi there!")
conversation.predict(input="What's the weather?")
from langchain.prompts.prompt import PromptTemplate
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Friend: {input}
AI:"""
PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
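# Pair the prompt with a memory whose human prefix matches it.
conversation = ConversationChain(
    prompt=PROMPT,
    llm=llm,
    verbose=True,
    memory=ConversationBufferMemory(human_prefix="Friend"),
)
conversation.predict(input="Hi there!")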
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
llm = OpenAI(temperature=0)
from pathlib import Path
relevant_parts = []
for p in Path(".").absolute().parts:
relevant_parts.append(p)
if relevant_parts[-3:] == ["langchain", "docs", "modules"]:
break
doc_path = str(Path(*relevant_parts) / "state_of_the_union.txt")
from langchain_community.document_loaders import TextLoader
loader = TextLoader(doc_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings, collection_name="state-of-union")
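# Expose the store through a RetrievalQA chain; the question is illustrative.
state_of_union = RetrievalQA.from_chain_type(
    llm=llm, chain_type="stuff", retriever=docsearch.as_retriever()
)
state_of_union.run("What did the president say about Ketanji Brown Jackson?")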
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import os
os.environ["OUTLINE_API_KEY"] = "xxx"
os.environ["OUTLINE_INSTANCE_URL"] = "https://app.getoutline.com"
from langchain.retrievers import OutlineRetriever
retriever = OutlineRetriever()
retriever.get_relevant_documents(query="LangChain", doc_content_chars_max=100)
import os
from getpass import getpass
os.environ["OPENAI_API_KEY"] = getpass("OpenAI API Key:")
from langchain.chains import ConversationalRetrievalChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model_name="gpt-3.5-turbo")
qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)
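# Illustrative query with an empty chat history.
qa({"question": "What is LangChain?", "chat_history": []})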
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wandb')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet textstat')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
get_ipython().system('python -m spacy download en_core_web_sm')
import os
os.environ["WANDB_API_KEY"] = ""
from datetime import datetime
from langchain.callbacks import StdOutCallbackHandler, WandbCallbackHandler
from langchain_openai import OpenAI
"""Main function.
This function is used to try the callback handler.
Scenarios:
1. OpenAI LLM
2. Chain with multiple SubChains on multiple generations
3. Agent with Tools
"""
session_group = datetime.now().strftime("%m.%d.%Y_%H.%M.%S")
wandb_callback = WandbCallbackHandler(
job_type="inference",
project="langchain_callback_demo",
group=f"minimal_{session_group}",
name="llm",
tags=["test"],
)
callbacks = [StdOutCallbackHandler(), wandb_callback]
llm = OpenAI(temperature=0, callbacks=callbacks)
llm_result = llm.generate(["Tell me a joke", "Tell me a poem"] * 3)
wandb_callback.flush_tracker(llm, name="simple_sequential")
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
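# Drive the chain over a few titles and flush the run to W&B; the titles are
# illustrative.
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)
test_prompts = [
    {"title": "documentary about good video games that push the boundary of game design"},
    {"title": "the phenomenon behind the remarkable speed of cheetahs"},
]
synopsis_chain.apply(test_prompts)
wandb_callback.flush_tracker(synopsis_chain, name="agent")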
get_ipython().run_line_magic('pip', 'install --upgrade --quiet banana-dev')
import os
os.environ["BANANA_API_KEY"] = "YOUR_API_KEY"
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Banana
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
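# A hedged sketch: point the Banana LLM at your deployed model and run the
# chain (the model key and URL slug are placeholders).
llm = Banana(model_key="YOUR_MODEL_KEY", model_url_slug="YOUR_MODEL_URL_SLUG")
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)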
get_ipython().run_line_magic('pip', 'install --upgrade --quiet neo4j')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Neo4jVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
url = "bolt://localhost:7687"
username = "neo4j"
password = "pleaseletmein"
db = Neo4jVector.from_documents(
docs, OpenAIEmbeddings(), url=url, username=username, password=password
)
query = "What did the president say about Ketanji Brown Jackson"
docs_with_score = db.similarity_search_with_score(query, k=2)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print(doc.page_content)
print("-" * 80)
index_name = "vector" # default index name
store = Neo4jVector.from_existing_index(
OpenAIEmbeddings(),
url=url,
username=username,
password=password,
index_name=index_name,
)
store.query("CREATE (p:Person {name: 'Tomaz', location:'Slovenia', hobby:'Bicycle'})")
existing_graph = Neo4jVector.from_existing_graph(
embedding=OpenAIEmbeddings(),
url=url,
username=username,
password=password,
index_name="person_index",
node_label="Person",
text_node_properties=["name", "location"],
embedding_node_property="embedding",
)
result = existing_graph.similarity_search("Slovenia", k=1)
result[0]
store.add_documents([Document(page_content="foo")])
docs_with_score = store.similarity_search_with_score("foo")
docs_with_score[0]
hybrid_db = Neo4jVector.from_documents(
docs,
    OpenAIEmbeddings(),
    url=url,
    username=username,
    password=password,
    search_type="hybrid",
)
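# Hybrid (vector + keyword) search works through the same interface.
query = "What did the president say about Ketanji Brown Jackson"
docs_with_score = hybrid_db.similarity_search_with_score(query, k=2)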
from langchain.chains import HypotheticalDocumentEmbedder, LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI, OpenAIEmbeddings
base_embeddings = OpenAIEmbeddings()
llm = OpenAI()
embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")
result = embeddings.embed_query("Where is the Taj Mahal?")
multi_llm = OpenAI(n=4, best_of=4)
embeddings = HypotheticalDocumentEmbedder.from_llm(
multi_llm, base_embeddings, "web_search"
)
result = embeddings.embed_query("Where is the Taj Mahal?")
prompt_template = """Please answer the user's question about the most recent state of the union address
Question: {question}
Answer:"""
prompt = PromptTemplate(input_variables=["question"], template=prompt_template)
llm_chain = LLMChain(llm=llm, prompt=prompt)
embeddings = HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=base_embeddings
)
result = embeddings.embed_query(
"What did the president say about Ketanji Brown Jackson"
)
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import CharacterTextSplitter
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
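# Split the address, index it with the HyDE embedder, and query it.
texts = text_splitter.split_text(state_of_the_union)
docsearch = Chroma.from_texts(texts, embeddings)
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query)
print(docs[0].page_content)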
get_ipython().run_line_magic('pip', 'install --upgrade --quiet networkx')
from langchain.indexes import GraphIndexCreator
from langchain_openai import OpenAI
index_creator = GraphIndexCreator(llm=OpenAI(temperature=0))
with open("../../../modules/state_of_the_union.txt") as f:
all_text = f.read()
text = "\n".join(all_text.split("\n\n")[105:108])
text
graph = index_creator.from_text(text)
graph.get_triples()
from langchain.chains import GraphQAChain
chain = GraphQAChain.from_llm(OpenAI(temperature=0), graph=graph, verbose=True)
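# Question answering over the extracted triples; the question is illustrative.
chain.run("what is Intel going to build?")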
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
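# Combine the filters into one moderation config and rebuild the chain with it.
moderation_config = BaseModerationConfig(
    filters=[pii_config, toxicity_config, prompt_safety_config]
)
comp_moderation_with_config = AmazonComprehendModerationChain(
    moderation_config=moderation_config,
    client=comprehend_client,
    verbose=True,
)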
get_ipython().run_line_magic('pip', 'install --upgrade --quiet duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun
search = DuckDuckGoSearchRun()
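# Minimal usage sketch; the query is illustrative.
search.run("Obama's first name?")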
import os
import chromadb
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import DocumentCompressorPipeline
from langchain.retrievers.merger_retriever import MergerRetriever
from langchain_community.document_transformers import (
EmbeddingsClusteringFilter,
EmbeddingsRedundantFilter,
)
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
all_mini = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
multi_qa_mini = HuggingFaceEmbeddings(model_name="multi-qa-MiniLM-L6-dot-v1")
filter_embeddings = OpenAIEmbeddings()
ABS_PATH = os.path.dirname(os.path.abspath(__file__))
DB_DIR = os.path.join(ABS_PATH, "db")
client_settings = chromadb.config.Settings(
is_persistent=True,
persist_directory=DB_DIR,
anonymized_telemetry=False,
)
db_all = Chroma(
collection_name="project_store_all",
persist_directory=DB_DIR,
client_settings=client_settings,
embedding_function=all_mini,
)
db_multi_qa = Chroma(
collection_name="project_store_multi",
persist_directory=DB_DIR,
client_settings=client_settings,
embedding_function=multi_qa_mini,
)
retriever_all = db_all.as_retriever(
search_type="similarity", search_kwargs={"k": 5, "include_metadata": True}
)
retriever_multi_qa = db_multi_qa.as_retriever(
search_type="mmr", search_kwargs={"k": 5, "include_metadata": True}
)
lotr = MergerRetriever(retrievers=[retriever_all, retriever_multi_qa])
filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings)
pipeline = DocumentCompressorPipeline(transformers=[filter])
compression_retriever = ContextualCompressionRetriever(
base_compressor=pipeline, base_retriever=lotr
)
filter_ordered_cluster = EmbeddingsClusteringFilter(
embeddings=filter_embeddings,
num_clusters=10,
num_closest=1,
)
filter_ordered_by_retriever = EmbeddingsClusteringFilter(
embeddings=filter_embeddings,
num_clusters=10,
num_closest=1,
sorted=True,
)
pipeline = DocumentCompressorPipeline(transformers=[filter_ordered_by_retriever])
compression_retriever = ContextualCompressionRetriever(
base_compressor=pipeline, base_retriever=lotr
)
from langchain_community.document_transformers import LongContextReorder
filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings)
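# Chain redundancy filtering with long-context reordering in one compressor
# pipeline over the merged retriever.
reordering = LongContextReorder()
pipeline = DocumentCompressorPipeline(transformers=[filter, reordering])
compression_retriever_reordered = ContextualCompressionRetriever(
    base_compressor=pipeline, base_retriever=lotr
)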
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0, model="gpt-4-turbo-preview")
from langchain import hub
from langchain_core.prompts import PromptTemplate
select_prompt = hub.pull("hwchase17/self-discovery-select")
select_prompt.pretty_print()
adapt_prompt = hub.pull("hwchase17/self-discovery-adapt")
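# The remaining self-discovery prompts; the handles assume the same
# hwchase17/self-discovery-* naming as above.
adapt_prompt.pretty_print()
structured_prompt = hub.pull("hwchase17/self-discovery-structure")
structured_prompt.pretty_print()
reasoning_prompt = hub.pull("hwchase17/self-discovery-reasoning")
reasoning_prompt.pretty_print()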
get_ipython().system('pip install -U openai langchain langchain-experimental')
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=256)
chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "What is this image showing"},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/static/img/langchain_stack.png",
"detail": "auto",
},
},
]
)
]
)
from langchain.agents.openai_assistant import OpenAIAssistantRunnable
interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=[{"type": "code_interpreter"}],
model="gpt-4-1106-preview",
)
output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"})
output
get_ipython().system('pip install e2b duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool
tools = [E2BDataAnalysisTool(api_key="..."), DuckDuckGoSearchRun()]
agent = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant e2b tool",
instructions="You are a personal math tutor. Write and run code to answer math questions. You can also search the internet.",
tools=tools,
model="gpt-4-1106-preview",
as_agent=True,
)
from langchain.agents import AgentExecutor
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"content": "What's the weather in SF today divided by 2.7"})
agent = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant e2b tool",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=tools,
model="gpt-4-1106-preview",
as_agent=True,
)
from langchain_core.agents import AgentFinish
def execute_agent(agent, tools, input):
tool_map = {tool.name: tool for tool in tools}
response = agent.invoke(input)
while not isinstance(response, AgentFinish):
tool_outputs = []
for action in response:
tool_output = tool_map[action.tool].invoke(action.tool_input)
print(action.tool, action.tool_input, tool_output, end="\n\n")
tool_outputs.append(
{"output": tool_output, "tool_call_id": action.tool_call_id}
)
response = agent.invoke(
{
"tool_outputs": tool_outputs,
"run_id": action.run_id,
"thread_id": action.thread_id,
}
)
return response
response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"})
print(response.return_values["output"])
next_response = execute_agent(
agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id}
)
print(next_response.return_values["output"])
chat = ChatOpenAI(model="gpt-3.5-turbo-1106").bind(
response_format={"type": "json_object"}
)
output = chat.invoke(
[
SystemMessage(
content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list."
),
HumanMessage(
content="Google was founded in the USA, while Deepmind was founded in the UK"
),
]
)
print(output.content)
import json
json.loads(output.content)
chat = ChatOpenAI(model="gpt-3.5-turbo-1106")
output = chat.generate(
[
[
SystemMessage(
content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list."
),
HumanMessage(
content="Google was founded in the USA, while Deepmind was founded in the UK"
),
]
]
)
print(output.llm_output)
from typing import Literal
from langchain.output_parsers.openai_tools import PydanticToolsParser
from langchain.utils.openai_functions import convert_pydantic_to_openai_tool
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
class GetCurrentWeather(BaseModel):
    """Get the current weather in a location."""

    location: str = Field(description="The city and state, e.g. San Francisco, CA")
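# Sketch (assumed wiring): bind the Pydantic schema as an OpenAI tool and parse
# the resulting tool call back into the Pydantic model.
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_messages(
    [("system", "You are a helpful assistant"), ("user", "{input}")]
)
model = ChatOpenAI(model="gpt-3.5-turbo-1106").bind(
    tools=[convert_pydantic_to_openai_tool(GetCurrentWeather)]
)
chain = prompt | model | PydanticToolsParser(tools=[GetCurrentWeather])
chain.invoke({"input": "What's the weather in San Francisco right now?"})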
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
template = """Answer the users question based only on the following context:
<context>
{context}
</context>
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0)
search = DuckDuckGoSearchAPIWrapper()
def retriever(query):
return search.run(query)
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
simple_query = "what is langchain?"
chain.invoke(simple_query)
distracted_query = "man that sam bankman fried trial was crazy! what is langchain?"
chain.invoke(distracted_query)
retriever(distracted_query)
template = """Provide a better search query for \
web search engine to answer the given question, end \
the queries with ’**’. Question: \
{x} Answer:"""
rewrite_prompt = ChatPromptTemplate.from_template(template)
from langchain import hub
rewrite_prompt = hub.pull("langchain-ai/rewrite")
print(rewrite_prompt.template)
def _parse(text):
return text.strip("**")
rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse
rewriter.invoke({"x": distracted_query})
rewrite_retrieve_read_chain = (
    {
        "context": {"x": RunnablePassthrough()} | rewriter | retriever,
        "question": RunnablePassthrough(),
    }
    | prompt
    | model
    | StrOutputParser()
)
rewrite_retrieve_read_chain.invoke(distracted_query)
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-datastore')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable datastore.googleapis.com')
from langchain_core.documents import Document
from langchain_google_datastore import DatastoreSaver
data = [Document(page_content="Hello, World!")]
saver = DatastoreSaver()
saver.upsert_documents(data)
saver = DatastoreSaver("Collection")
saver.upsert_documents(data)
doc_ids = ["AnotherCollection/doc_id", "foo/bar"]
saver = DatastoreSaver()
saver.upsert_documents(documents=data, document_ids=doc_ids)
from langchain_google_datastore import DatastoreLoader
loader_collection = DatastoreLoader("Collection")
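# Sketch: loaders expose load(); this reads every entity in the "Collection"
# kind back as Documents.
data = loader_collection.load()
print(data)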
from langchain.memory import ConversationKGMemory
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
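# Minimal ConversationKGMemory sketch: save a couple of turns, then query the
# knowledge triples extracted from them.
memory = ConversationKGMemory(llm=llm)
memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"})
memory.save_context({"input": "sam is a friend"}, {"output": "okay"})
memory.load_memory_variables({"input": "who is sam"})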
get_ipython().run_line_magic('pip', 'install --upgrade --quiet neo4j')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Neo4jVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
url = "bolt://localhost:7687"
username = "neo4j"
password = "pleaseletmein"
db = Neo4jVector.from_documents(
docs, OpenAIEmbeddings(), url=url, username=username, password=password
)
query = "What did the president say about Ketanji Brown Jackson"
docs_with_score = db.similarity_search_with_score(query, k=2)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print(doc.page_content)
print("-" * 80)
index_name = "vector" # default index name
store = Neo4jVector.from_existing_index(
OpenAIEmbeddings(),
url=url,
username=username,
password=password,
index_name=index_name,
)
store.query("CREATE (p:Person {name: 'Tomaz', location:'Slovenia', hobby:'Bicycle'})")
existing_graph = Neo4jVector.from_existing_graph(
embedding=OpenAIEmbeddings(),
url=url,
username=username,
password=password,
index_name="person_index",
node_label="Person",
text_node_properties=["name", "location"],
embedding_node_property="embedding",
)
result = existing_graph.similarity_search("Slovenia", k=1)
result[0]
store.add_documents([Document(page_content="foo")])
docs_with_score = store.similarity_search_with_score("foo")
docs_with_score[0]
hybrid_db = Neo4jVector.from_documents(
docs,
OpenAIEmbeddings(),
url=url,
username=username,
password=password,
search_type="hybrid",
)
index_name = "vector" # default index name
keyword_index_name = "keyword" # default keyword index name
store = Neo4jVector.from_existing_index(
OpenAIEmbeddings(),
url=url,
username=username,
password=password,
index_name=index_name,
keyword_index_name=keyword_index_name,
search_type="hybrid",
)
retriever = store.as_retriever()
retriever.get_relevant_documents(query)[0]
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_openai import ChatOpenAI
chain = RetrievalQAWithSourcesChain.from_chain_type(
    ChatOpenAI(temperature=0), chain_type="stuff", retriever=retriever
)
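# Ask the chain a question (sketch); it returns an answer with source metadata.
chain(
    {"question": "What did the president say about Justice Breyer"},
    return_only_outputs=True,
)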
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores.jaguar import Jaguar
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
"""
Load a text file into a set of documents
"""
loader = TextLoader("../../modules/state_of_the_union.txt")
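# Continue the usual ingestion steps (sketch). Constructing the Jaguar store
# itself needs deployment-specific pod/store/index parameters, so it is omitted.
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)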
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark elasticsearch')
import getpass
import os
from langchain_core.documents import Document
from langchain_elasticsearch import ElasticsearchStore
from langchain_openai import OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
embeddings = OpenAIEmbeddings()
docs = [
Document(
page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
Document(
page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2},
),
Document(
page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6},
),
Document(
page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3},
),
Document(
page_content="Toys come alive and have a blast doing so",
metadata={"year": 1995, "genre": "animated"},
),
Document(
page_content="Three men walk into the Zone, three men walk out of the Zone",
metadata={
"year": 1979,
"director": "Andrei Tarkovsky",
"genre": "science fiction",
"rating": 9.9,
},
),
]
vectorstore = ElasticsearchStore.from_documents(
docs,
embeddings,
index_name="elasticsearch-self-query-demo",
es_url="http://localhost:9200",
)
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import OpenAI
metadata_field_info = [
AttributeInfo(
name="genre",
description="The genre of the movie",
type="string or list[string]",
),
AttributeInfo(
name="year",
description="The year the movie was released",
type="integer",
),
AttributeInfo(
name="director",
description="The name of the movie director",
type="string",
),
AttributeInfo(
name="rating", description="A 1-10 rating for the movie", type="float"
),
]
document_content_description = "Brief summary of a movie"
llm = OpenAI(temperature=0)
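# Build the self-query retriever (sketch; the standard from_llm constructor):
retriever = SelfQueryRetriever.from_llm(
    llm, vectorstore, document_content_description, metadata_field_info, verbose=True
)
retriever.get_relevant_documents("What are some movies about dinosaurs")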
from langchain.callbacks import get_openai_callback
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-4")
with get_openai_callback() as cb:
    result = llm.invoke("Tell me a joke")
    print(cb.total_tokens)
import os
os.environ["OCTOAI_API_TOKEN"] = "OCTOAI_API_TOKEN"
os.environ["ENDPOINT_URL"] = "https://text.octoai.run/v1/chat/completions"
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.\n Instruction:\n{question}\n Response: """
prompt = PromptTemplate.from_template(template)
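# Sketch (the model name and kwargs are assumptions; OctoAIEndpoint reads the
# token and endpoint URL from the environment variables set above):
llm = OctoAIEndpoint(
    model_kwargs={
        "model": "llama-2-13b-chat-fp16",
        "max_tokens": 128,
        "temperature": 0.75,
    },
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "Who was Leonardo da Vinci?"
print(llm_chain.run(question))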
get_ipython().run_line_magic('pip', 'install --upgrade --quiet opensearch-py')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import OpenSearchVectorSearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
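# Sketch: index the chunks into a local OpenSearch instance (assumes OpenSearch
# is reachable at this URL), then run a similarity search.
docsearch = OpenSearchVectorSearch.from_documents(
    docs, embeddings, opensearch_url="http://localhost:9200"
)
query = "What did the president say about Ketanji Brown Jackson"
results = docsearch.similarity_search(query, k=10)
print(results[0].page_content)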
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0).configurable_fields(
temperature=ConfigurableField(
id="llm_temperature",
name="LLM Temperature",
description="The temperature of the LLM",
)
)
model.invoke("pick a random number")
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number")
prompt = PromptTemplate.from_template("Pick a random number above {x}")
chain = prompt | model
chain.invoke({"x": 0})
chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0})
from langchain.runnables.hub import HubRunnable
prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
owner_repo_commit=ConfigurableField(
id="hub_commit",
name="Hub Commit",
description="The Hub commit to pull from",
)
)
prompt.invoke({"question": "foo", "context": "bar"})
prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke(
{"question": "foo", "context": "bar"}
)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatAnthropic
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
llm = ChatAnthropic(temperature=0).configurable_alternatives(
ConfigurableField(id="llm"),
default_key="anthropic",
openai=ChatOpenAI(),
gpt4=ChatOpenAI(model="gpt-4"),
)
prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | llm
chain.invoke({"topic": "bears"})
chain.with_config(configurable={"llm": "openai"}).invoke({"topic": "bears"})
chain.with_config(configurable={"llm": "anthropic"}).invoke({"topic": "bears"})
llm = ChatAnthropic(temperature=0)
prompt = PromptTemplate.from_template(
"Tell me a joke about {topic}"
).configurable_alternatives(
ConfigurableField(id="prompt"),
default_key="joke",
poem=PromptTemplate.from_template("Write a short poem about {topic}"),
)
chain = prompt | llm
chain.invoke({"topic": "bears"})
chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
llm = ChatAnthropic(temperature=0)
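# Sketch: make this llm configurable as well, so both the prompt and the model
# can be swapped in a single with_config call.
llm = llm.configurable_alternatives(
    ConfigurableField(id="llm"),
    default_key="anthropic",
    openai=ChatOpenAI(),
)
chain = prompt | llm
chain.with_config(configurable={"prompt": "poem", "llm": "openai"}).invoke(
    {"topic": "bears"}
)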
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import OpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/react")
llm = OpenAI(temperature=0)
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
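# Run the agent (the question is illustrative):
agent_executor.invoke({"input": "How many people live in Canada?"})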
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.prompts import PromptTemplate
from langchain_community.utilities import GoogleSearchAPIWrapper
from langchain_openai import OpenAI
template = """This is a conversation between a human and a bot:
{chat_history}
Write a summary of the conversation for {input}:
"""
prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template)
memory = ConversationBufferMemory(memory_key="chat_history")
readonlymemory = ReadOnlySharedMemory(memory=memory)
summary_chain = LLMChain(
llm=OpenAI(),
prompt=prompt,
verbose=True,
memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory
)
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="Summary",
func=summary_chain.run,
description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.",
),
]
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, memory=memory
)
agent_chain.run(input="What is ChatGPT?")
agent_chain.run(input="Who developed it?")
agent_chain.run(
input="Thanks. Summarize the conversation, for my daughter 5 years old."
)
print(agent_chain.memory.buffer)
template = """This is a conversation between a human and a bot:
{chat_history}
Write a summary of the conversation for {input}:
"""
prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template)
memory = ConversationBufferMemory(memory_key="chat_history")
summary_chain = LLMChain(
    llm=OpenAI(),
    prompt=prompt,
    verbose=True,
    memory=memory,  # the tool now shares (and can modify) the agent's memory
)
get_ipython().run_line_magic('pip', 'install --quiet pypdf chromadb tiktoken openai')
get_ipython().run_line_magic('pip', 'uninstall -y langchain-fireworks')
get_ipython().run_line_magic('pip', 'install --editable /mnt/disks/data/langchain/libs/partners/fireworks')
import fireworks
print(fireworks)
import fireworks.client
import requests
from langchain_community.document_loaders import PyPDFLoader
url = "https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf"
response = requests.get(url, stream=True)
file_name = "temp_file.pdf"
with open(file_name, "wb") as pdf:
pdf.write(response.content)
loader = PyPDFLoader(file_name)
data = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
from langchain_community.vectorstores import Chroma
from langchain_fireworks.embeddings import FireworksEmbeddings
vectorstore = Chroma.from_documents(
documents=all_splits,
collection_name="rag-chroma",
embedding=FireworksEmbeddings(),
)
retriever = vectorstore.as_retriever()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
from langchain_together import Together
llm = Together(
model="mistralai/Mixtral-8x7B-Instruct-v0.1",
temperature=0.0,
max_tokens=2000,
top_k=1,
)
chain = (
RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
| prompt
| llm
    | StrOutputParser()
)
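# Query the RAG chain (the question is illustrative):
chain.invoke("What are the architectural details of the Gemma model?")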