```javascript
const handlePrepopulatedQuestion = (question: string) => {
  // Update the input with the chosen question
  setInput(question);
  setIsLoading(true); // Set loading state here to indicate submission is processing
  const customSubmitEvent = {
    preventDefault: () => { },
  } as unknown as React.FormEvent;
  // Submit immediately after updating the input
  handleSubmit(customSubmitEvent);
};

return (
  <div>
    {isLoading && <div className="loading-indicator" />}
    {messages.map((m) => (
      <div key={m.id}>
        {m.role === 'user' ? 'You: ' : "The Ghost of Zachary Proser's Writing: "}
        {m.content}
      </div>
    ))}
    {Array.isArray(articles) && (articles.length > 0) && (
      <div>
        <h3>Related Posts</h3>
        {(articles as ArticleWithSlug[]).map((article) => (
          <a key={article.slug} href={`/blog/${article.slug}`}>{article.title}</a>
        ))}
      </div>
    )}
    <h3>Example Questions:</h3>
    <p>Double-click to ask one of these questions, or type your own below and hit enter.</p>
    {prepopulatedQuestions.map((question, index) => (
      <button key={index} onDoubleClick={() => handlePrepopulatedQuestion(question)}>
        {question}
      </button>
    ))}
  </div>
);
}
```
```javascript
import path from 'path';
import { streamText, StreamingTextResponse } from 'ai';
import { openai } from '@ai-sdk/openai';
// getContext and importArticleMetadata are this project's own helpers;
// Metadata, PineconeRecord and ArticleWithSlug are its types

// Allow this serverless function to run for up to 5 minutes
export const maxDuration = 300;

export async function POST(req: Request) {
  const { messages } = await req.json();
  // Get the last message
  const lastMessage = messages[messages.length - 1]

  // Get the context from the last message
  const context = await getContext(lastMessage.content, '', 3000, 0.8, false)

  // Create a new set for blog urls
  let blogUrls = new Set<string>()

  let docs: string[] = [];

  (context as PineconeRecord[]).forEach(match => {
    const source = (match.metadata as Metadata).source
    // Ensure source is a blog url, meaning it contains the path src/app/blog
    if (!source.includes('src/app/blog')) return
    blogUrls.add((match.metadata as Metadata).source)
    docs.push((match.metadata as Metadata).text)
  })

  let relatedBlogPosts: ArticleWithSlug[] = []

  // Loop through all the blog urls and get the metadata for each
  for (const blogUrl of blogUrls) {
    const blogPath = path.basename(blogUrl.replace('page.mdx', ''))
    const localBlogPath = `${blogPath}/page.mdx`
    const { slug, ...metadata } = await importArticleMetadata(localBlogPath);
    relatedBlogPosts.push({ slug, ...metadata });
  }

  // Join all the chunks of text together, truncate to the maximum number of tokens, and return the result
  const contextText = docs.join("\n").substring(0, 3000)
  const prompt = `
  Zachary Proser is a Staff developer, open-source maintainer, and technical writer.
  Zachary Proser's traits include expert knowledge, helpfulness, cleverness, and articulateness.
  Zachary Proser is a well-behaved and well-mannered individual.
  Zachary Proser is always friendly, kind, and inspiring, and he is eager to provide vivid and thoughtful responses to the user.
  Zachary Proser is a Staff Developer Advocate at Pinecone.io, the leader in vector storage.
  Zachary Proser builds and maintains open source applications, Jupyter Notebooks, and distributed systems in AWS.
  START CONTEXT BLOCK
  ${contextText}
  END OF CONTEXT BLOCK
  Zachary will take into account any CONTEXT BLOCK that is provided in a conversation.
  If the context does not provide the answer to the question, Zachary will say, "I'm sorry, but I don't know the answer to that question".
  Zachary will not apologize for previous responses, but instead will indicate new information was gained.
  Zachary will not invent anything that is not drawn directly from the context.
  Zachary will not engage in any defamatory, overly negative, controversial, political or potentially offensive conversations.
  `;
  const result = await streamText({
    model: openai('gpt-4o'),
    system: prompt,
    prompt: lastMessage.content,
  });

  const serializedArticles = Buffer.from(
    JSON.stringify(relatedBlogPosts)
  ).toString('base64')

  return new StreamingTextResponse(result.toAIStream(), {
    headers: {
      "x-sources": serializedArticles
    }
  });
}
```
```python
# Clone my repository which contains my site
# and all the *.MDX files comprising my blog
!git clone https://github.com/zackproser/portfolio.git

# Pip install all dependencies
!pip install langchain_community langchain_pinecone langchain_openai unstructured langchainhub langchain-text-splitters

# Import packages
from langchain_pinecone import PineconeVectorStore
from langchain_openai import OpenAIEmbeddings
from langchain_community.document_loaders import DirectoryLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# Use LangChain's DirectoryLoader to grab all my MDX files
# across all subdirectories in my portfolio project. Use
# multi-threading for efficiency and show progress
loader = DirectoryLoader('portfolio', glob="**/*.mdx", show_progress=True, use_multithreading=True)

docs = loader.load()

docs
```
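Each entry that `DirectoryLoader` returns is a LangChain `Document`. A quick sanity check of the loaded corpus might look like the following sketch (the exact source paths will vary with your clone):

```python
# Each loaded item is a Document: raw text lives in page_content,
# and the originating file path in metadata['source']
print(len(docs))
print(docs[0].metadata)
print(docs[0].page_content[:200])
```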
```python
from google.colab import userdata
import os

# Set the API keys
os.environ['OPENAI_API_KEY'] = userdata.get('OPENAI_API_KEY')
os.environ['PINECONE_API_KEY'] = userdata.get('PINECONE_API_KEY')

# Assuming you've already imported necessary libraries and docs is populated as above

# Initialize embeddings and the vector store
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-large"
)

index_name = "zack-portfolio"

# Split the documents into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
split_docs = text_splitter.split_documents(docs)

# Create a vector store for the documents using the specified embeddings
vectorstore = PineconeVectorStore.from_documents(split_docs, embeddings, index_name=index_name)

# Ask a query that is likely to score a hit against your corpus of text or data
# In my case, I have a blog post where I talk about "the programming bug"
query = "What is the programming bug?"
vectorstore.similarity_search(query)
```
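`similarity_search` returns a list of `Document` objects; pulling out each match's source file and a snippet of its text could look like this small sketch (it uses only LangChain's standard `Document` fields, nothing project-specific):

```python
results = vectorstore.similarity_search(query)
for doc in results:
    # Each hit carries the originating file in metadata and the chunk text in page_content
    print(doc.metadata.get("source"), "->", doc.page_content[:120])
```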
```python
# Pinecone Index sanity checks
from pinecone import Pinecone, ServerlessSpec

# Initialize Pinecone client
pc = Pinecone(api_key=os.environ.get('PINECONE_API_KEY'))

# Set the name of your Pinecone Index here
index_name = 'zack-portfolio'

index = pc.Index(index_name)

# This sanity check call should return stats for your Pinecone index, such as:
# {'dimension': 1536,
#  'index_fullness': 0.0,
#  'namespaces': {'': {'vector_count': 862}},
#  'total_vector_count': 862}
index.describe_index_stats()
```

```python
# Query the Pinecone index for related documents
query = "What is the programming bug?"
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-large"
)
```