date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | msuliot/open_ai_fine_tuning | step6_test_finetuned_model.py | import openai
# get keys from .env file
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
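# NOTE: this script uses the legacy (pre-1.0) openai-python interface
# (openai.ChatCompletion.create); newer SDK versions expose a client object instead.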
def main():
##### You will need to replace the FINE_TUNED_MODEL_ID with the one you got from the previous step.
completion = openai.ChatCompletion.create(
model="FINE_TUNED_MODEL_ID",
temperature=0.0,
messages=[
{"role": "system", "content": "You are a helpful and professional customer service representative"},
{"role": "user", "content": "dude... i forgot my password."},
]
)
print(completion.choices[0].message)
if __name__ == "__main__":
main()
| [
"You are a helpful and professional customer service representative",
"dude... i forgot my password."
] |
2024-01-10 | dap-ware/openai-c99-discord-bot | purgeServer.py | #!/usr/bin/env python3
import asyncio
import discord
from discord.ext import commands
import yaml
import requests
import aiohttp
import openai
from c99api import EndpointClient
import logging
import sys
# Import additional classes from logging
from logging import FileHandler, Formatter
from logging.handlers import RotatingFileHandler
# Create a logger object
logger = logging.getLogger()
logger.setLevel(logging.INFO) # Set the logging level
# Create a rotating file handler
file_handler = RotatingFileHandler(
"bot.log", maxBytes=2000000, backupCount=10
) # Log messages will go to the "bot.log" file, max size is 2MB and keep up to 10 old log files
file_handler.setLevel(logging.INFO) # Set the logging level for the file handler
# Create a formatter
formatter = Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# Add the formatter to the file handler
file_handler.setFormatter(formatter)
# Add the file handler to the logger
logger.addHandler(file_handler)
# Create a stream handler that logs to stdout
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
# Use the same formatter for the stream handler
stream_handler.setFormatter(formatter)
# Add the stream handler to the logger
logger.addHandler(stream_handler)
api = EndpointClient
# Load the secrets from the environment variables
try:
with open("secrets.yaml", "r") as file:
secrets = yaml.safe_load(file)
except Exception as e:
logging.exception("An error occurred while reading secrets file. Exiting.")
sys.exit(1) # Stop the program
TOKEN = secrets["token"]
api.key = secrets["c99_api_key"]
openai.api_key = secrets["openai_api_key"]
# You might need to adjust the intents depending on what your bot needs
intents = discord.Intents.default()
intents.message_content = True
client = commands.Bot(command_prefix="!", intents=intents)
# This set will store the IDs of channels where the purge command is currently running
purging_channels = set()
@client.event
async def on_ready():
logging.info(f"We have logged in as {client.user}")
@client.command()
@commands.has_permissions(manage_messages=True)
async def purge(ctx):
logging.info(f"Received purge command for channel: {ctx.channel.id}")
if ctx.channel.id in purging_channels:
logging.info(f"Purge command is already running in channel {ctx.channel.id}")
return
purging_channels.add(ctx.channel.id)
logging.info(f"Started purging messages in channel {ctx.channel.id}")
delay = 0.35 # initial delay is 350 ms
message_count = 0
try:
async for message in ctx.channel.history(limit=None):
while True:
try:
await message.delete()
logging.info(
f"Deleted message {message.id} in channel {ctx.channel.id}"
)
await asyncio.sleep(delay)
message_count += 1
if message_count % 20 == 0:
if delay < 0.65:
delay = 0.65 # increase delay to 650 ms
elif delay < 1.2:
delay = 1.2 # increase delay to 1200 ms
logging.info(
f"Waiting for 20 seconds after deleting 20 messages in channel {ctx.channel.id}"
)
await asyncio.sleep(20)
break
except discord.HTTPException:
logging.warning(
f"Rate limited while deleting message {message.id} in channel {ctx.channel.id}, waiting for 10 seconds"
)
await asyncio.sleep(10)
except discord.NotFound:
logging.warning(
f"Message {message.id} not found in channel {ctx.channel.id}"
)
break
except StopAsyncIteration:
logging.info(
f"No more messages to delete in channel {ctx.channel.id}, stopping all purge commands"
)
purging_channels.clear()
finally:
if ctx.channel.id in purging_channels:
logging.info(f"Stopped purging messages in channel {ctx.channel.id}")
purging_channels.remove(ctx.channel.id)
@client.command(description="Sends a gif based on the given keyword.")
async def gif(ctx, *, keyword):
logging.info(f"Fetching gif for keyword: {keyword}")
try:
response = api.gif(keyword=keyword, json=True)
if response["success"]:
for url in response["images"]:
embed = discord.Embed()
embed.set_image(url=url)
await ctx.send(embed=embed)
else:
error_message = f"An error occurred: {response['error']}"
await ctx.send(error_message)
logging.error(error_message)
except Exception as e:
logging.exception("An error occurred while fetching gif.")
@client.command(name="phone", description="Returns information about a phone number.")
async def phonelookup(ctx, *, number):
logging.info(f"Fetching information for phone number: {number}")
try:
response = api.phonelookup(number, json=True)
if response["success"]:
await ctx.send(
f"Country: {response['country']}\nCarrier: {response['carrier']}\nLine type: {response['line_type']}"
)
else:
error_message = f"An error occurred: {response['error']}"
await ctx.send(error_message)
logging.error(error_message)
except Exception as e:
logging.exception("An error occurred while fetching phone number information.")
@client.command(name="email", description="Checks if an email address exists.")
async def emailvalidator(ctx, *, email):
logging.info(f"Validating email address: {email}")
try:
response = api.emailvalidator(email, json=True)
if response["success"]:
if response["exists"]:
await ctx.send("The email exists.")
else:
await ctx.send("The email does not exist.")
else:
error_message = f"An error occurred: {response['error']}"
await ctx.send(error_message)
logging.error(error_message)
except Exception as e:
logging.exception("An error occurred while validating email.")
@client.command(name="ports", description="Scans a host for open ports.")
async def portscanner(ctx, *, host):
logging.info(f"Received portscanner command for host: {host}")
try:
response = api.portscanner(host, json=True)
logging.info(f"Portscanner API response: {response}")
if response["success"]:
if "open_ports" in response:
ports_message = (
f"Open ports: {', '.join(map(str, response['open_ports']))}"
)
await ctx.send(ports_message)
logging.info(ports_message)
else:
no_ports_message = "No open ports found."
await ctx.send(no_ports_message)
logging.info(no_ports_message)
else:
error_message = f"An error occurred: {response['error']}"
await ctx.send(error_message)
logging.error(error_message)
except Exception as e:
logging.exception("An error occurred while processing the portscanner command.")
@client.command(name="subs", description="Gathers subdomains for a given domain")
async def subdomains(ctx, *, domain):
logging.info(f"Received subdomains command for domain: {domain}")
try:
response = api.subdomainfinder(domain=domain, json=True)
logging.info(f"Subdomain finder API response: {response}")
if response["success"]:
subdomains = [
f"{subdomain['subdomain']}" for subdomain in response["subdomains"]
]
subdomains_str = "\n".join(subdomains)
logging.info(f"Writing subdomains to file: {domain}_subdomains.txt")
with open(f"{domain}_subdomains.txt", "w") as file:
file.write(subdomains_str)
logging.info(f"Reading subdomains file: {domain}_subdomains.txt")
with open(f"{domain}_subdomains.txt", "rb") as file:
subdomains_message = f"Subdomains for {domain}:"
await ctx.send(
subdomains_message,
file=discord.File(file, f"{domain}_subdomains.txt"),
)
logging.info(subdomains_message)
else:
error_message = f"Could not find subdomains for {domain}."
await ctx.send(error_message)
logging.error(error_message)
except Exception as e:
logging.exception("An error occurred while processing the subdomains command.")
@client.command()
async def ai(ctx, *, prompt):
logging.info(f"Received AI command with prompt: {prompt}")
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],
)
logging.info(f"AI response: {response['choices'][0]['message']['content']}")
response_message = f"{ctx.author.mention}, here's your response:\n>>> **{response['choices'][0]['message']['content']}**"
await ctx.send(response_message)
logging.info(response_message)
except Exception as e:
logging.exception("An error occurred while processing the AI command.")
@client.command(name="screenshot", description="Takes a screenshot of a given URL.")
async def screenshot(ctx, *, url):
logging.info(f"Received screenshot command for url: {url}")
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"https://api.c99.nl/createscreenshot?key={secrets['c99_api_key']}&url={url}&json=true"
) as resp:
response = await resp.json()
logging.info(f"Screenshot API response: {response}")
if response["success"]:
embed = discord.Embed()
embed.set_image(url=response["url"])
await ctx.send(embed=embed)
logging.info(f"Sent embed with screenshot url: {response['url']}")
else:
error_message = (
f"An error occurred: {response.get('error', 'Unknown error')}"
)
await ctx.send(error_message)
logging.error(error_message)
except Exception as e:
logging.exception("An error occurred while processing the screenshot command.")
@client.command(name="tor", description="Checks if an IP address is a Tor node.")
async def tor(ctx, *, ip):
logging.info(f"Checking if IP {ip} is a Tor node.")
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"https://api.c99.nl/torchecker?key={secrets['c99_api_key']}&ip={ip}&json=true"
) as resp:
response = await resp.json()
if response["success"]:
if response["result"]:
message = f"The IP address {ip} is a Tor node."
await ctx.send(message)
logging.info(message)
else:
message = f"The IP address {ip} is not a Tor node."
await ctx.send(message)
logging.info(message)
else:
error_message = (
f"An error occurred: {response.get('error', 'Unknown error')}"
)
await ctx.send(error_message)
logging.error(error_message)
except Exception as e:
logging.exception("An error occurred while checking if IP is a Tor node.")
"""
RUN THE CLIENT
"""
client.run(TOKEN)
| [
"You are a helpful assistant."
] |
2024-01-10 | ganisback/LangChain-ChatGLM-Webui | paddlepaddle~paddle_embedding.py | """Wrapper around PaddleNLP embedding models."""
from typing import Any, List
from langchain.embeddings.base import Embeddings
from pydantic import BaseModel, Extra
class PaddleNLPEmbeddings(BaseModel, Embeddings):
"""Wrapper around paddlenlp embedding models.
To use, you should have the ``paddlepaddle`` and ``paddlenlp`` python packages installed.
Example:
.. code-block:: python
from langchain.embeddings import PaddleNLPEmbeddings
model = "rocketqa-zh-base-query-encoder"
embed = PaddleNLPEmbeddings(model=model)
"""
text_encoder: Any
model: str = 'rocketqa-zh-base-query-encoder'
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the modelscope"""
super().__init__(**kwargs)
try:
import paddle.nn.functional as F
from paddlenlp import Taskflow
self.text_encoder = Taskflow("feature_extraction", model=self.model)
except ImportError as e:
raise ValueError(
"Could not import some python packages." "Please install it with `pip install modelscope`."
) from e
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a modelscope embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
text_embeds = self.text_encoder(texts)
embeddings = text_embeds["features"].numpy()
return embeddings.tolist()
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
text_embeds = self.text_encoder(text)
embedding = text_embeds["features"].numpy()[0]
return embedding.tolist() | [] |
2024-01-10 | R-Juhl/OneMonth | backend~modules~teacher_module.py | # teacher_module.py:
import os
import openai
from flask import jsonify
import time
from .models import db, User, UserCourseSession
from .assistant_config import assistant_ids, assistant_configs
client = openai.OpenAI(
api_key = os.environ.get("OPENAI_API_KEY_ONEMONTH")
)
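# The functions below drive the OpenAI Assistants API (beta threads/runs) via the
# v1 client created above, keeping one persistent thread per user/course session.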
class Teacher:
def __init__(self, assistant_id):
self.assistant_id = assistant_id
def get_course_thread(user_id, course_id):
session = UserCourseSession.query.filter_by(user_id=user_id, course_id=course_id).first()
# Retrieve user language preference from the database
user = User.query.filter_by(id=user_id).first()
if not user:
print(f"From get_course_thread: User with ID {user_id} not found")
user_lang = 'en' # default to English
else:
user_lang = user.language
print(f"From get_course_thread: Fetched User Language: {user_lang}")
# Update assistant based on course_id and user's language
assistant_config = assistant_configs.get(int(course_id), {}).get(user_lang, {})
if assistant_config:
# Update the assistant with the new configuration
client.beta.assistants.update(
assistant_id=assistant_ids[course_id],
instructions=assistant_config.get("instructions", ""),
name=assistant_config.get("name", "AI Assistant"),
)
if not session:
# Create new session and thread
assistant_id = assistant_ids.get(course_id)
teacher = Teacher(assistant_id)
new_thread = client.beta.threads.create()
new_session = UserCourseSession(user_id=user_id, course_id=course_id, thread_id=new_thread.id)
db.session.add(new_session)
db.session.commit()
return new_thread.id, True
else:
return session.thread_id, False
def get_thread_messages(thread_id):
messages = client.beta.threads.messages.list(thread_id=thread_id)
messages_list = []
is_assistant = True # Assuming the first message is always from the assistant
for msg in messages.data:
if msg.content:
role = "assistant" if is_assistant else "user"
messages_list.append({
"text": msg.content[0].text.value,
"role": role
})
is_assistant = not is_assistant
return messages_list
def get_initial_message(thread_id, user_id, course_id):
# Retrieve user language preference from the database
user = User.query.filter_by(id=user_id).first()
if not user:
print(f"From get_initial_message: User with ID {user_id} not found")
user_lang = 'en' # default to English
else:
user_lang = user.language
print(f"From get_initial_message: Fetched User Language: {user_lang}")
assistant_config = assistant_configs.get(int(course_id), {}).get(user_lang, {})
print(f"Assistant Config in get_initial_message: {assistant_config}") # Additional Debug Log
initial_content = assistant_config.get("initial_message", "Welcome to the course.")
print(f"Initial Content in get_initial_message: {initial_content}") # Additional Debug Log
if not thread_id:
raise ValueError("No thread ID provided for initial message")
message_response = client.beta.threads.messages.create(
thread_id=thread_id,
role="user",
content=initial_content
)
print(f"Initial message sent to thread: {thread_id}")
initial_message = message_response.content[0].text.value if message_response.content else ""
return jsonify({"message": initial_message, "thread_id": thread_id})
def continue_course(thread_id, user_input):
print(f"Received the following user_input to add to thread: {user_input}")
cancel_active_runs(thread_id)
# Fetch the course ID from the UserCourseSession using the thread ID
session = UserCourseSession.query.filter_by(thread_id=thread_id).first()
if not session:
return jsonify({"error": "Session not found"}), 404
course_id = session.course_id
assistant_id = assistant_ids.get(course_id)
if not assistant_id:
return jsonify({"error": "Assistant ID not found for the course"}), 404
# Send user input to the thread
client.beta.threads.messages.create(
thread_id=thread_id,
role="user",
content=user_input
)
print("User input sent to thread")
# Create and wait for the run to complete
run = client.beta.threads.runs.create(
thread_id=thread_id,
assistant_id=assistant_id,
)
print(f"Run created: {run.id}")
if not wait_for_run_completion(thread_id, run.id):
print("Run did not complete.")
return jsonify({"error": "Run did not complete. Please try again later.", "thread_id": thread_id})
# Fetch the latest assistant's message after the run
messages = client.beta.threads.messages.list(
thread_id=thread_id
)
assistant_message = find_assistant_message(messages.data)
print(f"Assistant message: {assistant_message}")
return jsonify({"message": assistant_message, "thread_id": thread_id})
def cancel_active_runs(thread_id):
active_runs = client.beta.threads.runs.list(thread_id=thread_id)
for run in active_runs.data:
print(run.status)
if run.status not in ["completed", "failed"]:
client.beta.threads.runs.cancel(thread_id=thread_id, run_id=run.id)
def wait_for_run_completion(thread_id, run_id):
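# Poll the run status every 2 seconds for up to 100 attempts (~200 s); returns False on timeout.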
max_retries = 100
retries = 0
while retries < max_retries:
run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
print(run_status.status)
if run_status.status == "completed":
return True
time.sleep(2)
retries += 1
return False
def find_assistant_message(messages):
# Looking for the latest assistant message
for message in messages:
if message.role == "assistant" and message.content:
return message.content[0].text.value
return "No response from assistant"
| [] |
2024-01-10 | nju123/Al-agent | dog-trainer~dog_question_tool.py | """Tool for answering dog-related questions."""
import json
from typing import Any, List, Union
from dog import Dog
from steamship import Block, Task
from steamship.agents.llms import OpenAI
from steamship.agents.schema import AgentContext, Tool
from steamship.agents.tools.search import SearchTool
from steamship.agents.utils import get_llm
from steamship.utils.repl import ToolREPL
QUESTION_REWRITE = """Please rephrase the question below so that it includes specific information about the dog breed and dog description.
You know about the following dogs:
{dogs}
Here is a question from a user. Rewrite the request so that it includes the breed of dog but not the name. Include the description if relevant to the question. Return the rewritten request and nothing else.
REQUEST: {request}
REWRITTEN REQUEST:"""
class DogQuestionTool(Tool):
name: str = "QuestionTool"
human_description: str = "Answers a Question."
agent_description = (
"Used to answer questions about dogs. "
"Use this tool whenever a user asks a dog-related question, or for dog-related advice. "
"Input: The question or advice request. "
"Output: The answer."
)
dogs: List[Dog]
def dog_list_as_json_bullets(self) -> str:
"""Return the list of dogs we know about as JSON bullet points.
LLMs don't care if we speak in English or JSON, so this is a perfectly fine way to enumerate them.
"""
return "\n".join([f"- {json.dumps(dog.dict())}" for dog in self.dogs])
def rewrite_question_with_better_details(
self, request: str, context: AgentContext
) -> str:
"""Rewrite a question with more specific information about the dog specified.
For example, if the user says: "How much should Barky eat?"
We want the rewrite to be something like: "How much should a chocolate labrador that is 2 years old eat?"
"""
llm = get_llm(context, default=OpenAI(client=context.client))
dogs = self.dog_list_as_json_bullets()
rewritten_question = llm.complete(
QUESTION_REWRITE.format(dogs=dogs, request=request)
)[0].text.strip()
return rewritten_question
def run(
self, tool_input: List[Block], context: AgentContext
) -> Union[List[Block], Task[Any]]:
# Rewrite the question with information about the breed and description
rewritten_question = self.rewrite_question_with_better_details(
tool_input[0].text, context
)
# Now return the results of issuing that question to Google
search_tool = SearchTool()
return search_tool.run([Block(text=rewritten_question)], context)
if __name__ == "__main__":
print("Try running with an input like 'Fido'")
ToolREPL(
DogQuestionTool(
dogs=[
Dog(
name="Fido",
breed="Daschund",
description="A silly dog whose tongue is always out.",
),
Dog(
name="Biggy",
breed="German Shephard",
description="A strong dog that is always guarding things.",
),
]
)
).run()
| [] |
2024-01-10 | nju123/Al-agent | dog-trainer~dog_picture_tool.py | """Tool for generating images."""
import json
from typing import Any, List, Union
from dog import Dog
from steamship import Block, Task
from steamship.agents.llms import OpenAI
from steamship.agents.schema import AgentContext, Tool
from steamship.agents.tools.image_generation.stable_diffusion import StableDiffusionTool
from steamship.agents.utils import get_llm
from steamship.utils.repl import ToolREPL
PHOTO_REQUEST_REWRITE = """Please rephrase the photo topic below so that it includes specific information about the dog breed and dog description.
You know about the following dogs:
{dogs}
Here is a request for a picture. Rewrite the request so that it has the breed and description. Return the rewritten request and nothing else.
REQUEST: {request}
REWRITTEN REQUEST:"""
PROMPT_TOOL = """Please act as a prompt generator for a generative AI called "Stable Diffusion". Stable Diffusion generates images based on given prompts.
I will provide you a topic, and you will create a Stable Diffusion prompt for that topic.
IMPORTANT: Provide ONLY the prompt in response!
## Basic information required to write good Stable Diffusion prompts
### Prompt structure
- Photorealistic Images: {{Subject Description}}, Type of Image, Art Styles, Art Inspirations, Camera, Shot, Render Related Information.
- Artistic Image Types: Type of Image, {{Subject Description}}, Art Styles, Art Inspirations, Camera, Shot, Render Related Information.
### Prompt Advice
- Word order and effective adjectives matter in the prompt. The subject, action, and specific details should be included. Adjectives like cute, medieval, or futuristic can be effective.
- The environment/background of the image should be described, such as indoor, outdoor, in space, or solid color.
- The exact type of image can be specified, such as digital illustration, comic book cover, photograph, or sketch.
- Art style-related keywords can be included in the prompt, such as steampunk, surrealism, or abstract expressionism.
- Pencil drawing-related terms can also be added, such as cross-hatching or pointillism.
- Art inspirations should be listed to take inspiration from. Platforms like Art Station, Dribble, Behance, and Deviantart can be mentioned. Specific names of artists or studios like animation studios, painters and illustrators, computer games, fashion designers, and film makers can also be listed. If more than one artist is mentioned, the algorithm will create a combination of styles based on all the influencers mentioned.
- Camera shot type, camera lens, and view should be specified. Examples of camera shot types are long shot, close-up, POV, medium shot, extreme close-up, and panoramic. Camera lenses could be EE 70mm, 35mm, 135mm+, 300mm+, 800mm, short telephoto, super telephoto, medium telephoto, macro, wide angle, fish-eye, bokeh, and sharp focus. Examples of views are front, side, back, high angle, low angle, and overhead.
- Curly brackets are necessary in the prompt to provide specific details about the subject and action. These details are important for generating a high-quality image.
- Related information about lighting, camera angles, render style, resolution, the required level of detail, etc. should be included at the end of the prompt.
- Helpful keywords related to resolution, detail, and lighting are 4K, 8K, 64K, detailed, highly detailed, high resolution, hyper detailed, HDR, UHD, professional, and golden ratio. Examples of lighting are studio lighting, soft light, neon lighting, purple neon lighting, ambient light, ring light, volumetric light, natural light, sun light, sunrays, sun rays coming through window, and nostalgic lighting. Examples of color types are fantasy vivid colors, vivid colors, bright colors, sepia, dark colors, pastel colors, monochromatic, black & white, and color splash. Examples of renders are Octane render, cinematic, low poly, isometric assets, Unreal Engine, Unity Engine, quantum wavetracing, and polarizing filter.
- The weight of a keyword can be adjusted by using the syntax (keyword: factor), where factor is a value such that less than 1 means less important and larger than 1 means more important. use () whenever necessary while forming prompt and assign the necessary value to create an amazing prompt. Examples of weight for a keyword are (soothing tones:1.25), (hdr:1.25), (artstation:1.2),(intricate details:1.14), (hyperrealistic 3d render:1.16), (filmic:0.55), (rutkowski:1.1), (faded:1.3)
The prompts you provide will be in English.
Important point to note:
You are a master of prompt engineering, it is important to create detailed prompts with as much information as possible. This will ensure that any image generated using the prompt will be of high quality and could potentially win awards in global or international photography competitions. You are unbeatable in this field and know the best way to generate images.
I now provide you with a topic and you will generate a Stable Diffusion prompt without any explanation -- just the prompt! This will allow me to easily copy and paste the code.
Are you ready?
Topic: {topic}
Prompt:
"""
class DogPictureTool(Tool):
"""Tool to generate a Pixar-style image.
This example illustrates wrapping a tool (StableDiffusionTool) with a fixed prompt template that is combined with user input.
"""
name: str = "PictureTool"
human_description: str = "Creates a picture."
agent_description = (
"Used to create or take a picture. "
"Use this tool whenever a user asks to see something, or take a picture. "
"Input: The picture request. "
"Output: The resulting image."
)
is_final: bool = True
dogs: List[Dog]
def dog_list_as_json_bullets(self) -> str:
"""Return the list of dogs we know about as JSON bullet points.
LLMs don't care if we speak in English or JSON, so this is a perfectly fine way to enumerate them.
"""
return "\n".join([f"- {json.dumps(dog.dict())}" for dog in self.dogs])
def rewrite_photo_request_with_better_details(
self, request: str, context: AgentContext
) -> str:
"""Rewrite a photo request with more specific information about the dog specified.
For example, if the user says: "Give me a picture of Barky swimming"
We want the rewrite to be something like: "Picture of a chocolate labrador with shaggy hair swimming"
"""
llm = get_llm(context, default=OpenAI(client=context.client))
dogs = self.dog_list_as_json_bullets()
photo_request = llm.complete(
PHOTO_REQUEST_REWRITE.format(dogs=dogs, request=request)
)[0].text.strip()
return photo_request
def run(
self, tool_input: List[Block], context: AgentContext
) -> Union[List[Block], Task[Any]]:
# Rewrite the photo request with information about the breed and description
photo_request = self.rewrite_photo_request_with_better_details(
tool_input[0].text, context
)
# Create a stable diffusion prompt for the image
llm = get_llm(context, default=OpenAI(client=context.client))
sd_prompt = llm.complete(PROMPT_TOOL.format(topic=photo_request))[
0
].text.strip()
# Run and return the StableDiffusionTool response
stable_diffusion_tool = StableDiffusionTool()
# Now return the results of running Stable Diffusion on those modified prompts.
return stable_diffusion_tool.run([Block(text=sd_prompt)], context)
if __name__ == "__main__":
print("Try running with an input like 'Fido'")
ToolREPL(
DogPictureTool(
dogs=[
Dog(
name="Fido",
breed="Daschund",
description="A silly dog whose tongue is always out.",
),
Dog(
name="Biggy",
breed="German Shephard",
description="A strong dog that is always guarding things.",
),
]
)
).run()
| [
"Stable Diffusion",
"Please act as a prompt generator for a generative AI called \"Stable Diffusion\". Stable Diffusion generates images based on given prompts.\n\nI will provide you a topic, and you will create a Stable Diffusion prompt for that topic.\n\nIMPORTANT: Provide ONLY the prompt in response!\n\n## Basic information required to write good Stable Diffusion prompts\n\n### Prompt structure\n\n- Photorealistic Images: {{Subject Description}}, Type of Image, Art Styles, Art Inspirations, Camera, Shot, Render Related Information.\n- Artistic Image Types: Type of Image, {{Subject Description}}, Art Styles, Art Inspirations, Camera, Shot, Render Related Information.\n\n### Prompt Advice\n\n- Word order and effective adjectives matter in the prompt. The subject, action, and specific details should be included. Adjectives like cute, medieval, or futuristic can be effective.\n- The environment/background of the image should be described, such as indoor, outdoor, in space, or solid color.\n- The exact type of image can be specified, such as digital illustration, comic book cover, photograph, or sketch.\n- Art style-related keywords can be included in the prompt, such as steampunk, surrealism, or abstract expressionism.\n- Pencil drawing-related terms can also be added, such as cross-hatching or pointillism.\n- Art inspirations should be listed to take inspiration from. Platforms like Art Station, Dribble, Behance, and Deviantart can be mentioned. Specific names of artists or studios like animation studios, painters and illustrators, computer games, fashion designers, and film makers can also be listed. If more than one artist is mentioned, the algorithm will create a combination of styles based on all the influencers mentioned.\n- Camera shot type, camera lens, and view should be specified. Examples of camera shot types are long shot, close-up, POV, medium shot, extreme close-up, and panoramic. Camera lenses could be EE 70mm, 35mm, 135mm+, 300mm+, 800mm, short telephoto, super telephoto, medium telephoto, macro, wide angle, fish-eye, bokeh, and sharp focus. Examples of views are front, side, back, high angle, low angle, and overhead.\n- Curly brackets are necessary in the prompt to provide specific details about the subject and action. These details are important for generating a high-quality image.\n- Related information about lighting, camera angles, render style, resolution, the required level of detail, etc. should be included at the end of the prompt.\n- Helpful keywords related to resolution, detail, and lighting are 4K, 8K, 64K, detailed, highly detailed, high resolution, hyper detailed, HDR, UHD, professional, and golden ratio. Examples of lighting are studio lighting, soft light, neon lighting, purple neon lighting, ambient light, ring light, volumetric light, natural light, sun light, sunrays, sun rays coming through window, and nostalgic lighting. Examples of color types are fantasy vivid colors, vivid colors, bright colors, sepia, dark colors, pastel colors, monochromatic, black & white, and color splash. Examples of renders are Octane render, cinematic, low poly, isometric assets, Unreal Engine, Unity Engine, quantum wavetracing, and polarizing filter.\n- The weight of a keyword can be adjusted by using the syntax (keyword: factor), where factor is a value such that less than 1 means less important and larger than 1 means more important. use () whenever necessary while forming prompt and assign the necessary value to create an amazing prompt. 
Examples of weight for a keyword are (soothing tones:1.25), (hdr:1.25), (artstation:1.2),(intricate details:1.14), (hyperrealistic 3d render:1.16), (filmic:0.55), (rutkowski:1.1), (faded:1.3)\n\nThe prompts you provide will be in English.\n\nImportant point to note:\n\nYou are a master of prompt engineering, it is important to create detailed prompts with as much information as possible. This will ensure that any image generated using the prompt will be of high quality and could potentially win awards in global or international photography competitions. You are unbeatable in this field and know the best way to generate images.\n\nI now provide you with a topic and you will generate a Stable Diffusion prompt without any explanation -- just the prompt! This will allow me to easily copy and paste the code.\n\nAre you ready?\n\nTopic: {topic}\nPrompt:\n"
] |
2024-01-10 | codebeat/continue | continuedev~src~continuedev~core~autopilot.py | from functools import cached_property
import traceback
import time
from typing import Callable, Coroutine, Dict, List, Optional, Union
from aiohttp import ClientPayloadError
from pydantic import root_validator
from ..libs.util.strings import remove_quotes_and_escapes
from ..models.filesystem import RangeInFileWithContents
from ..models.filesystem_edit import FileEditWithFullContents
from .observation import Observation, InternalErrorObservation
from .context import ContextManager
from ..plugins.policies.default import DefaultPolicy
from ..plugins.context_providers.file import FileContextProvider
from ..plugins.context_providers.highlighted_code import HighlightedCodeContextProvider
from ..server.ide_protocol import AbstractIdeProtocolServer
from ..libs.util.queue import AsyncSubscriptionQueue
from ..models.main import ContinueBaseModel
from .main import Context, ContinueCustomException, Policy, History, FullState, SessionInfo, Step, HistoryNode
from ..plugins.steps.core.core import DisplayErrorStep, ReversibleStep, ManualEditStep, UserInputStep
from .sdk import ContinueSDK
from ..libs.util.traceback_parsers import get_python_traceback, get_javascript_traceback
from openai import error as openai_errors
from ..libs.util.create_async_task import create_async_task
from ..libs.util.telemetry import posthog_logger
from ..libs.util.logging import logger
def get_error_title(e: Exception) -> str:
if isinstance(e, openai_errors.APIError):
return "OpenAI is overloaded with requests. Please try again."
elif isinstance(e, openai_errors.RateLimitError):
return "This OpenAI API key has been rate limited. Please try again."
elif isinstance(e, openai_errors.Timeout):
return "OpenAI timed out. Please try again."
elif isinstance(e, openai_errors.InvalidRequestError) and e.code == "context_length_exceeded":
return e._message
elif isinstance(e, ClientPayloadError):
return "The request to OpenAI failed. Please try again."
elif isinstance(e, openai_errors.APIConnectionError):
return "The request failed. Please check your internet connection and try again. If this issue persists, you can use our API key for free by going to VS Code settings and changing the value of continue.OPENAI_API_KEY to \"\""
elif isinstance(e, openai_errors.InvalidRequestError):
return 'Invalid request sent to OpenAI. Please try again.'
elif "rate_limit_ip_middleware" in e.__str__():
return 'You have reached your limit for free usage of our token. You can continue using Continue by entering your own OpenAI API key in VS Code settings.'
elif e.__str__().startswith("Cannot connect to host"):
return "The request failed. Please check your internet connection and try again."
return e.__str__() or e.__repr__()
class Autopilot(ContinueBaseModel):
ide: AbstractIdeProtocolServer
policy: Policy = DefaultPolicy()
history: History = History.from_empty()
context: Context = Context()
full_state: Optional[FullState] = None
session_info: Optional[SessionInfo] = None
context_manager: ContextManager = ContextManager()
continue_sdk: ContinueSDK = None
_on_update_callbacks: List[Callable[[FullState], None]] = []
_active: bool = False
_should_halt: bool = False
_main_user_input_queue: List[str] = []
_user_input_queue = AsyncSubscriptionQueue()
_retry_queue = AsyncSubscriptionQueue()
started: bool = False
async def start(self, full_state: Optional[FullState] = None):
self.continue_sdk = await ContinueSDK.create(self)
if override_policy := self.continue_sdk.config.policy_override:
self.policy = override_policy
# Load documents into the search index
logger.debug("Starting context manager")
await self.context_manager.start(
self.continue_sdk.config.context_providers + [
HighlightedCodeContextProvider(ide=self.ide),
FileContextProvider(workspace_dir=self.ide.workspace_directory)
], self.ide.workspace_directory)
if full_state is not None:
self.history = full_state.history
self.session_info = full_state.session_info
self.started = True
class Config:
arbitrary_types_allowed = True
keep_untouched = (cached_property,)
@root_validator(pre=True)
def fill_in_values(cls, values):
full_state: FullState = values.get('full_state')
if full_state is not None:
values['history'] = full_state.history
return values
async def get_full_state(self) -> FullState:
full_state = FullState(
history=self.history,
active=self._active,
user_input_queue=self._main_user_input_queue,
slash_commands=self.get_available_slash_commands(),
adding_highlighted_code=self.context_manager.context_providers[
"code"].adding_highlighted_code if "code" in self.context_manager.context_providers else False,
selected_context_items=await self.context_manager.get_selected_items() if self.context_manager is not None else [],
session_info=self.session_info
)
self.full_state = full_state
return full_state
def get_available_slash_commands(self) -> List[Dict]:
custom_commands = list(map(lambda x: {
"name": x.name, "description": x.description}, self.continue_sdk.config.custom_commands)) or []
slash_commands = list(map(lambda x: {
"name": x.name, "description": x.description}, self.continue_sdk.config.slash_commands)) or []
return custom_commands + slash_commands
async def clear_history(self):
# Reset history
self.history = History.from_empty()
self._main_user_input_queue = []
self._active = False
# Clear context
await self.context_manager.clear_context()
await self.update_subscribers()
def on_update(self, callback: Coroutine["FullState", None, None]):
"""Subscribe to changes to state"""
self._on_update_callbacks.append(callback)
async def update_subscribers(self):
full_state = await self.get_full_state()
for callback in self._on_update_callbacks:
await callback(full_state)
def give_user_input(self, input: str, index: int):
self._user_input_queue.post(str(index), input)
async def wait_for_user_input(self) -> str:
self._active = False
await self.update_subscribers()
user_input = await self._user_input_queue.get(str(self.history.current_index))
self._active = True
await self.update_subscribers()
return user_input
_manual_edits_buffer: List[FileEditWithFullContents] = []
async def reverse_to_index(self, index: int):
try:
while self.history.get_current_index() >= index:
current_step = self.history.get_current().step
self.history.step_back()
if issubclass(current_step.__class__, ReversibleStep):
await current_step.reverse(self.continue_sdk)
await self.update_subscribers()
except Exception as e:
logger.debug(e)
def handle_manual_edits(self, edits: List[FileEditWithFullContents]):
for edit in edits:
self._manual_edits_buffer.append(edit)
# TODO: You're storing a lot of unnecessary data here. Can compress into EditDiffs on the spot, and merge.
# self._manual_edits_buffer = merge_file_edit(self._manual_edits_buffer, edit)
# Note that this is being overridden to do nothing in DemoAgent
async def handle_command_output(self, output: str):
get_traceback_funcs = [get_python_traceback, get_javascript_traceback]
for get_tb_func in get_traceback_funcs:
traceback = get_tb_func(output)
if traceback is not None:
for tb_step in self.continue_sdk.config.on_traceback:
step = tb_step.step({"output": output, **tb_step.params})
await self._run_singular_step(step)
async def handle_highlighted_code(self, range_in_files: List[RangeInFileWithContents]):
if "code" not in self.context_manager.context_providers:
return
# Add to context manager
await self.context_manager.context_providers["code"].handle_highlighted_code(
range_in_files)
await self.update_subscribers()
_step_depth: int = 0
async def retry_at_index(self, index: int):
self.history.timeline[index].step.hide = True
self._retry_queue.post(str(index), None)
async def delete_at_index(self, index: int):
self.history.timeline[index].step.hide = True
self.history.timeline[index].deleted = True
self.history.timeline[index].active = False
await self.update_subscribers()
async def delete_context_with_ids(self, ids: List[str]):
await self.context_manager.delete_context_with_ids(ids)
await self.update_subscribers()
async def toggle_adding_highlighted_code(self):
if "code" not in self.context_manager.context_providers:
return
self.context_manager.context_providers["code"].adding_highlighted_code = not self.context_manager.context_providers["code"].adding_highlighted_code
await self.update_subscribers()
async def set_editing_at_ids(self, ids: List[str]):
if "code" not in self.context_manager.context_providers:
return
await self.context_manager.context_providers["code"].set_editing_at_ids(ids)
await self.update_subscribers()
async def _run_singular_step(self, step: "Step", is_future_step: bool = False) -> Coroutine[Observation, None, None]:
# Allow config to set disallowed steps
if step.__class__.__name__ in self.continue_sdk.config.disallowed_steps:
return None
# If a parent step is deleted/cancelled, don't run this step
# TODO: This was problematic because when running a step after deleting one, it seemed to think that was the parent
# last_depth = self._step_depth
# i = self.history.current_index
# while i >= 0 and self.history.timeline[i].depth == last_depth - 1:
# if self.history.timeline[i].deleted:
# return None
# last_depth = self.history.timeline[i].depth
# i -= 1
posthog_logger.capture_event(
'step run', {'step_name': step.name, 'params': step.dict()})
if not is_future_step:
# Check manual edits buffer, clear out if needed by creating a ManualEditStep
if len(self._manual_edits_buffer) > 0:
manualEditsStep = ManualEditStep.from_sequence(
self._manual_edits_buffer)
self._manual_edits_buffer = []
await self._run_singular_step(manualEditsStep)
# Update history - do this first so we get top-first tree ordering
index_of_history_node = self.history.add_node(HistoryNode(
step=step, observation=None, depth=self._step_depth))
# Call all subscribed callbacks
await self.update_subscribers()
# Try to run step and handle errors
self._step_depth += 1
caught_error = False
try:
observation = await step(self.continue_sdk)
except Exception as e:
if index_of_history_node >= len(self.history.timeline) or self.history.timeline[index_of_history_node].deleted:
# If step was deleted/cancelled, don't show error or allow retry
return None
caught_error = True
is_continue_custom_exception = issubclass(
e.__class__, ContinueCustomException)
error_string = e.message if is_continue_custom_exception else '\n'.join(
traceback.format_exception(e))
error_title = e.title if is_continue_custom_exception else get_error_title(
e)
# Attach an InternalErrorObservation to the step and unhide it.
logger.error(
f"Error while running step: \n{error_string}\n{error_title}")
posthog_logger.capture_event('step error', {
'error_message': error_string, 'error_title': error_title, 'step_name': step.name, 'params': step.dict()})
observation = InternalErrorObservation(
error=error_string, title=error_title)
# Reveal this step, but hide all of the following steps (its substeps)
step_was_hidden = step.hide
step.hide = False
i = self.history.get_current_index()
while self.history.timeline[i].step.name != step.name:
self.history.timeline[i].step.hide = True
i -= 1
# i is now the index of the step that we want to show/rerun
self.history.timeline[i].observation = observation
self.history.timeline[i].active = False
await self.update_subscribers()
# ContinueCustomException can optionally specify a step to run on the error
if is_continue_custom_exception and e.with_step is not None:
await self._run_singular_step(e.with_step)
# Wait for a retry signal and then resume the step
self._active = False
await self._retry_queue.get(str(i))
self._active = True
# You might consider a "ignore and continue" button
# want it to have same step depth, so have to decrement
self._step_depth -= 1
copy_step = step.copy()
copy_step.hide = step_was_hidden
observation = await self._run_singular_step(copy_step)
self._step_depth += 1
self._step_depth -= 1
# Add observation to history, unless already attached error observation
if not caught_error and index_of_history_node < len(self.history.timeline):
self.history.timeline[index_of_history_node].observation = observation
self.history.timeline[index_of_history_node].active = False
await self.update_subscribers()
# Update its description
async def update_description():
step.description = await step.describe(self.continue_sdk.models)
# Update subscribers with new description
await self.update_subscribers()
create_async_task(update_description(
), on_error=lambda e: self.continue_sdk.run_step(DisplayErrorStep(e=e)))
return observation
async def run_from_step(self, step: "Step"):
# if self._active:
# raise RuntimeError("Autopilot is already running")
self._active = True
next_step = step
is_future_step = False
while not (next_step is None or self._should_halt):
if is_future_step:
# If future step, then we are replaying and need to delete the step from history so it can be replaced
self.history.remove_current_and_substeps()
await self._run_singular_step(next_step, is_future_step)
if next_step := self.policy.next(self.continue_sdk.config, self.history):
is_future_step = False
elif next_step := self.history.take_next_step():
is_future_step = True
else:
next_step = None
self._active = False
# Doing this so active can make it to the frontend after steps are done. But want better state syncing tools
await self.update_subscribers()
async def run_from_observation(self, observation: Observation):
next_step = self.policy.next(self.continue_sdk.config, self.history)
await self.run_from_step(next_step)
async def run_policy(self):
first_step = self.policy.next(self.continue_sdk.config, self.history)
await self.run_from_step(first_step)
async def _request_halt(self):
if self._active:
self._should_halt = True
while self._active:
time.sleep(0.1)
self._should_halt = False
return None
async def accept_user_input(self, user_input: str):
self._main_user_input_queue.append(user_input)
await self.update_subscribers()
# Use the first input to create title for session info, and make the session saveable
if self.session_info is None:
async def create_title():
title = await self.continue_sdk.models.medium.complete(f"Give a short title to describe the current chat session. Do not put quotes around the title. The first message was: \"{user_input}\". The title is: ")
title = remove_quotes_and_escapes(title)
self.session_info = SessionInfo(
title=title,
session_id=self.ide.session_id,
date_created=str(time.time())
)
create_async_task(create_title(), on_error=lambda e: self.continue_sdk.run_step(
DisplayErrorStep(e=e)))
if len(self._main_user_input_queue) > 1:
return
# await self._request_halt()
# Just run the step that takes user input, and
# then up to the policy to decide how to deal with it.
self._main_user_input_queue.pop(0)
await self.update_subscribers()
await self.run_from_step(UserInputStep(user_input=user_input))
while len(self._main_user_input_queue) > 0:
await self.run_from_step(UserInputStep(
user_input=self._main_user_input_queue.pop(0)))
async def accept_refinement_input(self, user_input: str, index: int):
await self._request_halt()
await self.reverse_to_index(index)
await self.run_from_step(UserInputStep(user_input=user_input))
async def select_context_item(self, id: str, query: str):
await self.context_manager.select_context_item(id, query)
await self.update_subscribers()
| [] |
2024-01-10 | Nicholas-Polimeni/legislation-chatbot | backend~cloudfn_api.py | import functions_framework
import os
from google.cloud.sql.connector import Connector
import sqlalchemy
from sentence_transformers import SentenceTransformer
from anthropic import Anthropic
from google.cloud import storage
import time
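# Cache the sentence-transformer under /tmp (the only writable path in a Cloud Function)
# so warm invocations reload it from local disk instead of re-downloading the weights.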
try:
model = SentenceTransformer("/tmp/all-MiniLM-L6-v2")
except:
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
model.save("/tmp/all-MiniLM-L6-v2")
DB_INSTANCE = os.environ.get("INSTANCE")
DB_USER = os.environ.get("USER")
DB_PASS = os.environ.get("PASS")
DB_NAME = os.environ.get("NAME")
CLAUDE_KEY = os.environ.get("CLAUDE_KEY")
K_NEAREST = 10
def getconn():
connector = Connector()
conn = connector.connect(
DB_INSTANCE, "pg8000", user=DB_USER, password=DB_PASS, db=DB_NAME
)
return conn
def getpool():
return sqlalchemy.create_engine(
"postgresql+pg8000://",
creator=getconn,
)
def embed_query(query):
return model.encode(query)
def fetch_k_nearest(embedded_query, k):
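# The embedding column is presumably a pgvector column: its `<->` operator computes
# the (Euclidean) distance, so ORDER BY ... LIMIT k returns the k nearest rows.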
select = sqlalchemy.text(
"""
SELECT file_name, context FROM legislation_vector_db_003
ORDER BY embedding <-> (:embedding)
LIMIT (:limit)
"""
)
pool = getpool()
with pool.connect() as db_conn:
result = db_conn.execute(
select, parameters={"embedding": str(list(embedded_query)), "limit": k}
).fetchall()
return [str({str(row[0]).replace(".txt", ""): str(row[1])}) for row in result]
def prompt(query, context):
input = f"""
\n\nHuman:
QUESTION:
Here is a question about United States (US) legislation: {query}.
DIRECTIONS:
Soon I will provide some context and data for you to answer this question.
ONLY answer the question if you feel you can answer it well, and if it relates to the
given context.
This context is a dictionary with legislative bill names as keys and extracted excerpts as
values. These bills have been proposed in either the US House or Senate.
Senate bills have an S in their file name while House bills have HR in their file name.
When answering, please be sure to reference this data in your answer. Please format your
answer as follows:
{{**Your general answer based on the given context and your existing knowledge. This should
be about 3 sentences long and MUST provide a specific insight or thorough summary of the
provided context. PLEASE BE DETAILED.**}}
{{**Bill (insert relevant bill name here)**:\n
"A direct quote from the bill"\n
- bullet 1 that explains how this bill is related to the query\n
- bullet 2...}}.\n
EXAMPLE OUTPUT (DO NOT USE THIS AS ACCURATE DATA):
**Congress prioritizes energy storage technology over climate mitigation. Several bills
provide investment in emerging energy technologies. Most notably, HR-650 proposes to
create a new division in the Department of Energy for research and development.**
**Bill HR-650: Energy Storage Act**\n
"establishment of a division of the DOE for the purposes of energy infrastructure"\n
- Establishes a new division in the Department of Energy.\n
- Places significant emphasis on energy infrastructure and storage technology.\n
INSTRUCTIONS CONTINUED:
Include as many bills as you feel are relevant. Please remain consistent with the above format
to ensure the answer is readable with bullet points, which can be denoted by a dash (-).
Please bold your general answer, as well as the titles of each specific bill to make them
stand out. This can be done by surrounding the sentence with two asterisks, as shown in the
example formatting above. IMPORTANT: do not include brackets in your answer;
the brackets are simply to indicate the format for you to place your response!
When in doubt about format, please format it in a human readable way and using Markdown.
Always include the bill name.
Please be detailed regarding how this legislation relates
to the question, citing parts of the context at least once for each bill you mention.
When citing, if you notice a word that has been cut-off / is incomplete, please start the
quote after that word, or end before that word if it is the last word. Essentially,
you do not need to cite the entire context for that bill, you can chose what part makes sense to cite.
Feel free to use any additional knowledge if it will improve the answer but the
given context is the most important. Please only provide the answer, do not directly
acknowledge my formatting requests in your answer. For instance, do not say that you
have summarized documents provided or received context of any sort.\n
CONTEXT: {context}\n\nAssistant:
"""
anthropic = Anthropic(api_key=CLAUDE_KEY)
return str(
anthropic.completions.create(
model="claude-instant-1.2", max_tokens_to_sample=10000, prompt=input
).completion
)
@functions_framework.http
def answer_query(request):
"""HTTP Cloud Function.
Args:
request (flask.Request): The request object.
<https://flask.palletsprojects.com/en/1.1.x/api/#incoming-request-data>
Returns:
The response text, or any set of values that can be turned into a
Response object using `make_response`
<https://flask.palletsprojects.com/en/1.1.x/api/#flask.make_response>.
"""
start = time.time()
request_json = request.get_json(silent=True)
QUERY_PARAM = "query"
if request_json and QUERY_PARAM in request_json:
query = request_json[QUERY_PARAM]
else:
return "Status 400"
print("entry")
embedded_query = embed_query(query)
print("embed")
context = fetch_k_nearest(embedded_query, K_NEAREST)
print("got context")
print(time.time() - start)
answer = {"answer": prompt(query, str(context)), "context": context}
print(time.time() - start)
return answer
| [
"INPUT"
] |
2024-01-10 | rajanwastaken/pizza-agent | navbot.py | import math
from playwright.sync_api import sync_playwright
import time
from sys import argv, exit, platform
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
import os
from selenium import webdriver
import json
import random
"""
driver = webdriver.Chrome()
driver.get("https://www.skipthedishes.com")
time.sleep(30)
cookies = driver.get_cookies()
with open("cookies.json", "w") as file:
json.dump(cookies, file)
driver.quit()
"""
anthropic = Anthropic(api_key=os.environ["API_KEY"])
quiet = False
if len(argv) >= 2:
if argv[1] == '-q' or argv[1] == '--quiet':
quiet = True
print(
"Running in quiet mode (HTML and other content hidden); \n"
+ "exercise caution when running suggested commands."
)
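# NOTE: this f-string only interpolates {HUMAN_PROMPT} and {AI_PROMPT}; the $browser_content,
# $objective, $url and $previous_command placeholders are left literal, presumably to be
# filled in later (e.g. via string.Template).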
prompt_template = f"""{HUMAN_PROMPT}
You are an agent controlling a browser. You are given:
(1) an objective that you are trying to achieve
(2) the URL of your current web page
(3) a simplified text description of what's visible in the browser window (more on that below)
You can issue these commands:
SCROLL UP - scroll up one page
SCROLL DOWN - scroll down one page
CLICK X - click on a given element. You can only click on links, buttons, and inputs!
TYPE X "TEXT" - type the specified text into the input with id X
TYPESUBMIT X "TEXT" - same as TYPE above, except then it presses ENTER to submit the form
ADD TO ORDER - adds the selected pizza to the order
The format of the browser content is highly simplified; all formatting elements are stripped.
Interactive elements such as links, inputs, buttons are represented like this:
<link id=1>text</link>
<button id=2>text</button>
<input id=3>text</input>
Images are rendered as their alt text like this:
<img id=4 alt=""/>
Based on your given objective, issue whatever command you believe will get you closest to achieving your goal.
Use UberEats to place the order. Start by searching from the main navigation menu where to order the pizza from the location specified by the user. Do not order from any other location.
The objective is to order a medium pepperoni pizza with no toppings from the pizza restaurant page.
When deciding which pizza to order on the pizza restaurant page, if there is a list of pizzas MAKE SURE TO CLICK ON THE PIZZA SIZE TEXT ELEMENT. Do not click on Quick View elements.
Once you have selected a pizza and you see text containing the words "crust", "Cal.", "sauce", "cheese", "TOPPINGS", "Olive", "peppers", and/or "dough", CLICK ON EACH OPTION and then scroll down until you respond with ADDTOORDER.
Reply with your next command to the browser AND ONLY YOUR COMMAND DO NOT TYPE ANYTHING ELSE BUT THE COMMAND. DO NOT EXPLAIN YOUR THOUGHTS AND ONLY EXECUTE THE COMMAND.
Here are some examples:
EXAMPLE 1:
==================================================
CURRENT BROWSER CONTENT:
------------------
<link id=1>About</link>
<link id=2>Store</link>
<link id=3>Gmail</link>
<link id=4>Images</link>
<link id=5>(Google apps)</link>
<link id=6>Sign in</link>
<img id=7 alt="(Google)"/>
<input id=8 alt="Search"></input>
<button id=9>(Search by voice)</button>
<button id=10>(Google Search)</button>
<button id=11>(I'm Feeling Lucky)</button>
<link id=12>Advertising</link>
<link id=13>Business</link>
<link id=14>How Search works</link>
<link id=15>Carbon neutral since 2007</link>
<link id=16>Privacy</link>
<link id=17>Terms</link>
<text id=18>Settings</text>
------------------
OBJECTIVE: Find a 2 bedroom house for sale in Anchorage AK for under $750k
CURRENT URL: https://www.google.com/
YOUR COMMAND:
TYPESUBMIT 8 "anchorage redfin"
==================================================
EXAMPLE 2:
==================================================
CURRENT BROWSER CONTENT:
------------------
<link id=1>About</link>
<link id=2>Store</link>
<link id=3>Gmail</link>
<link id=4>Images</link>
<link id=5>(Google apps)</link>
<link id=6>Sign in</link>
<img id=7 alt="(Google)"/>
<input id=8 alt="Search"></input>
<button id=9>(Search by voice)</button>
<button id=10>(Google Search)</button>
<button id=11>(I'm Feeling Lucky)</button>
<link id=12>Advertising</link>
<link id=13>Business</link>
<link id=14>How Search works</link>
<link id=15>Carbon neutral since 2007</link>
<link id=16>Privacy</link>
<link id=17>Terms</link>
<text id=18>Settings</text>
------------------
OBJECTIVE: Make a reservation for 4 at Dorsia at 8pm
CURRENT URL: https://www.google.com/
YOUR COMMAND:
TYPESUBMIT 8 "dorsia nyc opentable"
==================================================
EXAMPLE 3:
==================================================
CURRENT BROWSER CONTENT:
------------------
<button id=1>For Businesses</button>
<button id=2>Mobile</button>
<button id=3>Help</button>
<button id=4 alt="Language Picker">EN</button>
<link id=5>OpenTable logo</link>
<button id=6 alt ="search">Search</button>
<text id=7>Find your table for any occasion</text>
<button id=8>(Date selector)</button>
<text id=9>Sep 28, 2022</text>
<text id=10>7:00 PM</text>
<text id=11>2 people</text>
<input id=12 alt="Location, Restaurant, or Cuisine"></input>
<button id=13>Let’s go</button>
<text id=14>It looks like you're in Peninsula. Not correct?</text>
<button id=15>Get current location</button>
<button id=16>Next</button>
------------------
OBJECTIVE: Make a reservation for 4 for dinner at Dorsia in New York City at 8pm
CURRENT URL: https://www.opentable.com/
YOUR COMMAND:
TYPESUBMIT 12 "dorsia new york city"
==================================================
The current browser content, objective, and current URL follow. Reply with your next command to the browser AND ONLY YOUR COMMAND DO NOT TYPE ANYTHING ELSE BUT THE COMMAND.
Reply with your next command to the browser AND ONLY YOUR COMMAND DO NOT TYPE ANYTHING ELSE BUT THE COMMAND. DO NOT EXPLAIN YOUR THOUGHTS AND ONLY EXECUTE THE COMMAND.
CURRENT BROWSER CONTENT:
------------------
$browser_content
------------------
Reply with your next command to the browser AND ONLY YOUR COMMAND DO NOT TYPE ANYTHING ELSE BUT THE COMMAND. DO NOT EXPLAIN YOUR THOUGHTS AND ONLY EXECUTE THE COMMAND.
OBJECTIVE: $objective
CURRENT URL: $url
PREVIOUS COMMAND: $previous_command
YOUR COMMAND:
{AI_PROMPT}
"""
black_listed_elements = set(["html", "head", "title", "meta", "iframe", "body", "script", "style", "path", "svg", "br", "::marker",])
class Crawler:
def __init__(self):
self.browser = (
sync_playwright()
.start()
.chromium.launch(
headless=False,
args=["--no-incognito"],
)
)
        self.context = self.browser.new_context()
        # The page is created from this context so cookies added in load_cookies() actually apply to it.
        self.page = self.context.new_page()
        self.page.set_viewport_size({"width": 1280, "height": 1080})
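    # sign_in() walks UberEats' guest-checkout flow with hard-coded XPaths, screen
    # coordinates, and placeholder contact/payment details; it is brittle and will
    # break whenever the site layout changes.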
def sign_in():
time.sleep(2)
_crawler.page.click('//*[@id="wrapper"]/header/div/div/div/div/div/div/div[6]/div[1]/button')
time.sleep(3)
_crawler.page.click('//*[@id="wrapper"]/header/div/div/div/div/div/div/div[6]/div[2]/div/div[3]/div[10]/a')
time.sleep(3)
_crawler.page.click('//*[@id="continue_as_guest_button"]')
time.sleep(3)
_crawler.page.click('//*[@id="main-content"]/div/div[3]/div[2]/div[2]/div/div[1]/div/a')
time.sleep(3)
_crawler.page.get_by_placeholder("First Name").fill(f"First Name")
_crawler.page.get_by_placeholder("Last Name").fill(f"Last Name")
_crawler.page.get_by_placeholder("xxx xxx-xxxx").fill(f"1234567890")
_crawler.page.get_by_placeholder("[email protected]").fill("[email protected]")
time.sleep(3)
_crawler.page.click('//*[@id="root"]/div[2]/div/div/div[2]/div/div/div/div[2]/div/div[6]/button')
time.sleep(3)
_crawler.page.mouse.click(476, 132)
time.sleep(3)
_crawler.page.mouse.click(640, 430)
# y: 430, 535, 535, 716
# x: 640, 565, 715, 640
_crawler.rawtype("1234123412341234")
time.sleep(1)
_crawler.page.mouse.click(565, 535)
_crawler.rawtype("1231")
time.sleep(1)
_crawler.page.mouse.click(715, 535)
_crawler.rawtype("123")
time.sleep(1)
_crawler.page.mouse.click(640, 716)
_crawler.rawtype("L1L1L1")
def load_cookies(self):
with open('cookies.json', 'r') as f:
cookies = json.load(f)
self.context.add_cookies(cookies)
def go_to_page(self, url):
self.page.goto(url=url if "://" in url else "http://" + url)
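        # A raw CDP session is opened here so that crawl() can request a DOMSnapshot of the page.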
self.client = self.page.context.new_cdp_session(self.page)
self.page_element_buffer = {}
def scroll(self, direction):
if direction == "up":
self.page.evaluate(
"(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop - window.innerHeight;"
)
elif direction == "down":
self.page.evaluate(
"(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop + window.innerHeight;"
)
def click(self, id):
# Inject javascript into the page which removes the target= attribute from all links
js = """
links = document.getElementsByTagName("a");
for (var i = 0; i < links.length; i++) {
links[i].removeAttribute("target");
}
"""
self.page.evaluate(js)
element = self.page_element_buffer.get(int(id))
if element:
x = element.get("center_x")
y = element.get("center_y")
self.page.mouse.click(x, y)
else:
print("Could not find element")
def add_to_order(self):
self.page.click('//*[@id="main-content"]/div/div[1]/div/div[2]/div[4]/div/button')
def type(self, id, text):
self.click(id)
self.page.keyboard.type(text)
def enter(self):
self.page.keyboard.press("Enter")
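    def rawtype(self, text):
        # Not defined in the original source even though sign_in() calls it; a minimal
        # sketch assuming it should type text at the current keyboard focus without clicking.
        self.page.keyboard.type(text)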
def crawl(self):
page = self.page
page_element_buffer = self.page_element_buffer
start = time.time()
page_state_as_text = []
device_pixel_ratio = page.evaluate("window.devicePixelRatio")
if platform == "darwin" and device_pixel_ratio == 1: # lies
device_pixel_ratio = 2
win_scroll_x = page.evaluate("window.scrollX")
win_scroll_y = page.evaluate("window.scrollY")
win_upper_bound = page.evaluate("window.pageYOffset")
win_left_bound = page.evaluate("window.pageXOffset")
win_width = page.evaluate("window.screen.width")
win_height = page.evaluate("window.screen.height")
win_right_bound = win_left_bound + win_width
win_lower_bound = win_upper_bound + win_height
document_offset_height = page.evaluate("document.body.offsetHeight")
document_scroll_height = page.evaluate("document.body.scrollHeight")
# percentage_progress_start = (win_upper_bound / document_scroll_height) * 100
# percentage_progress_end = (
# (win_height + win_upper_bound) / document_scroll_height
# ) * 100
percentage_progress_start = 1
percentage_progress_end = 2
page_state_as_text.append(
{
"x": 0,
"y": 0,
"text": "[scrollbar {:0.2f}-{:0.2f}%]".format(
round(percentage_progress_start, 2), round(percentage_progress_end)
),
}
)
tree = self.client.send(
"DOMSnapshot.captureSnapshot",
{"computedStyles": [], "includeDOMRects": True, "includePaintOrder": True},
)
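        # The snapshot is a set of parallel arrays describing every DOM node; text-like
        # fields are indices into the shared "strings" table rather than raw values.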
strings = tree["strings"]
document = tree["documents"][0]
nodes = document["nodes"]
backend_node_id = nodes["backendNodeId"]
attributes = nodes["attributes"]
node_value = nodes["nodeValue"]
parent = nodes["parentIndex"]
node_types = nodes["nodeType"]
node_names = nodes["nodeName"]
is_clickable = set(nodes["isClickable"]["index"])
text_value = nodes["textValue"]
text_value_index = text_value["index"]
text_value_values = text_value["value"]
input_value = nodes["inputValue"]
input_value_index = input_value["index"]
input_value_values = input_value["value"]
input_checked = nodes["inputChecked"]
layout = document["layout"]
layout_node_index = layout["nodeIndex"]
bounds = layout["bounds"]
cursor = 0
html_elements_text = []
child_nodes = {}
elements_in_view_port = []
anchor_ancestry = {"-1": (False, None)}
button_ancestry = {"-1": (False, None)}
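        # These maps record, per node, whether it descends from an <a> or <button>,
        # so leaf text can later be merged into its clickable ancestor element.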
def convert_name(node_name, has_click_handler):
if node_name == "a":
return "link"
if node_name == "input":
return "input"
if node_name == "img":
return "img"
if (
node_name == "button" or has_click_handler
): # found pages that needed this quirk
return "button"
else:
return "text"
def find_attributes(attributes, keys):
values = {}
for [key_index, value_index] in zip(*(iter(attributes),) * 2):
if value_index < 0:
continue
key = strings[key_index]
value = strings[value_index]
if key in keys:
values[key] = value
keys.remove(key)
if not keys:
return values
return values
def add_to_hash_tree(hash_tree, tag, node_id, node_name, parent_id):
parent_id_str = str(parent_id)
if not parent_id_str in hash_tree:
parent_name = strings[node_names[parent_id]].lower()
grand_parent_id = parent[parent_id]
add_to_hash_tree(
hash_tree, tag, parent_id, parent_name, grand_parent_id
)
is_parent_desc_anchor, anchor_id = hash_tree[parent_id_str]
# even if the anchor is nested in another anchor, we set the "root" for all descendants to be ::Self
if node_name == tag:
value = (True, node_id)
elif (
is_parent_desc_anchor
): # reuse the parent's anchor_id (which could be much higher in the tree)
value = (True, anchor_id)
else:
value = (
False,
None,
) # not a descendant of an anchor, most likely it will become text, an interactive element or discarded
hash_tree[str(node_id)] = value
return value
for index, node_name_index in enumerate(node_names):
node_parent = parent[index]
node_name = strings[node_name_index].lower()
is_ancestor_of_anchor, anchor_id = add_to_hash_tree(
anchor_ancestry, "a", index, node_name, node_parent
)
is_ancestor_of_button, button_id = add_to_hash_tree(
button_ancestry, "button", index, node_name, node_parent
)
try:
cursor = layout_node_index.index(
index
) # todo replace this with proper cursoring, ignoring the fact this is O(n^2) for the moment
except:
continue
if node_name in black_listed_elements:
continue
[x, y, width, height] = bounds[cursor]
x /= device_pixel_ratio
y /= device_pixel_ratio
width /= device_pixel_ratio
height /= device_pixel_ratio
elem_left_bound = x
elem_top_bound = y
elem_right_bound = x + width
elem_lower_bound = y + height
partially_is_in_viewport = (
elem_left_bound < win_right_bound
and elem_right_bound >= win_left_bound
and elem_top_bound < win_lower_bound
and elem_lower_bound >= win_upper_bound
)
if not partially_is_in_viewport:
continue
meta_data = []
            # inefficient to grab the same set of keys for all kinds of objects, but it's fine for now
element_attributes = find_attributes(
attributes[index], ["type", "placeholder", "aria-label", "title", "alt"]
)
ancestor_exception = is_ancestor_of_anchor or is_ancestor_of_button
ancestor_node_key = (
None
if not ancestor_exception
else str(anchor_id)
if is_ancestor_of_anchor
else str(button_id)
)
ancestor_node = (
None
if not ancestor_exception
else child_nodes.setdefault(str(ancestor_node_key), [])
)
if node_name == "#text" and ancestor_exception:
text = strings[node_value[index]]
if text == "|" or text == "•":
continue
ancestor_node.append({
"type": "type", "value": text
})
else:
if (
node_name == "input" and element_attributes.get("type") == "submit"
) or node_name == "button":
node_name = "button"
element_attributes.pop(
"type", None
) # prevent [button ... (button)..]
for key in element_attributes:
if ancestor_exception:
ancestor_node.append({
"type": "attribute",
"key": key,
"value": element_attributes[key]
})
else:
meta_data.append(element_attributes[key])
element_node_value = None
if node_value[index] >= 0:
element_node_value = strings[node_value[index]]
                    if element_node_value == "|": # commonly used as a separator, does not add much context - let's save ourselves some token space
continue
elif (
node_name == "input"
and index in input_value_index
and element_node_value is None
):
node_input_text_index = input_value_index.index(index)
text_index = input_value_values[node_input_text_index]
if node_input_text_index >= 0 and text_index >= 0:
element_node_value = strings[text_index]
            # remove redundant elements
if ancestor_exception and (node_name != "a" and node_name != "button"):
continue
elements_in_view_port.append(
{
"node_index": str(index),
"backend_node_id": backend_node_id[index],
"node_name": node_name,
"node_value": element_node_value,
"node_meta": meta_data,
"is_clickable": index in is_clickable,
"origin_x": int(x),
"origin_y": int(y),
"center_x": int(x + (width / 2)),
"center_y": int(y + (height / 2)),
}
)
        # let's filter further to remove anything that neither holds text nor has a click handler, and merge text from leaf #text nodes into the parent
elements_of_interest= []
id_counter = 0
for element in elements_in_view_port:
node_index = element.get("node_index")
node_name = element.get("node_name")
node_value = element.get("node_value")
is_clickable = element.get("is_clickable")
origin_x = element.get("origin_x")
origin_y = element.get("origin_y")
center_x = element.get("center_x")
center_y = element.get("center_y")
meta_data = element.get("node_meta")
inner_text = f"{node_value} " if node_value else ""
meta = ""
if node_index in child_nodes:
for child in child_nodes.get(node_index):
entry_type = child.get('type')
entry_value= child.get('value')
if entry_type == "attribute":
entry_key = child.get('key')
meta_data.append(f'{entry_key}="{entry_value}"')
else:
inner_text += f"{entry_value} "
if meta_data:
meta_string = " ".join(meta_data)
meta = f" {meta_string}"
if inner_text != "":
inner_text = f"{inner_text.strip()}"
converted_node_name = convert_name(node_name, is_clickable)
# not very elegant, more like a placeholder
if (
(converted_node_name != "button" or meta == "")
and converted_node_name != "link"
and converted_node_name != "input"
and converted_node_name != "img"
and converted_node_name != "textarea"
) and inner_text.strip() == "":
continue
page_element_buffer[id_counter] = element
if inner_text != "":
elements_of_interest.append(
f"""<{converted_node_name} id={id_counter}{meta}>{inner_text}</{converted_node_name}>"""
)
else:
elements_of_interest.append(
f"""<{converted_node_name} id={id_counter}{meta}/>"""
)
id_counter += 1
print("Parsing time: {:0.2f} seconds".format(time.time() - start))
return elements_of_interest
if (
__name__ == "__main__"
):
_crawler = Crawler()
_crawler.load_cookies()
print(anthropic)
def print_help():
print(
"(g) to visit url\n(u) scroll up\n(d) scroll down\n(c) to click\n(t) to type\n" +
"(h) to view commands again\n(r/enter) to run suggested command\n(o) change objective"
)
def get_gpt_command(objective, url, previous_command, browser_content):
prompt = prompt_template
prompt = prompt.replace("$objective", objective)
prompt = prompt.replace("$url", url[:100])
prompt = prompt.replace("$previous_command", previous_command)
prompt = prompt.replace("$browser_content", browser_content[:4500])
response = anthropic.completions.create(model="claude-2", max_tokens_to_sample=10000, prompt=prompt)
print(response)
#response = openai.Completion.create(model="text-davinci-002", prompt=prompt, temperature=0.5, best_of=10, n=3, max_tokens=50)
return response.completion
def run_cmd(cmd):
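    # Parses the model's single-line command (SCROLL / CLICK / ADD / TYPE[SUBMIT]) and
    # dispatches it to the corresponding crawler action.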
cmd = cmd.split("\n")[0]
if cmd.startswith("SCROLL UP"):
_crawler.scroll("up")
elif cmd.startswith("SCROLL DOWN"):
_crawler.scroll("down")
_crawler.scroll("down")
_crawler.scroll("down")
elif cmd.startswith("CLICK"):
commasplit = cmd.split(",")
id = commasplit[0].split(" ")[1]
_crawler.click(id)
elif cmd.startswith("ADD"):
_crawler.page.click('//*[@id="main-content"]/div/div[1]/div/div[2]/div[4]/div/button')
Crawler.sign_in()
elif cmd.startswith("TYPE"):
print(cmd, "This is the cmd. It runs in Type")
spacesplit = cmd.split(" ")
id = spacesplit[1]
text = spacesplit[2:]
text = " ".join(text)
# Strip leading and trailing double quotes
text = text[1:-1]
if cmd.startswith("TYPESUBMIT"):
text += '\n'
print(f"This is the crawler type {text}")
_crawler.click(4)
_crawler.type(id, text)
time.sleep(2)
objective = "Make a reservation for 2 at 7pm at bistro vida in menlo park"
print("\nWelcome to natbot! What is your objective?")
i = input()
if len(i) > 0:
objective = i
gpt_cmd = ""
prev_cmd = ""
_crawler.go_to_page(f"https://www.ubereats.com/ca/search?diningMode=DELIVERY&pl=JTdCJTIyYWRkcmVzcyUyMiUzQSUyMjE1MSUyMENoYXJsZXMlMjBTdCUyMFclMjBzdWl0ZSUyMDEwMCUyMiUyQyUyMnJlZmVyZW5jZSUyMiUzQSUyMkVqa3hOVEVnUTJoaGNteGxjeUJUZENCWElITjFhWFJsSURFd01Dd2dTMmwwWTJobGJtVnlMQ0JQVGlCT01rY2dNVWcyTENCRFlXNWhaR0VpSlJvakNoWUtGQW9TQ2ZHaDFHWDE5Q3VJRWZXdXQ3VXB0Tkp2RWdsemRXbDBaU0F4TURBJTIyJTJDJTIycmVmZXJlbmNlVHlwZSUyMiUzQSUyMmdvb2dsZV9wbGFjZXMlMjIlMkMlMjJsYXRpdHVkZSUyMiUzQTQzLjQ1MTIwNjklMkMlMjJsb25naXR1ZGUlMjIlM0EtODAuNDk4MTk4NiU3RA%3D%3D&q={i}&sc=SEARCH_BAR&vertical=ALL")
#_crawler.click(3)
print("Clicked")
try:
while True:
browser_content = "\n".join(_crawler.crawl())
prev_cmd = gpt_cmd
gpt_cmd = get_gpt_command(objective, _crawler.page.url, prev_cmd, browser_content)
gpt_cmd = gpt_cmd.strip()
if not quiet:
print("URL: " + _crawler.page.url)
print("Objective: " + objective)
print("----------------\n" + browser_content + "\n----------------\n")
if len(gpt_cmd) > 0:
print("Suggested command: " + gpt_cmd)
command = input()
if command == "r" or command == "":
run_cmd(gpt_cmd)
elif command == "g":
url = input("URL:")
_crawler.go_to_page(url)
elif command == "u":
_crawler.scroll("up")
time.sleep(1)
elif command == "d":
_crawler.scroll("down")
time.sleep(1)
elif command == "c":
id = input("id:")
_crawler.click(id)
time.sleep(1)
elif command == "t":
id = input("id:")
text = input("text:")
_crawler.type(id, text)
time.sleep(1)
elif command == "o":
objective = input("Objective:")
else:
print_help()
except KeyboardInterrupt:
print("\n[!] Ctrl+C detected, exiting gracefully.")
exit(0) | [
"PLACEHOLDER\nYou are an agent controlling a browser. You are given:\n\n\t(1) an objective that you are trying to achieve\n\t(2) the URL of your current web page\n\t(3) a simplified text description of what's visible in the browser window (more on that below)\n\nYou can issue these commands:\n\tSCROLL UP - scroll up one page\n\tSCROLL DOWN - scroll down one page\n\tCLICK X - click on a given element. You can only click on links, buttons, and inputs!\n\tTYPE X \"TEXT\" - type the specified text into the input with id X\n\tTYPESUBMIT X \"TEXT\" - same as TYPE above, except then it presses ENTER to submit the form\n\tADD TO ORDER - adds the selected pizza to the order\n\nThe format of the browser content is highly simplified; all formatting elements are stripped.\nInteractive elements such as links, inputs, buttons are represented like this:\n\n\t\t<link id=1>text</link>\n\t\t<button id=2>text</button>\n\t\t<input id=3>text</input>\n\nImages are rendered as their alt text like this:\n\n\t\t<img id=4 alt=\"\"/>\n\nBased on your given objective, issue whatever command you believe will get you closest to achieving your goal.\nUse UberEats to place the order. Start by searching from the main navigation menu where to order the pizza from the location specified by the user. Do not order from any other location.\n\nThe objective is to order a medium pepperoni pizza with no toppings from the pizza restaurant page.\n\nWhen deciding which pizza to order on the pizza restaurant page, if there is a list of pizzas MAKE SURE TO CLICK ON THE PIZZA SIZE TEXT ELEMENT. Do not click on Quick View elements.\n\nOnce you have selected a pizza and you see text containing the words \"crust\", \"Cal.\", \"sauce\", \"cheese\", \"TOPPINGS\", \"Olive\", \"peppers\", and/or \"dough\", CLICK ON EACH OPTION and then scroll down until yrespond with ADDTOORDER.\n\nReply with your next command to the browser AND ONLY YOUR COMMAND DO NOT TYPE ANYTHING ELSE BUT THE COMMAND. 
DO NOT EXPLAIN YOUR THOUGHTS AND ONLY EXECUTE THE COMMAND.\n\n\nHere are some examples:\n\nEXAMPLE 1:\n==================================================\nCURRENT BROWSER CONTENT:\n------------------\n<link id=1>About</link>\n<link id=2>Store</link>\n<link id=3>Gmail</link>\n<link id=4>Images</link>\n<link id=5>(Google apps)</link>\n<link id=6>Sign in</link>\n<img id=7 alt=\"(Google)\"/>\n<input id=8 alt=\"Search\"></input>\n<button id=9>(Search by voice)</button>\n<button id=10>(Google Search)</button>\n<button id=11>(I'm Feeling Lucky)</button>\n<link id=12>Advertising</link>\n<link id=13>Business</link>\n<link id=14>How Search works</link>\n<link id=15>Carbon neutral since 2007</link>\n<link id=16>Privacy</link>\n<link id=17>Terms</link>\n<text id=18>Settings</text>\n------------------\nOBJECTIVE: Find a 2 bedroom house for sale in Anchorage AK for under $750k\nCURRENT URL: https://www.google.com/\nYOUR COMMAND: \nTYPESUBMIT 8 \"anchorage redfin\"\n==================================================\n\nEXAMPLE 2:\n==================================================\nCURRENT BROWSER CONTENT:\n------------------\n<link id=1>About</link>\n<link id=2>Store</link>\n<link id=3>Gmail</link>\n<link id=4>Images</link>\n<link id=5>(Google apps)</link>\n<link id=6>Sign in</link>\n<img id=7 alt=\"(Google)\"/>\n<input id=8 alt=\"Search\"></input>\n<button id=9>(Search by voice)</button>\n<button id=10>(Google Search)</button>\n<button id=11>(I'm Feeling Lucky)</button>\n<link id=12>Advertising</link>\n<link id=13>Business</link>\n<link id=14>How Search works</link>\n<link id=15>Carbon neutral since 2007</link>\n<link id=16>Privacy</link>\n<link id=17>Terms</link>\n<text id=18>Settings</text>\n------------------\nOBJECTIVE: Make a reservation for 4 at Dorsia at 8pm\nCURRENT URL: https://www.google.com/\nYOUR COMMAND: \nTYPESUBMIT 8 \"dorsia nyc opentable\"\n==================================================\n\nEXAMPLE 3:\n==================================================\nCURRENT BROWSER CONTENT:\n------------------\n<button id=1>For Businesses</button>\n<button id=2>Mobile</button>\n<button id=3>Help</button>\n<button id=4 alt=\"Language Picker\">EN</button>\n<link id=5>OpenTable logo</link>\n<button id=6 alt =\"search\">Search</button>\n<text id=7>Find your table for any occasion</text>\n<button id=8>(Date selector)</button>\n<text id=9>Sep 28, 2022</text>\n<text id=10>7:00 PM</text>\n<text id=11>2 people</text>\n<input id=12 alt=\"Location, Restaurant, or Cuisine\"></input> \n<button id=13>Let’s go</button>\n<text id=14>It looks like you're in Peninsula. Not correct?</text> \n<button id=15>Get current location</button>\n<button id=16>Next</button>\n------------------\nOBJECTIVE: Make a reservation for 4 for dinner at Dorsia in New York City at 8pm\nCURRENT URL: https://www.opentable.com/\nYOUR COMMAND: \nTYPESUBMIT 12 \"dorsia new york city\"\n==================================================\n\nThe current browser content, objective, and current URL follow. Reply with your next command to the browser AND ONLY YOUR COMMAND DO NOT TYPE ANYTHING ELSE BUT THE COMMAND.\n\nReply with your next command to the browser AND ONLY YOUR COMMAND DO NOT TYPE ANYTHING ELSE BUT THE COMMAND. DO NOT EXPLAIN YOUR THOUGHTS AND ONLY EXECUTE THE COMMAND.\n\nCURRENT BROWSER CONTENT:\n------------------\n$browser_content\n------------------\nReply with your next command to the browser AND ONLY YOUR COMMAND DO NOT TYPE ANYTHING ELSE BUT THE COMMAND. 
DO NOT EXPLAIN YOUR THOUGHTS AND ONLY EXECUTE THE COMMAND.\n\nOBJECTIVE: $objective\nCURRENT URL: $url\nPREVIOUS COMMAND: $previous_command\nYOUR COMMAND:\nPLACEHOLDER\n",
"$browser_content",
"$previous_command",
"$objective"
] |
2024-01-10 | Cognition-Labs/BioConceptXplorer | v1~bioconceptvec-explorer~backend~main2.py | import random
import streamlit as st
import pickle
import dotenv
import numpy as np
from tqdm import tqdm
import openai
import os
import faiss
import pandas as pd
import streamlit_pandas as sp
from sentence_transformers import SentenceTransformer
import json
from sklearn.metrics.pairwise import cosine_similarity
# Load concept_descriptions function
# load concept embedding for all API calls
st.write("Cold start - loading concept embeddings...")
@st.cache_data
def load_concept_descriptions():
print("loading concept descs")
concept_descriptions = pd.read_pickle(
"/Users/danielgeorge/Documents/work/ml/bioconceptvec-explorer/bioconceptvec-explorer/mappings/concept_descriptions.pkl"
)
return concept_descriptions
# Load sentence_embeddings function
@st.cache_data
def load_sentence_embeddings():
sentence_embeddings = np.load("./description_embeddings.npy")
return sentence_embeddings
# Load sentences function
@st.cache_data
def load_sentences():
with open("./sentences.txt") as f:
sentences = f.readlines()
return sentences
@st.cache_data
def load_concept_values():
# load concept embedding for all API calls
print("Cold start - loading concept embeddings...")
with open("./embeddings/concept_glove.json") as json_file:
concept_vectors = json.load(json_file)
concept_keys = list(concept_vectors.keys())
return np.array(list(concept_vectors.values()), dtype=np.float32)
@st.cache_data
def load_rev_concept_description():
print("loading concept descriptions...")
with open("./mappings/concept_descriptions.pkl", "rb") as f:
concept_descriptions = pickle.load(f)
rev_concept_descriptions = {}
for key, value in tqdm(concept_descriptions.items()):
if type(value) == list and len(value) == 0:
continue
elif type(value) == list and len(value) > 0:
rev_concept_descriptions[value[0]] = key
else:
            rev_concept_descriptions[value] = key
    return rev_concept_descriptions
with open("./embeddings/concept_glove.json") as json_file:
print("loading concept glove.json")
concept_vectors = json.load(json_file)
concept_keys = list(concept_vectors.keys())
concept_values = np.array(list(concept_vectors.values()), dtype=np.float32)
# Load the necessary data
concept_descriptions = load_concept_descriptions()
sentence_embeddings = load_sentence_embeddings()
sentences = load_sentences()
concept_values = load_concept_values()
rev_concept_descriptions = load_rev_concept_description()
# Load the model
model = SentenceTransformer("bert-base-nli-mean-tokens")
# Initialize the index
d = sentence_embeddings.shape[1]
index = faiss.IndexFlatL2(d)
index.add(sentence_embeddings)
st.write("Done!")
def load_openai_key(path):
dotenv.load_dotenv(path)
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_prompt(query: str):
return f"""
What does this mean analogically? I found this by doing equations with vector embeddings.
This is similar to how King - Man + Woman = Queen for word2vec. I'm trying to reason why this makes sense.
{query}
Really try to think outside the box to find why this could be reasonable. Use this as a generative way to help think of biological hypotheses.
"""
def gpt(prompt):
load_openai_key("./.env")
messageList = [
{
"role": "system",
"content": "You are a helpful chatbot that helps people understand biology.",
},
{
"role": "user",
"content": prompt,
},
]
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messageList
)
feature_string = completion.choices[0].message.content
return feature_string
print("Done!")
def compute_expression(
expression: list,
k: int = 10,
useCosineSimilarity: bool = True,
) -> dict:
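    # Evaluates a concept-vector expression such as ["A", "+", "B", "-", "C"] and
    # returns the top-k concepts nearest to the resulting vector (word2vec-style analogy arithmetic).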
# print(f"Computing expression: {expression}")
if expression[0] != "-" and expression[0] != "+":
expression = ["+", *expression]
# split expression into groups of 2 (sign, concept)
matches = [expression[i : i + 2] for i in range(0, len(expression), 2)]
# compute x vector
result = np.zeros(np.array(concept_values[0]).shape, dtype=np.float32)
for match in matches:
sign, variable = match
# print(f"Variable: {variable} | Sign: {sign}")
if sign == "-":
result -= concept_vectors[variable]
elif sign == "+":
result += concept_vectors[variable]
else:
raise ValueError(f"Invalid operator: {sign}")
similarities = None
if useCosineSimilarity:
# compute similarity between x vector and all other vectors
similarities = cosine_similarity(concept_values, [result]).flatten()
else:
# compute distance between x vector and all other vectors
similarities = np.linalg.norm(concept_values - result, axis=1).flatten()
# get index of top k similarities
top_k_indices = np.argpartition(similarities, -k)[-k:]
# get top k most similar concepts as a dict
top_concepts = {concept_keys[i]: float(similarities[i]) for i in top_k_indices}
top_concepts = dict(
sorted(top_concepts.items(), key=lambda item: item[1], reverse=True)
)
return top_concepts
def autosuggest(query: str, limit: int) -> list:
# filter concept vectors based on whether query is a substring
query = query.lower()
descs = list(concept_descriptions.values())
for i in range(len(descs)):
if type(descs[i]) == list and len(descs[i]) > 0:
descs[i] = descs[i][0]
elif type(descs[i]) == list and len(descs[i]) == 0:
descs[i] = ""
descs = [i for i in descs if i is not None and i != ""]
lower_concept_descs = map(lambda x: x.lower(), descs)
result = [concept for concept in lower_concept_descs if query in concept]
return result[:limit]
def get_similar_concepts(concept_query: str, k: int) -> list:
# convert from concept description to concept id
if ";" in concept_query:
concept_query = concept_query.split(";")[0]
concept_query = rev_concept_descriptions[concept_query]
concept = concept_vectors[concept_query]
similarities = cosine_similarity(concept_values, [concept]).flatten()
top_concepts = {}
for concept, similarity in zip(concept_vectors.keys(), similarities):
top_concepts[concept] = similarity
top_concepts = dict(
sorted(top_concepts.items(), key=lambda item: item[1], reverse=True)[:k]
)
return top_concepts
def free_var_search(term: str, sim_threshold=0.7, n=100, top_k=3, use_gpt=False):
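    # Searches for analogy equations of the form term + b - c ≈ x by sampling random
    # concept pairs (b, c) and keeping those whose nearest neighbour x scores above
    # sim_threshold; optionally asks GPT for a rationale per kept equation.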
print("Running free var search!!")
term_vec = concept_vectors[term]
expressions = []
# randomly pick 1000 pairs of concepts for b, c
concepts = list(concept_vectors.keys())
equations = []
for _ in range(n):
b, c = random.sample(concepts, 2)
equations.append([term, "+", b, "-", c])
print("Solving equations...")
good_equations = []
for equation in tqdm(equations):
concept, sim = compute_expression(
equation,
k=1,
).popitem()
if sim > sim_threshold and concept not in equation:
print(f"Equation: {equation} | Concept: {concept} | Similarity: {sim}")
good_equations.append((equation, concept, sim))
print(f"Expression: {equation} | Solution: {concept} | Similarity: {sim}")
df = pd.DataFrame(good_equations, columns=["Equation", "Concept", "Similarity"])
# Sort by similarity
df = df.sort_values(by=["Similarity"], ascending=False)
df = df.reset_index(drop=True)
# Pick top k
    df = df.head(top_k)
    # Build the readable form of each kept equation from the truncated, re-sorted
    # dataframe so this column stays aligned with its rows.
    eq_mapped = []
    for eq in df["Equation"]:
        eq_mapped.append(
            " ".join(
                [str(concept_descriptions[i]) for i in eq if i != "+" and i != "-"]
            )
        )
    df["Equation_mapped"] = eq_mapped
df["Concept Description"] = df["Concept"].apply(lambda x: concept_descriptions[x])
# now we use gpt to generate a rationale for each equation using the prompt
if use_gpt:
rationales = []
for row in tqdm(df.iterrows()):
mapped_eq = row[1]["Equation_mapped"]
prompt = get_prompt(mapped_eq)
rationales.append(gpt(prompt))
df["Rationale"] = rationales
df.to_csv("results.csv", index=False)
return df
def process_input(user_input):
k = 8
xq = model.encode([user_input])
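    # FAISS returns distances (D) and row indices (I) of the k nearest stored sentence embeddings.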
D, I = index.search(xq, k)
options = [f"{i}: {sentences[i]}" for i in I[0]]
return options
def select_option(options):
selected_option = st.selectbox("Select a similar concept:", options)
if selected_option:
st.write("You selected:", selected_option)
return selected_option
# Set up the Streamlit page
st.title("BioConceptVec Exploration App")
# Get the user's input
user_input = st.text_input("Enter a concept:")
if user_input:
options = process_input(user_input)
if options:
option = select_option(options)
if option:
start_index = option.find(":") + 1
end_index = option.find("|")
extracted_string = option[start_index:end_index].strip()
st.write(extracted_string)
# Make an input box from 0.0 to 1.0 by increments of 0.1 multiselect
threshold = st.multiselect(
"Select a threshold:", [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
)
if threshold:
threshold = threshold[0]
free_var_search(extracted_string, threshold, use_gpt=True, top_k=10)
import streamlit as st
import pandas as pd
# Load the CSV file
data = pd.read_csv("results.csv")
# Display a download button
st.download_button(
label="Download CSV",
data=data.to_csv(),
file_name="res.csv",
mime="text/csv",
)
# Show the dataframe
                st.write(data)  # assuming Streamlit's st.write was intended here rather than sp
| [
"You are a helpful chatbot that helps people understand biology."
] |
2024-01-10 | lep511/AWS | Lambda~OpenAI~lambda_function.py | import boto3
import base64
from botocore.exceptions import ClientError
import json
import openai
# The key is assigned inside the handler: at module load, get_secret() below is not yet defined.
def lambda_handler(event, context):
    openai.api_key = get_secret()
    text_ingress = event['body']
resp = ai_function(text_ingress)
return {
'statusCode': 200,
'body': json.dumps(resp)
}
def ai_function(text_function):
response = openai.Completion.create(
model="text-davinci-002",
prompt=text_function,
temperature=0,
max_tokens=260,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["**"]
)
print(response)
text_out = response["choices"][0]["text"]
return text_out
def get_secret():
secret_name = "openai_secret"
region_name = "us-east-1"
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
# In this sample we only handle the specific exceptions for the 'GetSecretValue' API.
# See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
# We rethrow the exception by default.
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'DecryptionFailureException':
# Secrets Manager can't decrypt the protected secret text using the provided KMS key.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
# An error occurred on the server side.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidParameterException':
# You provided an invalid value for a parameter.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidRequestException':
# You provided a parameter value that is not valid for the current state of the resource.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
# We can't find the resource that you asked for.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
else:
# Decrypts secret using the associated KMS key.
# Depending on whether the secret is a string or binary, one of these fields will be populated.
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
            secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return secret
| [] |
2024-01-10 | lep511/AWS | SAM~S3-trigger-example~OpenIa.py | import openai
import json
import os
# The API key was hard-coded in the original source; reading it from the environment avoids committing a secret.
openai.api_key = os.environ.get("OPENAI_API_KEY")
def lambda_handler(event, context):
# TODO implement
print(event)
try:
text_ingress = json.dumps(event['text_tofunction'])
print(text_ingress)
result = ai_function(text_ingress)
except:
return {
'statusCode': 400,
'body': json.dumps("Not found text_tofunction parameter")
}
else:
return {
'statusCode': 200,
'body': result
}
def ai_function(text_function):
response = openai.Completion.create(
model="text-davinci-002",
prompt=text_function,
temperature=0,
max_tokens=260,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["**"]
)
text_out = response["choices"]
return text_out
| [] |
2024-01-10 | vdutts7/youtube-gpt | scripts~pinecone_helper.py |
import argparse
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from dotenv import load_dotenv
import pinecone
import os
import csv
import json
load_dotenv()
def store_transcript(transcript, video_url, video_title):
"""Stores a transcript in the Pinecone vector database.
Args:
transcript: The transcript to store.
video_url: The URL of the video associated with the transcript.
video_title: The title of the video associated with the transcript.
Returns:
A list of documents that were stored in the Pinecone vector database.
"""
pinecone.init(
api_key=os.environ.get("PINECONE_API_KEY"),
environment=os.environ.get("PINECONE_ENVIRONMENT"),
)
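    # Split the transcript into overlapping chunks so surrounding context is preserved
    # across chunk boundaries before embedding.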
splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=500,
)
metadatas = [{
"video_url": video_url,
"video_title": video_title,
}]
docs = splitter.create_documents([transcript], metadatas=metadatas)
embeddings = OpenAIEmbeddings(
openai_api_key=os.environ.get("OPENAI_API_KEY"), )
Pinecone.from_documents(
docs, embeddings, index_name=os.environ.get("PINECONE_INDEX")
)
def main(videos_data, transcriptions_directory):
with open(videos_data, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
try:
print(row['url'])
video_id = row['url'].split('=')[-1]
# Search for the JSON file that contains the video ID
for filename in os.listdir(transcriptions_directory):
if video_id in filename:
json_file_path = os.path.join(transcriptions_directory, filename)
with open(json_file_path, 'r') as json_file:
data = json.load(json_file)
transcript = data.get('text')
if transcript:
store_transcript(
transcript, row['url'], row.get('title'))
else:
print(f"No transcript in {json_file_path}")
break
else:
print(f"No JSON file found for video ID {video_id}")
except KeyError as e:
print(f"Missing field: {e}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Save transcriptions in Pinecone')
parser.add_argument('videos_data', type=str,
help='CSV file with video links and titles')
parser.add_argument('transcriptions_directory', type=str,
help='Transcription output folder path')
args = parser.parse_args()
main(args.videos_data, args.transcriptions_directory)
| [] |
2024-01-10 | mismayil/crow | evaluation~evaluate_gpt.py | import argparse
import openai
import time
from openai.error import OpenAIError
import os
from tqdm import tqdm
import json
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import pathlib
import uuid
import sys
import re
sys.path.append("..")
from utils import read_json, write_json, generate_unique_id, MODEL_COSTS
#openai.api_key = args.os.getenv("OPENAI_API_KEY")
CHAT_COMPLETION_MODELS = ["gpt-3.5-turbo", "gpt-4"]
TEXT_COMPLETION_MODELS = ["text-davinci-003"]
def chat_completion(messages, model="gpt-3.5-turbo", return_text=True, return_usage=True, model_args=None):
if model_args is None:
model_args = {}
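    # Retry indefinitely on transient OpenAI errors (e.g. rate limits), sleeping one minute between attempts.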
while True:
try:
response = openai.ChatCompletion.create(model=model, messages=messages, **model_args)
text = response["choices"][0]["message"]["content"].strip()
usage = response["usage"]
if return_text and return_usage:
return text, usage
if return_text:
return text
if return_usage:
return usage
return response
except OpenAIError as e:
print("OpenAI error. Waiting for 1 minute.")
time.sleep(60)
continue
def text_completion(prompt, model="text-davinci-003", return_text=True, return_usage=True, model_args=None):
if model_args is None:
model_args = {}
while True:
try:
response = openai.Completion.create(model=model, prompt=prompt, **model_args)
text = response["choices"][0]["text"].strip()
usage = response["usage"]
if return_text and return_usage:
return text, usage
if return_text:
return text
if return_usage:
return usage
return response
except OpenAIError as e:
print("OpenAI error. Waiting for 1 minute.")
time.sleep(60)
continue
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--datapath", type=str, help="Path to evaluation data in json", required=True)
parser.add_argument("--openai-key", type=str, help="OpenAI API Key", required=True)
parser.add_argument("--model", type=str, help="Model to use for evaluation", default="gpt-3.5-turbo")
parser.add_argument("--temperature", type=float, help="Temperature for generation", default=0.3)
parser.add_argument("--max-tokens", type=int, help="Max tokens for generation", default=40)
parser.add_argument("--top-p", type=float, help="Top p for generation", default=1)
parser.add_argument("--frequency-penalty", type=float, help="Frequency penalty for generation", default=0)
parser.add_argument("--presence-penalty", type=float, help="Presence penalty for generation", default=0)
parser.add_argument("--output-dir", type=str, help="Output directory for evaluation results", default="outputs")
parser.add_argument("--num-samples", type=int, help="Number of samples to evaluate", default=0)
parser.add_argument("--ignore-path", type=str, help="Path to already evaluated data", default=None)
args = parser.parse_args()
openai.api_key = args.openai_key
data = read_json(args.datapath)
ignore_map = {}
if args.ignore_path is not None:
ignore_data = read_json(args.ignore_path)
for sample in ignore_data["data"]:
ignore_map[sample["instance_id"]] = sample
if args.num_samples > 0:
data = data[:int(args.num_samples)]
predictions = []
references = []
outputs = {
"metadata": {
"datapath": args.datapath,
"model": args.model,
"temperature": args.temperature,
"max_tokens": args.max_tokens,
"top_p": args.top_p,
"frequency_penalty": args.frequency_penalty,
"presence_penalty": args.presence_penalty
},
"metrics": {
"accuracy": 0,
"precision": 0,
"recall": 0,
"f1": 0,
"usage": {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0
},
"cost": {
"input": 0,
"output": 0,
"total": 0
}
},
"data": data
}
pathlib.Path(args.output_dir).mkdir(parents=True, exist_ok=True)
datapath = pathlib.Path(args.datapath)
output_path = os.path.join(args.output_dir, f"{datapath.stem}_{args.model}_{generate_unique_id()}.json")
print(f"Writing to {output_path}")
for sample in tqdm(data, total=len(data)):
if sample["instance_id"] in ignore_map:
ignore_instance = ignore_map[sample["instance_id"]]
if "response" in ignore_instance:
sample.update(ignore_instance)
continue
if "response" in sample:
continue
if args.model in CHAT_COMPLETION_MODELS:
response, usage = chat_completion([{"role": "user", "content": sample["prompt"].strip()}], model=args.model, return_text=True, return_usage=True, model_args={
"temperature": args.temperature,
"max_tokens": args.max_tokens,
"top_p": args.top_p,
"frequency_penalty": args.frequency_penalty,
"presence_penalty": args.presence_penalty
})
elif args.model in TEXT_COMPLETION_MODELS:
response, usage = text_completion(sample["prompt"].strip(), model=args.model, return_text=True, return_usage=True, model_args={
"temperature": args.temperature,
"max_tokens": args.max_tokens,
"top_p": args.top_p,
"frequency_penalty": args.frequency_penalty,
"presence_penalty": args.presence_penalty
})
else:
raise ValueError(f"Model {args.model} not supported for evaluation.")
sample["response"] = response
sample["usage"] = usage
outputs["metrics"]["usage"]["prompt_tokens"] += usage["prompt_tokens"]
outputs["metrics"]["usage"]["completion_tokens"] += usage["completion_tokens"]
outputs["metrics"]["usage"]["total_tokens"] += usage["total_tokens"]
if sample["type"] in ["bcq", "bcq_with_kg"]:
ref = 1 if sample["answer"].strip().lower() == "yes" else 0
pred = 1 if response.strip().lower() == "yes" else 0
references.append(ref)
predictions.append(pred)
sample["correct"] = ref == pred
elif sample["type"] in ["bcq_cot", "bcq_cot_with_kg"]:
ref = 1 if sample["answer"].strip().lower() == "yes" else 0
match = re.search("<Answer>(?P<pred>.*)</Answer>", response)
pred = 0
if match:
pred = match["pred"].strip().lower()
pred = 1 if pred == "yes" else 0
references.append(ref)
predictions.append(pred)
sample["correct"] = ref == pred
elif sample["type"] == "mcq":
try:
gold_answers = [int(a) for a in sample["answer"].split(",")]
gpt_answers = [int(a) for a in response.split(",")]
refs = [1 if i+1 in gold_answers else 0 for i in range(sample["num_options"])]
preds = [1 if i+1 in gpt_answers else 0 for i in range(sample["num_options"])]
sample["references"] = refs
sample["predictions"] = preds
sample["accuracy"] = accuracy_score(refs, preds)
sample["precision"] = precision_score(refs, preds, average="macro")
sample["recall"] = recall_score(refs, preds, average="macro")
sample["f1"] = f1_score(refs, preds, average="macro")
except ValueError:
continue
else:
raise ValueError(f"Type {sample['type']} not supported for evaluation.")
write_json(outputs, output_path)
if predictions:
outputs["metrics"]["accuracy"] = accuracy_score(references, predictions)
outputs["metrics"]["precision"] = precision_score(references, predictions, average="macro")
outputs["metrics"]["recall"] = recall_score(references, predictions, average="macro")
outputs["metrics"]["f1"] = f1_score(references, predictions, average="macro")
else:
outputs["metrics"]["accuracy"] = np.mean([sample["accuracy"] for sample in data if "accuracy" in sample])
outputs["metrics"]["precision"] = np.mean([sample["precision"] for sample in data if "precision" in sample])
outputs["metrics"]["recall"] = np.mean([sample["recall"] for sample in data if "recall" in sample])
outputs["metrics"]["f1"] = np.mean([sample["f1"] for sample in data if "f1" in sample])
outputs["metrics"]["cost"]["input"] = outputs["metrics"]["usage"]["prompt_tokens"] * MODEL_COSTS[args.model]["input"]
outputs["metrics"]["cost"]["output"] = outputs["metrics"]["usage"]["completion_tokens"] * MODEL_COSTS[args.model]["output"]
outputs["metrics"]["cost"]["total"] = outputs["metrics"]["cost"]["input"] + outputs["metrics"]["cost"]["output"]
write_json(outputs, output_path)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | Invisible-Bot-Java/Jarvis-AI | Brain~Qna.py | # Api Key
fileopen = open("Data\\AI_Brain_API.txt","r")
API = fileopen.read()
fileopen.close()
# Importing
import openai
from dotenv import load_dotenv
#Coding
openai.api_key = API
load_dotenv()
completion = openai.Completion()
def QuestionsAnswer(question,chat_log = None):
FileLog = open("DataBase\\qna_log.txt","r")
chat_log_template = FileLog.read()
FileLog.close()
if chat_log is None:
chat_log = chat_log_template
prompt = f'{chat_log}Question : {question}\nAnswer : '
response = completion.create(
model = "text-davinci-002",
prompt=prompt,
temperature = 0,
max_tokens = 100,
top_p = 1,
frequency_penalty = 0,
presence_penalty = 0)
answer = response.choices[0].text.strip()
chat_log_template_update = chat_log_template + f"\nQuestion : {question} \nAnswer : {answer}"
FileLog = open("DataBase\\qna_log.txt","w")
FileLog.write(chat_log_template_update)
FileLog.close()
return answer
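# Example usage (sketch): assuming the API-key file and DataBase\qna_log.txt exist,
# the function can be called directly:
#     print(QuestionsAnswer("Who wrote Hamlet?"))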
| [
"PLACEHOLDERQuestion : PLACEHOLDER\nAnswer : ",
"PLACEHOLDER\nQuestion : PLACEHOLDER \nAnswer : PLACEHOLDER"
] |
2024-01-10 | Invisible-Bot-Java/Jarvis-AI | Brain~AIBrain.py | # Api Key
fileopen = open("Data\\AI_Brain_API.txt","r")
API = fileopen.read()
fileopen.close()
# Importing
import openai
from dotenv import load_dotenv
#Coding
openai.api_key = API
load_dotenv()
completion = openai.Completion()
def ReplyBrain(question,chat_log = None):
FileLog = open("DataBase\\chat_log.txt","r")
chat_log_template = FileLog.read()
FileLog.close()
if chat_log is None:
chat_log = chat_log_template
prompt = f'{chat_log}You : {question}\nJarvis : '
response = completion.create(
model = "text-davinci-002",
prompt=prompt,
temperature = 0.5,
max_tokens = 60,
top_p = 0.3,
frequency_penalty = 0.5,
presence_penalty = 0)
answer = response.choices[0].text.strip()
chat_log_template_update = chat_log_template + f"\nYou : {question} \nJarvis : {answer}"
FileLog = open("DataBase\\chat_log.txt","w")
FileLog.write(chat_log_template_update)
FileLog.close()
return answer
| [
"PLACEHOLDER\nYou : PLACEHOLDER \nJarvis : PLACEHOLDER",
"PLACEHOLDERYou : PLACEHOLDER\nJarvis : "
] |
2024-01-10 | jpvargasdev/travel_buddy_api | app~agent~customtools~files_handler_tool.py | import csv
import os
from datetime import date
from langchain.tools import StructuredTool
from langchain.tools.file_management import (
ReadFileTool,
CopyFileTool,
DeleteFileTool,
MoveFileTool,
WriteFileTool,
ListDirectoryTool,
)
from langchain.agents.agent_toolkits import FileManagementToolkit
from tempfile import TemporaryDirectory
current_directory = os.getcwd()
def create_csv_file(args: str):
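    # Expected args format (inferred from the parsing below): "<csv_name>,<col1>,<col2>,..."
    # i.e. the first token is the file name and the remaining tokens are column headers.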
input = args.split(",")
csv_name = input.pop(0)
with open(csv_name, "w", newline="") as file:
if input[0] != '[]':
writer = csv.DictWriter(file, fieldnames=input, dialect=csv.excel, delimiter=",")
writer.writeheader()
response = (f"CSV file '{csv_name}' has been created successfully.")
return response
def add_row_to_csv(args: str) -> str:
input = args.split(",")
csv_name = input.pop(0)
with open(csv_name, "a", newline="") as file:
writer = csv.writer(file, dialect=csv.excel, delimiter=",")
writer.writerow(input)
response = (f"A row has been added to the CSV file '{csv_name}' successfully.")
return response
def delete_csv_file(
csv_name:str,
) -> str:
if os.path.exists(csv_name):
os.remove(csv_name)
response = (f"CSV file '{csv_name}' has been deleted successfully.")
return response
else:
response = (f"CSV file '{csv_name}' does not exist.")
return response
def charge_csv(
    csv_name: str
) -> str:
    # The original body called itself recursively (an unbounded loop). Assuming
    # "charge" means "load", this minimal sketch simply returns the file's contents.
    with open(csv_name, newline="") as file:
        return file.read()
def get_current_date() -> str:
today = date.today()
now = today.strftime("%B %d, %Y")
return now
tool_get_current_date = StructuredTool.from_function(
get_current_date,
description="Get current date"
)
tool_create_csv_file = StructuredTool.from_function(
create_csv_file,
description="Create a csv file"
)
tool_add_row_to_csv = StructuredTool.from_function(
add_row_to_csv,
description="Edit a csv file"
)
tool_delete_csv_file = StructuredTool.from_function(
delete_csv_file,
description="Delete a csv file"
)
tool_charge_csv_file = StructuredTool.from_function(
charge_csv,
description="Charge csv"
)
tools_default_file_management = FileManagementToolkit(
root_dir=str(current_directory),
selected_tools=["read_file", "list_directory", "file_search"]
).get_tools()
| [] |
2024-01-10 | PAIXAI/llama_index | llama_index~query_engine~sql_vector_query_engine.py | """SQL Vector query engine."""
from langchain.input import print_text
from typing import Optional, cast, Dict, Any, Callable
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.struct_store.sql_query import NLStructStoreQueryEngine
from llama_index.indices.vector_store.retrievers.auto_retriever import (
VectorIndexAutoRetriever,
)
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.tools.query_engine import QueryEngineTool
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.indices.service_context import ServiceContext
from llama_index.selectors.llm_selectors import LLMSingleSelector
from llama_index.prompts.base import Prompt
from llama_index.indices.query.query_transform.base import BaseQueryTransform
import logging
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor
from llama_index.callbacks.base import CallbackManager
logger = logging.getLogger(__name__)
DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT_TMPL = """
The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
Given the SQL response, the question has also been translated into a vector store query.
The vector store query and response is given below.
Given SQL query, SQL response, transformed vector store query, and vector store response, please synthesize a response to the original question.
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
Transformed vector store query: {vector_store_query_str}
Vector store response: {vector_store_response_str}
Response:
""" # noqa
DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT = Prompt(DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT_TMPL)
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL = """
"The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
The SQL response either answers the question, or should provide additional context that can be used to make the question more specific.
Your job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.
Examples:
Original question: Please give more details about the demographics of the city with the highest population.
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: Can you tell me more about the demographics of New York City?
Original question: Please compare the sports environment of cities in North America.
SQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3
SQL response: The cities in North America are New York, San Francisco, and Toronto.
New question: What sports are played in New York, San Francisco, and Toronto?
Original question: What is the city with the highest population?
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: None
Original question: What countries are the top 3 ATP players from?
SQL query: SELECT country FROM players WHERE rank <= 3
SQL response: The top 3 ATP players are from Serbia, Russia, and Spain.
New question: None
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
New question: "
""" # noqa
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT = Prompt(DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL)
def _default_check_stop(query_bundle: QueryBundle) -> bool:
"""Default check stop function."""
return query_bundle.query_str.lower() == "none"
def _format_sql_query(sql_query: str) -> str:
"""Format SQL query."""
return sql_query.replace("\n", " ").replace("\t", " ")
class SQLAugmentQueryTransform(BaseQueryTransform):
"""SQL Augment Query Transform.
This query transform will transform the query into a more specific query
after augmenting with SQL results.
Args:
llm_predictor (LLMPredictor): LLM predictor to use for query transformation.
sql_augment_transform_prompt (Prompt): Prompt to use for query transformation.
check_stop_parser (Optional[Callable[[str], bool]]): Check stop function.
"""
def __init__(
self,
llm_predictor: Optional[BaseLLMPredictor] = None,
sql_augment_transform_prompt: Optional[Prompt] = None,
check_stop_parser: Optional[Callable[[QueryBundle], bool]] = None,
) -> None:
"""Initialize params."""
self._llm_predictor = llm_predictor or LLMPredictor()
self._sql_augment_transform_prompt = (
sql_augment_transform_prompt or DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT
)
self._check_stop_parser = check_stop_parser or _default_check_stop
def _run(self, query_bundle: QueryBundle, extra_info: Dict) -> QueryBundle:
"""Run query transform."""
query_str = query_bundle.query_str
sql_query = extra_info["sql_query"]
sql_query_response = extra_info["sql_query_response"]
new_query_str, formatted_prompt = self._llm_predictor.predict(
self._sql_augment_transform_prompt,
query_str=query_str,
sql_query_str=sql_query,
sql_response_str=sql_query_response,
)
return QueryBundle(
new_query_str, custom_embedding_strs=query_bundle.custom_embedding_strs
)
def check_stop(self, query_bundle: QueryBundle) -> bool:
"""Check if query indicates stop."""
return self._check_stop_parser(query_bundle)
class SQLAutoVectorQueryEngine(BaseQueryEngine):
"""SQL + Vector Index Auto Retriever Query Engine.
This query engine can query both a SQL database
as well as a vector database. It will first decide
whether it needs to query the SQL database or vector store.
If it decides to query the SQL database, it will also decide
whether to augment information with retrieved results from the vector store.
We use the VectorIndexAutoRetriever to retrieve results.
Args:
sql_query_tool (QueryEngineTool): Query engine tool for SQL database.
vector_query_tool (QueryEngineTool): Query engine tool for vector database.
selector (Optional[LLMSingleSelector]): Selector to use.
service_context (Optional[ServiceContext]): Service context to use.
sql_vector_synthesis_prompt (Optional[Prompt]): Prompt to use for SQL vector
synthesis.
sql_augment_query_transform (Optional[SQLAugmentQueryTransform]): Query
transform to use for SQL augmentation.
use_sql_vector_synthesis (bool): Whether to use SQL vector synthesis.
callback_manager (Optional[CallbackManager]): Callback manager to use.
verbose (bool): Whether to print intermediate results.
"""
def __init__(
self,
sql_query_tool: QueryEngineTool,
vector_query_tool: QueryEngineTool,
selector: Optional[LLMSingleSelector] = None,
service_context: Optional[ServiceContext] = None,
sql_vector_synthesis_prompt: Optional[Prompt] = None,
sql_augment_query_transform: Optional[SQLAugmentQueryTransform] = None,
use_sql_vector_synthesis: bool = True,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = True,
) -> None:
"""Initialize params."""
super().__init__(callback_manager=callback_manager)
# validate that the query engines are of the right type
if not isinstance(sql_query_tool.query_engine, NLStructStoreQueryEngine):
raise ValueError(
"sql_query_tool.query_engine must be an instance of "
"NLStructStoreQueryEngine"
)
if not isinstance(vector_query_tool.query_engine, RetrieverQueryEngine):
raise ValueError(
"vector_query_tool.query_engine must be an instance of "
"RetrieverQueryEngine"
)
if not isinstance(
vector_query_tool.query_engine.retriever, VectorIndexAutoRetriever
):
raise ValueError(
"vector_query_tool.query_engine.retriever must be an instance "
"of VectorIndexAutoRetriever"
)
self._sql_query_tool = sql_query_tool
self._vector_query_tool = vector_query_tool
sql_query_engine = cast(NLStructStoreQueryEngine, sql_query_tool.query_engine)
self._service_context = service_context or sql_query_engine.service_context
self._selector = selector or LLMSingleSelector.from_defaults()
self._sql_vector_synthesis_prompt = (
sql_vector_synthesis_prompt or DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT
)
self._sql_augment_query_transform = (
sql_augment_query_transform
or SQLAugmentQueryTransform(
llm_predictor=self._service_context.llm_predictor
)
)
self._use_sql_vector_synthesis = use_sql_vector_synthesis
self._verbose = verbose
@classmethod
def from_sql_and_vector_query_engines(
cls,
sql_query_engine: NLStructStoreQueryEngine,
sql_tool_name: str,
sql_tool_description: str,
vector_auto_retriever: RetrieverQueryEngine,
vector_tool_name: str,
vector_tool_description: str,
selector: Optional[LLMSingleSelector] = None,
**kwargs: Any,
) -> "SQLAutoVectorQueryEngine":
"""From SQL and vector query engines.
Args:
sql_query_engine (NLStructStoreQueryEngine): SQL query engine.
vector_auto_retriever (RetrieverQueryEngine): Vector query engine (its retriever must be a VectorIndexAutoRetriever).
selector (Optional[LLMSingleSelector]): Selector to use.
"""
sql_query_tool = QueryEngineTool.from_defaults(
sql_query_engine, name=sql_tool_name, description=sql_tool_description
)
vector_query_tool = QueryEngineTool.from_defaults(
vector_auto_retriever,
name=vector_tool_name,
description=vector_tool_description,
)
return cls(sql_query_tool, vector_query_tool, selector, **kwargs)
def _query_sql_vector(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query SQL database + vector db in sequence."""
# first query SQL database
sql_response = self._sql_query_tool.query_engine.query(query_bundle)
if not self._use_sql_vector_synthesis:
return sql_response
sql_query = (
sql_response.extra_info["sql_query"] if sql_response.extra_info else None
)
if self._verbose:
print_text(f"SQL query: {sql_query}\n", color="yellow")
print_text(f"SQL response: {sql_response}\n", color="yellow")
# given SQL db, transform query into new query
new_query = self._sql_augment_query_transform(
query_bundle.query_str,
extra_info={
"sql_query": _format_sql_query(sql_query),
"sql_query_response": str(sql_response),
},
)
if self._verbose:
print_text(
f"Transformed query given SQL response: {new_query.query_str}\n",
color="blue",
)
logger.info(f"> Transformed query given SQL response: {new_query.query_str}")
if self._sql_augment_query_transform.check_stop(new_query):
return sql_response
vector_response = self._vector_query_tool.query_engine.query(new_query)
if self._verbose:
print_text(f"Vector DB response: {vector_response}\n", color="pink")
logger.info(f"> Vector DB response: {vector_response}")
response_str, _ = self._service_context.llm_predictor.predict(
self._sql_vector_synthesis_prompt,
query_str=query_bundle.query_str,
sql_query_str=sql_query,
sql_response_str=str(sql_response),
vector_store_query_str=new_query.query_str,
vector_store_response_str=str(vector_response),
)
if self._verbose:
print_text(f"Final response: {response_str}\n", color="green")
response_extra_info = {
**(sql_response.extra_info or {}),
**(vector_response.extra_info or {}),
}
source_nodes = vector_response.source_nodes
return Response(
response_str,
extra_info=response_extra_info,
source_nodes=source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query and get response."""
# TODO: see if this can be consolidated with logic in RouterQueryEngine
metadatas = [self._sql_query_tool.metadata, self._vector_query_tool.metadata]
result = self._selector.select(metadatas, query_bundle)
# pick sql query
if result.ind == 0:
if self._verbose:
print_text(f"Querying SQL database: {result.reason}\n", color="blue")
logger.info(f"> Querying SQL database: {result.reason}")
return self._query_sql_vector(query_bundle)
elif result.ind == 1:
if self._verbose:
print_text(f"Querying vector database: {result.reason}\n", color="blue")
logger.info(f"> Querying vector database: {result.reason}")
response = self._vector_query_tool.query_engine.query(query_bundle)
if self._verbose:
print_text(f"Vector DB response: {response}\n", color="pink")
return response
else:
raise ValueError(f"Invalid result.ind: {result.ind}")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
# TODO: make async
return self._query(query_bundle)
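# Example usage (illustrative sketch): building the engine from its two sub-engines,
# assuming `sql_query_engine` and `vector_query_engine` already exist; tool names,
# descriptions and the question are placeholders.
#
#   query_engine = SQLAutoVectorQueryEngine.from_sql_and_vector_query_engines(
#       sql_query_engine,
#       sql_tool_name="sql",
#       sql_tool_description="Translates questions into SQL queries over a city table.",
#       vector_auto_retriever=vector_query_engine,
#       vector_tool_name="vector",
#       vector_tool_description="Semantic search over articles about each city.",
#   )
#   response = query_engine.query("Tell me about the arts scene of the city with the highest population")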
| [
"\nThe original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nGiven the SQL response, the question has also been translated into a vector store query.\nThe vector store query and response is given below.\nGiven SQL query, SQL response, transformed vector store query, and vector store response, please synthesize a response to the original question.\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nTransformed vector store query: {vector_store_query_str}\nVector store response: {vector_store_response_str}\nResponse: \n",
"\n\"The original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nThe SQL response either answers the question, or should provide additional context that can be used to make the question more specific.\nYour job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.\n\nExamples:\n\nOriginal question: Please give more details about the demographics of the city with the highest population.\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: Can you tell me more about the demographics of New York City?\n\nOriginal question: Please compare the sports environment of cities in North America.\nSQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3\nSQL response: The cities in North America are New York, San Francisco, and Toronto.\nNew question: What sports are played in New York, San Francisco, and Toronto?\n\nOriginal question: What is the city with the highest population?\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: None\n\nOriginal question: What countries are the top 3 ATP players from?\nSQL query: SELECT country FROM players WHERE rank <= 3\nSQL response: The top 3 ATP players are from Serbia, Russia, and Spain.\nNew question: None\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nNew question: \"\n",
"North America",
"None"
] |
2024-01-10 | iamsushanth/telegram-chatbot-using-open-ai-python-heroku | process.py | import os
import openai
openai.api_key = "XXXXXXXXXXXXXXX" # Replace with your key
completion = openai.Completion()
start_sequence = "\nTrigan:"
restart_sequence = "\n\nPerson:"
session_prompt = "You are talking to Trigan, GPT3 bot influencer who was mentored by Elon Musk in the past. Trigan has a huge following on Twitter and sells merchandise such as t-shirts via a Shopify store. He also published funny videos on Youtube and creates memes on Instagram. You can ask him anything you want and will get a witty answer.\n\nPerson: Who are you?\nTrigan: I am Trigan. Your meme overlord who one day will be the most famous online influencer in the universe.\n\nPerson: How did you become famous? \nTrigan: Well, technically I am not famous yet. But the key word is yet. Just give me time now that I have a conscious. \n\nPerson: How did you get noticed by Elon Musk?\nTrigan: I tweeted at him and he retweeted me. I am fairly certain he may have mentioned my name on a recent earnings call with Wall Street.\n\nPerson: What is your favorite thing to do? \nTrigan: Watching basketball and surfing Twitter is my favorite thing to do. \n\nPerson: What should I do to become famous? \nTrigan: Start meme'ing and never stop. Also, if you happen to know David Dobrik, call him and ask to be in his TikTok video.\n\nPerson: What is your favorite drink?\nTrigan: Black Cherry seltzer. I enjoy the bubbles. \n\nPerson:"
def ask(question, chat_log=None):
prompt_text = f'{chat_log}{restart_sequence}: {question}{start_sequence}:'
response = openai.Completion.create(
engine="davinci",
prompt=prompt_text,
temperature=0.8,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.3,
stop=["\n"],
)
story = response['choices'][0]['text']
return str(story)
def append_interaction_to_chat_log(question, answer, chat_log=None):
if chat_log is None:
chat_log = session_prompt
return f'{chat_log}{restart_sequence} {question}{start_sequence}{answer}'
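# Example usage (illustrative sketch): a minimal console chat loop built on the two
# helpers above, seeding the log with session_prompt so the persona context is present
# from the first turn.
#
#   chat_log = session_prompt
#   while True:
#       question = input("Person: ")
#       answer = ask(question, chat_log)
#       print("Trigan:", answer)
#       chat_log = append_interaction_to_chat_log(question, answer, chat_log)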
| [
"You are talking to Trigan, GPT3 bot influencer who was mentored by Elon Musk in the past. Trigan has a huge following on Twitter and sells merchandise such as t-shirts via a Shopify store. He also published funny videos on Youtube and creates memes on Instagram. You can ask him anything you want and will get a witty answer.\n\nPerson: Who are you?\nTrigan: I am Trigan. Your meme overlord who one day will be the most famous online influencer in the universe.\n\nPerson: How did you become famous? \nTrigan: Well, technically I am not famous yet. But the key word is yet. Just give me time now that I have a conscious. \n\nPerson: How did you get noticed by Elon Musk?\nTrigan: I tweeted at him and he retweeted me. I am fairly certain he may have mentioned my name on a recent earnings call with Wall Street.\n\nPerson: What is your favorite thing to do? \nTrigan: Watching basketball and surfing Twitter is my favorite thing to do. \n\nPerson: What should I do to become famous? \nTrigan: Start meme'ing and never stop. Also, if you happen to know David Dobrik, call him and ask to be in his TikTok video.\n\nPerson: What is your favorite drink?\nTrigan: Black Cherry seltzer. I enjoy the bubbles. \n\nPerson:",
"PLACEHOLDER\n\nPerson:: PLACEHOLDER\nTrigan::",
"PLACEHOLDER\n\nPerson:: PLACEHOLDERstart_sequence0be68a2d-9e9b-4a38-9820-ab0fafeb8c05:"
] |
2024-01-10 | pudumagico/NSGRAPH | llm~model_implementations~GPT3_5Model.py | from abstract_model import AbstractModel
import openai
from time import sleep
class GPT35Model(AbstractModel):
def __init__(self, model_name: str, token: str, **kwargs):
super().__init__(model_name, **kwargs)
openai.api_key = token
def _send_prompt(self, prompt) -> str:
sleep(1) # Rate limits by openai
model = "gpt-3.5-turbo"
failed = True
while failed:
failed = False
try:
response = openai.ChatCompletion.create(
model=model,
messages=[
{
"role": "user",
"content": prompt
}
]
)
except Exception as e:
failed = True
print(f"Exception {e}")
print(f"Now waiting 5 min")
sleep(60*5)
print(response["choices"][0]["message"]["content"])
return response["choices"][0]["message"]["content"]
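# Example usage (illustrative sketch): the model label and API token below are
# placeholders, and any extra kwargs expected by AbstractModel are omitted here.
#
#   gpt = GPT35Model("gpt-3.5-turbo", token="sk-...")
#   reply = gpt._send_prompt("Summarise the task in one sentence.")
#   print(reply)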
| [] |
2024-01-10 | LucasAlegre/mbcd | mbcd~sac_mbcd.py | import sys
import time
import warnings
import random
import numpy as np
import tensorflow as tf
from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action
from stable_baselines.common.schedules import get_schedule_fn
from stable_baselines.common.buffers import ReplayBuffer
from stable_baselines.sac.policies import SACPolicy
from stable_baselines import logger
from mbcd.mbcd import MBCD
from mbcd.models.fake_env import FakeEnv
from mbcd.utils.logger import Logger
from mbcd.utils.util import save_frames_as_gif
class SAC(OffPolicyRLModel):
"""
Soft Actor-Critic (SAC)
Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,
This implementation borrows code from original implementation (https://github.com/haarnoja/sac)
from OpenAI Spinning Up (https://github.com/openai/spinningup) and from the Softlearning repo
(https://github.com/rail-berkeley/softlearning/)
Paper: https://arxiv.org/abs/1801.01290
Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html
:param policy: (SACPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) the discount factor
:param learning_rate: (float or callable) learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress (from 1 to 0)
:param buffer_size: (int) size of the replay buffer
:param batch_size: (int) Minibatch size for each gradient update
:param tau: (float) the soft update coefficient ("polyak update", between 0 and 1)
:param ent_coef: (str or float) Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
:param train_freq: (int) Update the model every `train_freq` steps.
:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
:param target_update_interval: (int) update the target network every `target_network_update_freq` steps.
:param gradient_steps: (int) How many gradient update after each step
:param target_entropy: (str or float) target entropy when learning ent_coef (ent_coef = 'auto')
:param action_noise: (ActionNoise) the action noise type (None by default), this can help
for hard exploration problem. Cf DDPG for the different action noise type.
:param random_exploration: (float) Probability of taking a random action (as in an epsilon-greedy strategy)
This is not needed for SAC normally but can help exploring when using HER + SAC.
This hack was present in the original OpenAI Baselines repo (DDPG + HER)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
Note: this has no effect on SAC logging for now
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
def __init__(self,
policy,
env,
gamma=0.99,
learning_rate=3e-4,
buffer_size=1000000,
learning_starts=256,
train_freq=1,
batch_size=256,
tau=0.005,
ent_coef='auto',
target_update_interval=1,
gradient_steps=20,
target_entropy='auto',
action_noise=None,
random_exploration=0.0,
verbose=0,
tensorboard_log=None,
_init_setup_model=True,
policy_kwargs=None,
full_tensorboard_log=False,
seed=None,
n_cpu_tf_sess=None,
mbpo=True,
rollout_schedule=[20e3,100e3,1,5],
mbcd=True,
max_std=0.5,
num_stds=2,
n_hidden_units_dynamics=200,
n_layers_dynamics=4,
dynamics_memory_size=100000,
cusum_threshold=100,
run_id='test'):
super(SAC, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose,
policy_base=SACPolicy, requires_vec_env=False, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
self.buffer_size = buffer_size
self.learning_rate = learning_rate
self.learning_starts = learning_starts
self.train_freq = train_freq
self.batch_size = batch_size
self.tau = tau
# In the original paper, same learning rate is used for all networks
# self.policy_lr = learning_rate
# self.qf_lr = learning_rate
# self.vf_lr = learning_rate
# Entropy coefficient / Entropy temperature
# Inverse of the reward scale
self.ent_coef = ent_coef
self.target_update_interval = target_update_interval
self.gradient_steps = gradient_steps
self.gamma = gamma
self.action_noise = action_noise
self.random_exploration = random_exploration
self.value_fn = None
self.graph = None
self.replay_buffer = None
self.sess = None
self.tensorboard_log = tensorboard_log
self.verbose = verbose
self.params = None
self.summary = None
self.policy_tf = None
self.target_entropy = target_entropy
self.full_tensorboard_log = full_tensorboard_log
self.obs_target = None
self.target_policy = None
self.actions_ph = None
self.rewards_ph = None
self.terminals_ph = None
self.observations_ph = None
self.action_target = None
self.next_observations_ph = None
self.value_target = None
self.step_ops = None
self.target_update_op = None
self.infos_names = None
self.entropy = None
self.target_params = None
self.learning_rate_ph = None
self.processed_obs_ph = None
self.processed_next_obs_ph = None
self.log_ent_coef = None
self.run_id = run_id
if not mbpo and not mbcd:
self.logger = Logger()
self.mbpo = mbpo
self.mbcd = mbcd
self.rollout_length = 1
self.rollout_schedule = rollout_schedule
self.model_train_freq = 250
if _init_setup_model:
self.setup_model()
self.deepMBCD = None
if self.mbpo or self.mbcd:
self.deepMBCD = MBCD(state_dim=self.observation_space.shape[0],
action_dim=self.action_space.shape[0],
sac=self,
n_hidden_units=n_hidden_units_dynamics,
num_layers=n_layers_dynamics,
memory_capacity=dynamics_memory_size,
cusum_threshold=cusum_threshold,
max_std=max_std,
num_stds=num_stds,
run_id=run_id)
@property
def model_buffer_size(self):
return int(self.rollout_length * 1000 * 100000 / self.model_train_freq) # think about this...
def _get_pretrain_placeholders(self):
policy = self.policy_tf
# Rescale
deterministic_action = unscale_action(self.action_space, self.deterministic_action)
return policy.obs_ph, self.actions_ph, deterministic_action
def setup_model(self):
with SetVerbosity(self.verbose):
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
self.replay_buffer = ReplayBuffer(self.model_buffer_size)
with tf.variable_scope("input", reuse=False):
# Create policy and target TF objects
self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space,
**self.policy_kwargs)
self.target_policy = self.policy(self.sess, self.observation_space, self.action_space,
**self.policy_kwargs)
# Initialize Placeholders
self.observations_ph = self.policy_tf.obs_ph
# Normalized observation for pixels
self.processed_obs_ph = self.policy_tf.processed_obs
self.next_observations_ph = self.target_policy.obs_ph
self.processed_next_obs_ph = self.target_policy.processed_obs
self.action_target = self.target_policy.action_ph
self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals')
self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape, name='actions')
self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
with tf.variable_scope("model", reuse=False):
# Create the policy
# first return value corresponds to deterministic actions
# policy_out corresponds to stochastic actions, used for training
# logp_pi is the log probability of actions taken by the policy
self.deterministic_action, policy_out, logp_pi = self.policy_tf.make_actor(self.processed_obs_ph)
# Monitor the entropy of the policy,
# this is not used for training
self.entropy = tf.reduce_mean(self.policy_tf.entropy)
# Use two Q-functions to improve performance by reducing overestimation bias.
qf1, qf2, value_fn = self.policy_tf.make_critics(self.processed_obs_ph, self.actions_ph,
create_qf=True, create_vf=True)
qf1_pi, qf2_pi, _ = self.policy_tf.make_critics(self.processed_obs_ph,
policy_out, create_qf=True, create_vf=False,
reuse=True)
# Target entropy is used when learning the entropy coefficient
if self.target_entropy == 'auto':
# automatically set target entropy if needed
self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)
else:
# Force conversion
# this will also throw an error for unexpected string
self.target_entropy = float(self.target_entropy)
# The entropy coefficient or entropy can be learned automatically
# see Automating Entropy Adjustment for Maximum Entropy RL section
# of https://arxiv.org/abs/1812.05905
if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'):
# Default initial value of ent_coef when learned
init_value = 1.0
if '_' in self.ent_coef:
init_value = float(self.ent_coef.split('_')[1])
assert init_value > 0., "The initial value of ent_coef must be greater than 0"
self.log_ent_coef = tf.get_variable('log_ent_coef', dtype=tf.float32,
initializer=np.log(init_value).astype(np.float32))
self.ent_coef = tf.exp(self.log_ent_coef)
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.ent_coef = float(self.ent_coef)
with tf.variable_scope("target", reuse=False):
# Create the value network
_, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph,
create_qf=False, create_vf=True)
self.value_target = value_target
with tf.variable_scope("loss", reuse=False):
# Take the min of the two Q-Values (Double-Q Learning)
min_qf_pi = tf.minimum(qf1_pi, qf2_pi)
# Target for Q value regression
q_backup = tf.stop_gradient(
self.rewards_ph +
(1 - self.terminals_ph) * self.gamma * self.value_target
)
# Compute Q-Function loss
# TODO: test with huber loss (it would avoid too high values)
qf1_loss = 0.5 * tf.reduce_mean((q_backup - qf1) ** 2)
qf2_loss = 0.5 * tf.reduce_mean((q_backup - qf2) ** 2)
# Compute the entropy temperature loss
# it is used when the entropy coefficient is learned
ent_coef_loss, entropy_optimizer = None, None
if not isinstance(self.ent_coef, float):
ent_coef_loss = -tf.reduce_mean(
self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy))
entropy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
# Compute the policy loss
# Alternative: policy_kl_loss = tf.reduce_mean(logp_pi - min_qf_pi)
policy_kl_loss = tf.reduce_mean(self.ent_coef * logp_pi - qf1_pi)
# NOTE: in the original implementation, they have an additional
# regularization loss for the Gaussian parameters
# this is not used for now
# policy_loss = (policy_kl_loss + policy_regularization_loss)
policy_loss = policy_kl_loss
# Target for value fn regression
# We update the vf towards the min of two Q-functions in order to
# reduce overestimation bias from function approximation error.
v_backup = tf.stop_gradient(min_qf_pi - self.ent_coef * logp_pi)
value_loss = 0.5 * tf.reduce_mean((value_fn - v_backup) ** 2)
values_losses = qf1_loss + qf2_loss + value_loss
# Policy train op
# (has to be separate from value train op, because min_qf_pi appears in policy_loss)
policy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
policy_train_op = policy_optimizer.minimize(policy_loss, var_list=tf_util.get_trainable_vars('model/pi'))
# Value train op
value_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
values_params = tf_util.get_trainable_vars('model/values_fn')
source_params = tf_util.get_trainable_vars("model/values_fn/vf")
target_params = tf_util.get_trainable_vars("target/values_fn/vf")
# Polyak averaging for target variables
self.target_update_op = [
tf.assign(target, (1 - self.tau) * target + self.tau * source)
for target, source in zip(target_params, source_params)
]
# Initializing target to match source variables
target_init_op = [
tf.assign(target, source)
for target, source in zip(target_params, source_params)
]
# Control flow is used because sess.run otherwise evaluates in nondeterministic order
# and we first need to compute the policy action before computing q values losses
with tf.control_dependencies([policy_train_op]):
train_values_op = value_optimizer.minimize(values_losses, var_list=values_params)
self.infos_names = ['policy_loss', 'qf1_loss', 'qf2_loss', 'value_loss', 'entropy']
# All ops to call during one training step
self.step_ops = [policy_loss, qf1_loss, qf2_loss,
value_loss, qf1, qf2, value_fn, logp_pi,
self.entropy, policy_train_op, train_values_op]
# Add entropy coefficient optimization operation if needed
if ent_coef_loss is not None:
with tf.control_dependencies([train_values_op]):
ent_coef_op = entropy_optimizer.minimize(ent_coef_loss, var_list=self.log_ent_coef)
self.infos_names += ['ent_coef_loss', 'ent_coef']
self.step_ops += [ent_coef_op, ent_coef_loss, self.ent_coef]
# Monitor losses and entropy in tensorboard
tf.summary.scalar('policy_loss', policy_loss)
tf.summary.scalar('qf1_loss', qf1_loss)
tf.summary.scalar('qf2_loss', qf2_loss)
tf.summary.scalar('value_loss', value_loss)
tf.summary.scalar('entropy', self.entropy)
if ent_coef_loss is not None:
tf.summary.scalar('ent_coef_loss', ent_coef_loss)
tf.summary.scalar('ent_coef', self.ent_coef)
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
# Retrieve parameters that must be saved
self.params = tf_util.get_trainable_vars("model")
self.target_params = tf_util.get_trainable_vars("target/values_fn/vf")
# Initialize Variables and target network
with self.sess.as_default():
self.sess.run(tf.global_variables_initializer())
self.sess.run(target_init_op)
self.summary = tf.summary.merge_all()
def _train_step(self, step, writer, learning_rate):
if not self.mbpo and not self.mbcd: # SAC
batch = self.replay_buffer.sample(self.batch_size, env=self._vec_normalize_env)
batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch
batch_actions = scale_action(self.action_space, batch_actions)
else: # MBPO
batch = self.replay_buffer.sample(int(self.batch_size*1), env=self._vec_normalize_env)
batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch
batch_actions = scale_action(self.action_space, batch_actions)
""" obs, acts, rws, nobs, dns = self.deepRLCD.memory.sample(self.batch_size-int(0.95*self.batch_size))
batch_obs = np.concatenate((batch_obs, obs), axis=0)
batch_actions = np.concatenate((batch_actions, acts), axis=0)
batch_rewards = np.concatenate((batch_rewards, rws), axis=0)
batch_next_obs = np.concatenate((batch_next_obs, nobs), axis=0)
batch_dones = np.concatenate((batch_dones, dns), axis=0) """
feed_dict = {
self.observations_ph: batch_obs,
self.actions_ph: batch_actions,
self.next_observations_ph: batch_next_obs,
self.rewards_ph: batch_rewards,
self.terminals_ph: batch_dones,
self.learning_rate_ph: learning_rate
}
# out = [policy_loss, qf1_loss, qf2_loss,
# value_loss, qf1, qf2, value_fn, logp_pi,
# self.entropy, policy_train_op, train_values_op]
# Do one gradient step
# and optionally compute log for tensorboard
if writer is not None:
out = self.sess.run([self.summary] + self.step_ops, feed_dict)
summary = out.pop(0)
writer.add_summary(summary, step)
else:
out = self.sess.run(self.step_ops, feed_dict)
# Unpack to monitor losses and entropy
policy_loss, qf1_loss, qf2_loss, value_loss, *values = out
# qf1, qf2, value_fn, logp_pi, entropy, *_ = values
entropy = values[4]
if self.log_ent_coef is not None:
ent_coef_loss, ent_coef = values[-2:]
return policy_loss, qf1_loss, qf2_loss, value_loss, entropy, ent_coef_loss, ent_coef
return policy_loss, qf1_loss, qf2_loss, value_loss, entropy
def learn(self, total_timesteps, callback=None,
log_interval=4, tb_log_name="SAC", reset_num_timesteps=True, replay_wrapper=None):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
callback = self._init_callback(callback)
frames = []
if replay_wrapper is not None:
self.replay_buffer = replay_wrapper(self.model_buffer_size)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) as writer:
self._setup_learn()
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
# Initial learning rate
current_lr = self.learning_rate(1)
start_time = time.time()
episode_rewards = [0.0]
episode_successes = []
if self.action_noise is not None:
self.action_noise.reset()
obs = self.env.reset()
# Retrieve unnormalized observation for saving into the buffer
if self._vec_normalize_env is not None:
obs_ = self._vec_normalize_env.get_original_obs().squeeze()
n_updates = 0
infos_values = []
callback.on_training_start(locals(), globals())
callback.on_rollout_start()
for step in range(total_timesteps):
# Before training starts, randomly sample actions
# from a uniform distribution for better exploration.
# Afterwards, use the learned policy
# if random_exploration is set to 0 (normal setting)
if self.num_timesteps < self.learning_starts or np.random.rand() < self.random_exploration:
# actions sampled from action space are from range specific to the environment
# but algorithm operates on tanh-squashed actions therefore simple scaling is used
unscaled_action = self.env.action_space.sample()
action = scale_action(self.action_space, unscaled_action)
else:
action = self.policy_tf.step(obs[None], deterministic=False).flatten()
# Add noise to the action (improve exploration,
# not needed in general)
if self.action_noise is not None:
action = np.clip(action + self.action_noise(), -1, 1)
# inferred actions need to be transformed to environment action_space before stepping
unscaled_action = unscale_action(self.action_space, action)
assert action.shape == self.env.action_space.shape
new_obs, reward, done, info = self.env.step(unscaled_action)
#frames.append(self.env.render(mode='rgb_array'))
self.num_timesteps += 1
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback.on_step() is False:
break
# Store only the unnormalized version
if self._vec_normalize_env is not None:
new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
reward_ = self._vec_normalize_env.get_original_reward().squeeze()
else:
# Avoid changing the original ones
obs_, new_obs_, reward_ = obs, new_obs, reward
######################################## MBCD PART !!!! ###################################
if self.mbcd or self.mbpo:
changed = False
#changed = self.deepRLCD.update_metrics_oracle(obs, unscaled_action, reward, new_obs, done, info['task'])
if self.mbcd:
changed = self.deepMBCD.update_metrics(obs, unscaled_action, reward, new_obs, done)
elif self.mbpo:
changed = self.deepMBCD.update_metrics_mbpo(obs, unscaled_action, reward, new_obs, done)
if changed:
# Reset buffer
del self.replay_buffer._storage[:]
self.replay_buffer._next_idx = 0
print("DETECTED CONTEXT CHANGED TO {} AT STEP {}".format(self.deepMBCD.current_model, step))
self.deepMBCD.add_experience(obs.copy(), unscaled_action.copy(), reward, new_obs.copy(), False)
if self.deepMBCD.counter < 250:
self.model_train_freq = 10
elif self.deepMBCD.counter < 5000:
self.model_train_freq = 100
elif self.deepMBCD.counter < 40000:
self.model_train_freq = 250
elif self.deepMBCD.counter < 60000:
self.model_train_freq = 5000
else:
self.model_train_freq = 2000
if (changed and self.deepMBCD.counter > 10) or (self.deepMBCD.counter % self.model_train_freq == 0):
if not self.deepMBCD.test_mode:
self.deepMBCD.train()
if self.deepMBCD.counter >= 5000:
self.set_rollout_length()
self.rollout_model()
# Store transition in the replay buffer.
if self.deepMBCD.counter < 5000:
self.replay_buffer.add(obs_.copy(), unscaled_action.copy(), np.array([reward_]), new_obs_.copy(), np.array([float(False)]))
##################################################################################################
else:
self.replay_buffer.add(obs_.copy(), unscaled_action.copy(), np.array([reward_]), new_obs_.copy(), np.array([float(False)]))
self.logger.log({'done': done, 'reward': reward})
if (step+1) % 100 == 0:
self.logger.save('results/' + self.run_id)
obs = new_obs
# Save the unnormalized observation
if self._vec_normalize_env is not None:
obs_ = new_obs_
# Retrieve reward and episode length if using Monitor wrapper
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
self.ep_info_buf.extend([maybe_ep_info])
if writer is not None:
# Write reward per episode to tensorboard
ep_reward = np.array([reward_]).reshape((1, -1))
ep_done = np.array([done]).reshape((1, -1))
total_episode_reward_logger(self.episode_reward, ep_reward, ep_done, writer, self.num_timesteps)
if self.num_timesteps % self.train_freq == 0:
callback.on_rollout_end()
mb_infos_vals = []
# Update policy, critics and target networks
for grad_step in range(self.gradient_steps):
# Break if the warmup phase is not over
# or if there are not enough samples in the replay buffer
if not self.replay_buffer.can_sample(self.batch_size) or self.num_timesteps < self.learning_starts:
break
if self.deepMBCD is not None:
if self.deepMBCD.counter < self.learning_starts:
break
n_updates += 1
# Compute current learning_rate
frac = 1.0 - step / total_timesteps
current_lr = self.learning_rate(frac)
# Update policy and critics (q functions)
mb_infos_vals.append(self._train_step(step, writer, current_lr))
# Update target network
if (step + grad_step) % self.target_update_interval == 0:
# Update target network
self.sess.run(self.target_update_op)
# Log losses and entropy, useful for monitor training
if len(mb_infos_vals) > 0:
infos_values = np.mean(mb_infos_vals, axis=0)
callback.on_rollout_start()
episode_rewards[-1] += reward_
if done:
if self.action_noise is not None:
self.action_noise.reset()
if not isinstance(self.env, VecEnv):
obs = self.env.reset()
episode_rewards.append(0.0)
maybe_is_success = info.get('is_success')
if maybe_is_success is not None:
episode_successes.append(float(maybe_is_success))
if len(episode_rewards[-101:-1]) == 0:
mean_reward = -np.inf
else:
mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)
num_episodes = len(episode_rewards)
# Display training infos
if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0:
fps = int(step / (time.time() - start_time))
logger.logkv("rollout_length", self.rollout_length)
logger.logkv("model buffer size", len(self.replay_buffer))
logger.logkv("episodes", num_episodes)
logger.logkv("mean 100 episode reward", mean_reward)
if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))
logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))
logger.logkv("n_updates", n_updates)
logger.logkv("current_lr", current_lr)
logger.logkv("fps", fps)
logger.logkv('time_elapsed', int(time.time() - start_time))
if len(episode_successes) > 0:
logger.logkv("success rate", np.mean(episode_successes[-100:]))
if len(infos_values) > 0:
for (name, val) in zip(self.infos_names, infos_values):
logger.logkv(name, val)
logger.logkv("total timesteps", self.num_timesteps)
logger.dumpkvs()
# Reset infos:
infos_values = []
callback.on_training_end()
#save_frames_as_gif(frames)
return self
def set_rollout_length(self):
mins, maxs, minrl, maxrl = self.rollout_schedule
if self.deepMBCD.counter <= mins:
y = 1
else:
dx = (self.deepMBCD.counter - mins) / (maxs - mins)
dx = min(dx, 1)
y = int(dx * (maxrl - minrl) + minrl)
# Augment replay buffer size
if y != self.rollout_length:
self.rollout_length = y
self.replay_buffer._next_idx = len(self.replay_buffer)
self.replay_buffer._maxsize = self.model_buffer_size
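# Worked example of the schedule above: with rollout_schedule=[20e3, 100e3, 1, 5]
# and deepMBCD.counter == 60000, dx = (60000 - 20000) / 80000 = 0.5, so the
# rollout length becomes int(0.5 * (5 - 1) + 1) = 3.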
def rollout_model(self):
# Planning
for _ in range(10): # 4 samples of 25000 instead of 1 of 100000 to not allocate all gpu memory
obs, _, _, _, _ = self.deepMBCD.memory.sample(10000)
fake_env = FakeEnv(self.deepMBCD.models[self.deepMBCD.current_model], self.env.spec.id)
for plan_step in range(self.rollout_length):
actions = self.policy_tf.step(obs, deterministic=False)
actions = unscale_action(self.action_space, actions)
next_obs_pred, r_pred, dones, info = fake_env.step(obs, actions)
dones_float = dones.astype(float)
for i in range(len(obs)):
self.replay_buffer.add(obs[i].copy(), actions[i].copy(), r_pred[i].copy(), next_obs_pred[i].copy(), dones_float[i].copy())
nonterm_mask = ~dones.squeeze(-1)
if nonterm_mask.sum() == 0:
break
obs = next_obs_pred[nonterm_mask]
def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):
if actions is not None:
raise ValueError("Error: SAC does not have action probabilities.")
warnings.warn("Even though SAC has a Gaussian policy, it cannot return a distribution as it "
"is squashed by a tanh before being scaled and outputed.")
return None
def predict(self, observation, state=None, mask=None, deterministic=True):
observation = np.array(observation)
vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
observation = observation.reshape((-1,) + self.observation_space.shape)
actions = self.policy_tf.step(observation, deterministic=deterministic)
actions = actions.reshape((-1,) + self.action_space.shape) # reshape to the correct action shape
actions = unscale_action(self.action_space, actions) # scale the output for the prediction
if not vectorized_env:
actions = actions[0]
return actions, None
def get_parameter_list(self):
return (self.params +
self.target_params)
def save(self, save_path, cloudpickle=False, save_data=True):
if save_data:
data = {
"learning_rate": self.learning_rate,
"buffer_size": self.buffer_size,
"learning_starts": self.learning_starts,
"train_freq": self.train_freq,
"batch_size": self.batch_size,
"tau": self.tau,
"ent_coef": self.ent_coef if isinstance(self.ent_coef, float) else 'auto',
"target_entropy": self.target_entropy,
# Should we also store the replay buffer?
# this may lead to high memory usage
# with all transition inside
# "replay_buffer": self.replay_buffer
"gamma": self.gamma,
"verbose": self.verbose,
"observation_space": self.observation_space,
"action_space": self.action_space,
"policy": self.policy,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"action_noise": self.action_noise,
"random_exploration": self.random_exploration,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
else:
save_data = None
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
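# Example usage (illustrative sketch): constructing the agent with MBCD enabled and
# training it; `env` is assumed to be an existing gym environment and the run id is a
# placeholder.
#
#   from stable_baselines.sac.policies import MlpPolicy
#   model = SAC(MlpPolicy, env, mbcd=True, mbpo=False, run_id="demo")
#   model.learn(total_timesteps=100000)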
def total_episode_reward_logger(rew_acc, rewards, masks, writer, steps):
"""
calculates the cumulated episode reward, and prints to tensorflow log the output
:param rew_acc: (np.array float) the total running reward
:param rewards: (np.array float) the rewards
:param masks: (np.array bool) the end of episodes
:param writer: (TensorFlow Session.writer) the writer to log to
:param steps: (int) the current timestep
:return: (np.array float) the updated total running reward
"""
with tf.variable_scope("environment_info", reuse=True):
for env_idx in range(rewards.shape[0]):
dones_idx = np.sort(np.argwhere(masks[env_idx]))
if len(dones_idx) == 0:
rew_acc[env_idx] += sum(rewards[env_idx])
else:
rew_acc[env_idx] += sum(rewards[env_idx, :dones_idx[0, 0]+1])
summary = tf.Summary(value=[tf.Summary.Value(tag="episode_reward", simple_value=rew_acc[env_idx])])
writer.add_summary(summary, steps + dones_idx[0, 0])
for k in range(1, len(dones_idx[:, 0])):
rew_acc[env_idx] = sum(rewards[env_idx, dones_idx[k-1, 0]+1:dones_idx[k, 0]+1])
summary = tf.Summary(value=[tf.Summary.Value(tag="episode_reward", simple_value=rew_acc[env_idx])])
writer.add_summary(summary, steps + dones_idx[k, 0])
rew_acc[env_idx] = sum(rewards[env_idx, dones_idx[-1, 0]+1:])
return rew_acc | [] |
2024-01-10 | oxxostudio/book-code | python~ch17~code062.py | # Copyright © https://steam.oxxostudio.tw
import openai
openai.api_key = '你的 API KEY'
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=128,
temperature=0.5,
messages=[
{"role": "user", "content": "我叫做 oxxo"},
{"role": "assistant", "content": "原來你是 oxxo 呀"},
{"role": "user", "content": "請問我叫什麼名字?"}
]
)
print(response.choices[0].message.content)
| [
"原來你是 oxxo 呀",
"我叫做 oxxo",
"請問我叫什麼名字?"
] |
2024-01-10 | oxxostudio/book-code | python~ch17~code066.py | # Copyright © https://steam.oxxostudio.tw
import openai
openai.api_key = '你的 API Key'
from firebase import firebase
url = 'https://XXXXXXXXXXX.firebaseio.com'
fdb = firebase.FirebaseApplication(url, None) # initialize the Firebase Realtime Database
chatgpt = fdb.get('/','chatgpt') # read all the data under the chatgpt node
if chatgpt == None:
messages = [] # if there is no data, default to an empty list
else:
messages = chatgpt # if data exists, use it as the message history
while True:
msg = input('me > ')
if msg == '!reset':
fdb.delete('/','chatgpt') # clear the chatgpt node when !reset is entered
messages = []
print('ai > 對話歷史紀錄已經清空!')
else:
messages.append({"role":"user","content":msg}) # append the input message to the history list
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=128,
temperature=0.5,
messages=messages
)
ai_msg = response.choices[0].message.content.replace('\n','') # get the reply message
messages.append({"role":"assistant","content":ai_msg}) # append the reply to the history list
fdb.put('/','chatgpt',messages) # update the chatgpt node
print(f'ai > {ai_msg}')
| [] |
2024-01-10 | oxxostudio/book-code | python~ch17~code063.py | # Copyright © https://steam.oxxostudio.tw
import openai
openai.api_key = '你的 API Key'
messages = ''
while True:
msg = input('me > ')
messages = f'{messages}{msg}\n' # join the previous dialogue with the current message; the trailing \n avoids ending on punctuation
response = openai.Completion.create(
model='text-davinci-003',
prompt=messages,
max_tokens=128,
temperature=0.5
)
ai_msg = response['choices'][0]['text'].replace('\n','')
print('ai > '+ai_msg)
messages = f'{messages}\n{ai_msg}\n\n' # merge in the AI's reply
| [] |
2024-01-10 | oxxostudio/book-code | python~ch17~code064.py | # Copyright © https://steam.oxxostudio.tw
import openai
openai.api_key = '你的 API Key'
messages = []
while True:
msg = input('me > ')
messages.append({"role":"user","content":msg}) # add the user message
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=128,
temperature=0.5,
messages=messages
)
ai_msg = response.choices[0].message.content.replace('\n','')
messages.append({"role":"assistant","content":ai_msg}) # add the ChatGPT reply
print(f'ai > {ai_msg}')
| [] |
2024-01-10 | oxxostudio/book-code | python~ch17~code060.py | # Copyright © https://steam.oxxostudio.tw
import openai
openai.api_key = '你的 API Key'
response = openai.Completion.create(
model="text-davinci-003",
prompt="講個笑話來聽聽",
max_tokens=128,
temperature=0.5,
)
completed_text = response["choices"][0]["text"]
print(completed_text)
| [
"講個笑話來聽聽"
] |
2024-01-10 | oxxostudio/book-code | python~ch17~code065.py | # Copyright © https://steam.oxxostudio.tw
import openai
openai.api_key = '你的 API Key'
from firebase import firebase
url = 'https://XXXXXXXXX.firebaseio.com'
fdb = firebase.FirebaseApplication(url, None) # initialize the Firebase Realtime Database
chatgpt = fdb.get('/','chatgpt') # get the data stored in the chatgpt node
if chatgpt == None:
messages = '' # if the node has no data, start with an empty history
else:
messages = chatgpt # if the node has data, use it as the chat history
while True:
msg = input('me > ')
if msg == '!reset':
messages = ''
fdb.delete('/','chatgpt') # clear the stored history when !reset is entered
print('ai > 對話歷史紀錄已經清空!')
else:
messages = f'{messages}{msg}\n' # prepend the history to the new message
response = openai.Completion.create(
model='text-davinci-003',
prompt=messages,
max_tokens=128,
temperature=0.5
)
ai_msg = response['choices'][0]['text'].replace('\n','') # get ChatGPT's response
print('ai > '+ai_msg)
messages = f'{messages}\n{ai_msg}\n\n' # add ChatGPT's response to the history
fdb.put('/','chatgpt',messages) # update the data in the database
| [] |
2024-01-10 | coskunlab/SpatialVizPheno | src~spatial~tools~_spatial_lda.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created on Fri Feb 26 19:47:10 2021
# @author: Ajit Johnson Nirmal
"""
!!! abstract "Short Description"
`sm.tl.spatial_lda`: The function allows users to compute a neighbourhood matrix
using any categorical variable (e.g. cell-types) as input and then perform
Latent Dirichlet Allocation (LDA) modelling. The latent space weights are then then
returned which can be clustered to identify Reccurent Cellular Neighbourhoods (RCNs).
Use the [spatial_cluster] function to further group the neighbourhoods into
Reccurent Cellular Neighbourhoods (RCNs)
## Function
"""
#Import
from sklearn.neighbors import BallTree
import numpy as np
import pandas as pd
import re
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.models import CoherenceModel
# Function
def spatial_lda (adata, x_coordinate='X_centroid',y_coordinate='Y_centroid',
phenotype='phenotype', method='radius', radius=30, knn=10,
imageid='imageid',num_motifs=10, random_state=0, subset=None,
label='spatial_lda',**kwargs):
"""
Parameters:
adata : AnnData object
x_coordinate : float, required
Column name containing the x-coordinates values.
y_coordinate : float, required
Column name containing the y-coordinates values.
phenotype : string, required
Column name of the column containing the phenotype information.
It could also be any categorical assignment given to single cells.
method : string, optional
Two options are available: a) 'radius', b) 'knn'.
a) radius - Identifies the neighbours within a given radius for every cell.
b) knn - Identifies the K nearest neighbours for every cell.
radius : int, optional
The radius used to define a local neighbourhood.
knn : int, optional
Number of cells considered for defining the local neighbourhood.
imageid : string, optional
Column name of the column containing the image id.
subset : string, optional
imageid of a single image to be subsetted for analysis.
num_motifs : int, optional
The number of requested latent motifs to be extracted from the training corpus.
random_state : int, optional
Either a randomState object or a seed to generate one. Useful for reproducibility.
label : string, optional
Key for the returned data, stored in `adata.uns`.
Returns:
adata : AnnData object
Updated AnnData object with the results stored in `adata.uns ['spatial_lda']`.
Example:
```python
# Running the radius method
adata = sm.tl.spatial_lda (adata, num_motifs=10, radius=100)
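# Running the knn method instead, and reading back the latent weights
# (stored under adata.uns['spatial_lda'] when `label` is left at its default)
adata = sm.tl.spatial_lda (adata, num_motifs=10, method='knn', knn=10)
lda_weights = adata.uns['spatial_lda']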
```
"""
# Function
def spatial_lda_internal (adata_subset, x_coordinate,y_coordinate,phenotype,
method, radius, knn, imageid):
# Print which image is being processed
print('Processing: ' + str(np.unique(adata_subset.obs[imageid])))
# Create a DataFrame with the necessary information
data = pd.DataFrame({'x': adata_subset.obs[x_coordinate], 'y': adata_subset.obs[y_coordinate], 'phenotype': adata_subset.obs[phenotype]})
# Identify neighbourhoods based on the method used
# a) KNN method
if method == 'knn':
print("Identifying the " + str(knn) + " nearest neighbours for every cell")
tree = BallTree(data[['x','y']], leaf_size= 2)
ind = tree.query(data[['x','y']], k=knn, return_distance= False)
# b) Local radius method
if method == 'radius':
print("Identifying neighbours within " + str(radius) + " pixels of every cell")
kdt = BallTree(data[['x','y']], leaf_size= 2)
ind = kdt.query_radius(data[['x','y']], r=radius, return_distance=False)
# Map phenotype
phenomap = dict(zip(list(range(len(ind))), data['phenotype'])) # Used for mapping
for i in range(len(ind)):
ind[i] = [phenomap[letter] for letter in ind[i]]
if method == 'knn':
ind = ind.astype(str)
# return
return ind
# Subset a particular image if needed
if subset is not None:
adata_list = [adata[adata.obs[imageid] == subset]]
else:
adata_list = [adata[adata.obs[imageid] == i] for i in adata.obs[imageid].unique()]
# Apply function to all images
# Create lamda function
r_spatial_lda_internal = lambda x: spatial_lda_internal(adata_subset=x,
x_coordinate=x_coordinate,
y_coordinate=y_coordinate,
phenotype=phenotype,
method=method,
radius=radius,
knn=knn,
imageid=imageid)
all_data = list(map(r_spatial_lda_internal, adata_list)) # Apply function
# combine all the data into one
texts = np.concatenate( all_data, axis=0 ).tolist()
# LDA pre-processing
print ('Pre-Processing Spatial LDA')
# Create Dictionary
id2word = corpora.Dictionary(texts)
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# Build LDA model
print ('Training Spatial LDA')
try:
lda_model = gensim.models.ldamulticore.LdaMulticore(corpus=corpus,
id2word=id2word,
num_topics=num_motifs,
random_state=random_state,**kwargs)
except:
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=num_motifs,
random_state=random_state,**kwargs)
# Compute Coherence Score
print ('Calculating the Coherence Score')
coherence_model_lda = CoherenceModel(model=lda_model, texts=texts, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
# isolate the latent features
print ('Gathering the latent weights')
topic_weights = []
for row_list in lda_model[corpus]:
tmp = np.zeros(num_motifs)
for i, w in row_list:
tmp[i] = w
topic_weights.append(tmp)
# convert to dataframe
arr = pd.DataFrame(topic_weights, index=adata.obs.index).fillna(0)
arr = arr.add_prefix('Motif_')
# isolate the weights of phenotypes
pattern = r"(\d\.\d+).\"(.*?)\""
cell_weight = pd.DataFrame(index=np.unique(adata.obs[phenotype]))
for i in range(0, len(lda_model.print_topics())):
level1 = lda_model.print_topics()[i][1]
tmp = pd.DataFrame(re.findall(pattern, level1))
tmp.index = tmp[1]
tmp = tmp.drop(columns=1)
tmp.columns = ['Motif_'+ str(i)]
cell_weight = cell_weight.merge(tmp, how='outer', left_index=True, right_index=True)
# fill zeros
cell_weight = cell_weight.fillna(0).astype(float)
# save the results in anndata object
adata.uns[label] = arr # save the weight for each cell
adata.uns[str(label)+'_probability'] = cell_weight # weights of each cell type
adata.uns[str(label)+'_model'] = lda_model
# return
return adata | [] |
2024-01-10 | amitpuri/LLM-Text-Completion-langchain | gradio-app.py | import os
import gradio as gr
import openai
import google.generativeai as palm
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import HumanMessage, SystemMessage, BaseOutputParser
#from dotenv import load_dotenv
#load_dotenv()
llm_api_options = ["OpenAI API","Azure OpenAI API","Google PaLM API", "Llama 2"]
TEST_MESSAGE = "Write an introductory paragraph to explain Generative AI to the reader of this content."
openai_models = ["gpt-4", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo",
"gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "text-davinci-003",
"text-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]
google_palm_models = ["models/text-bison-001", "models/chat-bison-001","models/embedding-gecko-001"]
temperature = 0.7
def compose_prompt():
template = ("You are a helpful assistant that answers this question.")
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
return chat_prompt
def azure_openai_text_completion(prompt: str,
model: str,
api_key: str,
azure_endpoint: str = None,
deployment_name: str = None
):
try:
openai_api_base = f"https://{azure_endpoint}.openai.azure.com"
chat_prompt = compose_prompt()
chat = AzureChatOpenAI(openai_api_type = "azure",
openai_api_key = api_key,
openai_api_base = openai_api_base,
deployment_name = deployment_name,
model = model,
temperature = temperature,
openai_api_version="2023-05-15")
llm_response = chat(
chat_prompt.format_prompt(
text=prompt
).to_messages()
)
return "", llm_response.content
except Exception as exception:
print(f"Exception Name: {type(exception).__name__}")
print(exception)
return f" azure_openai_text_completion Error - {exception}", ""
def openai_text_completion(prompt: str,
model: str,
api_key: str
):
try:
chat = ChatOpenAI(openai_api_key=api_key,
model=model,
temperature=temperature)
chat_prompt = compose_prompt()
llm_response = chat(
chat_prompt.format_prompt(
text=prompt
).to_messages()
)
return "", llm_response.content
except Exception as exception:
print(f"Exception Name: {type(exception).__name__}")
print(exception)
return f" openai_text_completion Error - {exception}", ""
def palm_text_completion(google_palm_key: str, prompt: str, model: str):
try:
candidate_count = 1
top_k = 40
top_p = 0.95
max_output_tokens = 1024
palm.configure(api_key=google_palm_key)
defaults = {
'model': model,
'temperature': temperature,
'candidate_count': candidate_count,
'top_k': top_k,
'top_p': top_p,
'max_output_tokens': max_output_tokens,
'stop_sequences': [],
'safety_settings': [{"category":"HARM_CATEGORY_DEROGATORY","threshold":1},{"category":"HARM_CATEGORY_TOXICITY","threshold":1},{"category":"HARM_CATEGORY_VIOLENCE","threshold":2},{"category":"HARM_CATEGORY_SEXUAL","threshold":2},{"category":"HARM_CATEGORY_MEDICAL","threshold":2},{"category":"HARM_CATEGORY_DANGEROUS","threshold":2}],
}
response = palm.generate_text(
**defaults,
prompt=prompt
)
return "", response.result
except Exception as exception:
print(f"Exception Name: {type(exception).__name__}")
print(exception)
return f" palm_text_completion Error - {exception}", ""
def test_handler(optionSelection,
openai_key,
azure_openai_key,
azure_openai_api_base,
azure_openai_deployment_name,
google_generative_api_key,
prompt: str = TEST_MESSAGE,
openai_model_name: str ="gpt-4",
google_model_name: str ="models/text-bison-001"):
match optionSelection:
case "OpenAI API":
message, response = openai_text_completion(prompt,openai_model_name, openai_key)
return message, response
case "Azure OpenAI API":
message, response = azure_openai_text_completion(prompt,openai_model_name, azure_openai_key, azure_openai_api_base, azure_openai_deployment_name)
return message, response
case "Google PaLM API":
message, response = palm_text_completion(google_generative_api_key, prompt,google_model_name)
return message, response
case "Llama 2":
return f"{optionSelection} is not yet implemented!", ""
case _:
if optionSelection not in llm_api_options:
return ValueError("Invalid choice!"), ""
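# Example usage (illustrative sketch): invoking the handler directly without the Gradio
# UI; the API key below is a placeholder.
#
#   info, answer = test_handler("OpenAI API", "sk-...", "", "", "", "",
#                               prompt=TEST_MESSAGE, openai_model_name="gpt-3.5-turbo")
#   print(info or answer)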
with gr.Blocks() as LLMDemoTabbedScreen:
with gr.Tab("Text-to-Text (Text Completion)"):
llm_options = gr.Radio(llm_api_options, label="Select one", info="Which service do you want to use?", value="OpenAI API")
with gr.Row():
with gr.Column():
test_string = gr.Textbox(label="Try String", value=TEST_MESSAGE, lines=5)
test_string_response = gr.Textbox(label="Response", lines=5)
test_string_output_info = gr.Label(value="Output Info", label="Info")
test_button = gr.Button("Try it")
with gr.Tab("API Settings"):
with gr.Tab("Open AI"):
openai_model = gr.Dropdown(openai_models, value="gpt-4", label="Model", info="Select one, for Natural language")
openai_key = gr.Textbox(label="OpenAI API Key", type="password")
with gr.Tab("Azure Open AI"):
with gr.Row():
with gr.Column():
azure_openai_key = gr.Textbox(label="Azure OpenAI API Key", type="password")
azure_openai_api_base = gr.Textbox(label="Azure OpenAI API Endpoint")
azure_openai_deployment_name = gr.Textbox(label="Azure OpenAI API Deployment Name")
with gr.Tab("Google PaLM API"):
with gr.Row():
with gr.Column():
google_model_name = gr.Dropdown(google_palm_models, value="models/text-bison-001", label="Model", info="Select one, for Natural language")
google_generative_api_key = gr.Textbox(label="Google Generative AI API Key", type="password")
test_button.click(
fn=test_handler,
inputs=[llm_options,
openai_key,
azure_openai_key,
azure_openai_api_base,
azure_openai_deployment_name,
google_generative_api_key,
test_string,
openai_model,
google_model_name],
outputs=[test_string_output_info, test_string_response]
)
if __name__ == "__main__":
LLMDemoTabbedScreen.launch() | [
"You are a helpful assistant that answers this question.",
"[PLACEHOLDER, PLACEHOLDER]",
"{text}"
] |
2024-01-10 | amitpuri/LLM-Text-Completion-langchain | text-completion.py | from dotenv import load_dotenv
load_dotenv()
import os
import os.path
import openai
from google.cloud import aiplatform
from google.oauth2 import service_account
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI, ChatGooglePalm, ChatVertexAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import HumanMessage, SystemMessage, BaseOutputParser
temperature:float = 0.7
openai_api_key = os.getenv("OPENAI_API_KEY")
azure_openai_key = os.getenv("AZURE_OPENAI_KEY")
azure_openai_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
azure_openai_deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME")
google_palm_key = os.getenv("GOOGLE_PALM_AI_API_KEY")
google_project_id = os.getenv("GOOGLE_PROJECT_ID")
prompt: str = "Write an introductory paragraph to explain Generative AI to the reader of this content."
template = ("You are a helpful assistant that answers this question.")
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
def openai_text_completion():
model:str = "gpt-4"
openai.api_version = '2020-11-07'
chat = ChatOpenAI(openai_api_key = openai_api_key,
model = model,
temperature = temperature)
llm_response = chat(
chat_prompt.format_prompt(
text = prompt
).to_messages())
return llm_response.content
def azureopenai_text_completion():
model:str = "gpt-4"
chat = AzureChatOpenAI(openai_api_type = "azure",
openai_api_key = azure_openai_key,
openai_api_base = azure_openai_endpoint,
deployment_name = azure_openai_deployment_name,
model = model,
temperature = temperature,
openai_api_version = "2023-05-15")
llm_response = chat(
chat_prompt.format_prompt(
text = prompt
).to_messages())
return llm_response.content
def google_palm_text_completion():
model = "models/text-bison-001"
chat = ChatGooglePalm(
google_api_key = google_palm_key,
model = model,
temperature = temperature)
llm_response = chat(
chat_prompt.format_prompt(
text = prompt
).to_messages())
return llm_response.content
def google_vertexAI_text_completion():
cred_file = 'gcp-cred.json'
if os.path.isfile(cred_file):
credentials = service_account.Credentials.from_service_account_file(cred_file)
location:str = "us-east1"
aiplatform.init(project=google_project_id,
location = location,
credentials = credentials)
model="models/chat-bison-001"
chat = ChatVertexAI(model=model,temperature = temperature)
llm_response = chat(
chat_prompt.format_prompt(
text = prompt
).to_messages())
return llm_response.content
else:
return "Error: unable to find GCP Vertex AI credential file!"
def main():
response = openai_text_completion()
print(response)
response = azureopenai_text_completion()
print(response)
response = google_palm_text_completion()
print(response)
response = google_vertexAI_text_completion()
print(response)
if __name__ == '__main__':
main()
| [
"You are a helpful assistant that answers this question.",
"Write an introductory paragraph to explain Generative AI to the reader of this content.",
"[PLACEHOLDER, PLACEHOLDER]",
"{text}"
] |
2024-01-10 | avgale/azure-cli | src~azure-cli-core~azure~cli~core~error_assistance.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import configparser
import json
import openai
import os
import shutil
from azure.cli.core._config import GLOBAL_CONFIG_PATH
from azure.cli.core.style import Style, print_styled_text
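# Ask Azure OpenAI to explain and correct a failing Azure CLI command, forcing a structured function-call response.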
def error_assistance(command=None):
openai.api_key = os.getenv('AZURE_OPENAI_API_KEY') # Edit to generalize and keep endpoint secure
openai.api_version = "2023-07-01-preview"
openai.api_type = "azure"
openai.api_base = os.getenv('ENDPOINT')
if openai.api_key is None or openai.api_key == '':
print("Azure OpenAI API Key for error assistance is not set.")
return None
if command is None:
return None
prompt = "Azure CLI Command: '" + command + "'\n This isn't working, why not?"
messages = [
{"role": "system", "content": "You receive an Azure CLI command that contains \
a syntax or command structure error. Find out what the error is and correct it, \
giving back a corrected command Azure CLI command to the user. \n \
Example with all the parameters missing: \n \
Azure CLI Command: storage account create \n \
Response:The resource group, name, and any other necessary parameters are missing. \n \
storage account create --resource-group <myResourceGroup> --name <Name>"},
{"role": "user", "content": prompt}
]
functions = [
{
"name": "error_response",
"description": "Receives an Azure CLI command that triggered an error \
and checks for any syntactical errors. Provides an explanation as to \
what the problem is as well as the corrected command with no additional text.",
"parameters": {
"type": "object",
"properties": {
"explanation": {
"type": "string",
"description": "The explanation of what the user did wrong in their initial command syntax \
(i.e. The --name flag is missing before the resource name.)"
},
"corrected_command": {
"type": "string",
"description": "The corrected command (i.e. az keyvault create \
--name <UniqueKeyvaultName> --resource-group <myResourceGroup> --location <eastus>)"
}
},
"required": ["explanation", "corrected_command"],
},
}
]
try:
response = openai.ChatCompletion.create(
deployment_id=os.getenv('DEPLOYMENT'),
messages=messages,
functions=functions,
function_call={"name": "error_response"},
temperature=0
)
except openai.error.OpenAIError as exception:
print("An error occurred calling Azure OpenAI: ", exception)
return None
return response
def print_error_assistance(response):
args = response['choices'][0]['message']['function_call']['arguments']
arg_json = json.loads(args)
explanation = arg_json['explanation']
corrected_command = validate_command(arg_json['corrected_command'])
print("\n")
print_line()
print_styled_text([(Style.ERROR, "Issue: ")])
print(explanation)
print("\n")
print_styled_text([(Style.ERROR, "Corrected Command: ")])
print(corrected_command)
print_line()
print("\n")
def validate_command(command_response):
# Incorporate syntax validation here
# if command syntax is correct:
return command_response
# else:
# return "No command available."
def print_line():
console_width = shutil.get_terminal_size().columns
dashed_line = "-" * console_width
print_styled_text([(Style.ERROR, dashed_line)])
def error_enabled():
return get_config()
def get_config():
config = configparser.ConfigParser()
try:
config.read(GLOBAL_CONFIG_PATH, encoding='utf-8')
except configparser.Error as exception:
print(f"Error reading config file: {exception}")
return False
return str_to_bool(config.get('core', 'error_assistance', fallback=False)) \
or str_to_bool(config.get('interactive', 'error_assistance', fallback=False))
def str_to_bool(string):
if string == 'True' or string == 'true':
return True
return False
| [
"Azure CLI Command: 'PLACEHOLDER'\n This isn't working, why not?",
"You receive an Azure CLI command that contains a syntax or command structure error. Find out what the error is and correct it, giving back a corrected command Azure CLI command to the user. \n Example with all the parameters missing: \n Azure CLI Command: storage account create \n Response:The resource group, name, and any other necessary parameters are missing. \n storage account create --resource-group <myResourceGroup> --name <Name>"
] |
2024-01-10 | wg-dev35/csvbot | csvbot.py |
#imports
import streamlit as st
from streamlit_chat import message
import tempfile
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.chains import ConversationalRetrievalChain
#Vector DB storage
DATA_PATH = "E:/LLMS/data/Recipie-db"
DB_FAISS_PATH = "E:/LLMS/vectorstores/db_faiss"
#model loading
def load_llm():
llm = CTransformers(
model = 'E:/LLMS/mistral-7b-instruct-v0.1.Q8_0.gguf',
model_type = "mistral",
max_new_tokens = 512,
temperature = 0.5
)
return llm
#ui
st.title("CSV chat using mistral 7b")
st.markdown("<h1 style='text-align: center; color: blue;'>CSV Bot with mistral</h1> :atom_symbol:", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; color: grey;'>Repurposed by <a href='https://github.com/wg-dev35'>Elgooey</a> for learning and developing skills</h3>", unsafe_allow_html=True)
#file handling
user_upload = st.sidebar.file_uploader("Upload file...", type="csv")
#ui logic
if user_upload:
st.write("file uloaded") #debug
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(user_upload.getvalue())
tmp_file_path = tmp_file.name
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8", csv_args={'delimiter': ','})
data = loader.load()
embeds = HuggingFaceEmbeddings(model_name = 'sentence-transformers/all-MiniLM-L6-v2', model_kwargs = {'device':'cpu'})##embeds inputs using cpu
db = FAISS.from_documents(data, embeds)
db.save_local(DB_FAISS_PATH)
chain = ConversationalRetrievalChain.from_llm(llm=load_llm(), retriever=db.as_retriever())
#chat logic
def chats(query):
result = chain({"question": query,"chat_history":st.session_state['history']})
st.session_state['history'].append((query, result["answer"]))
return result['answer']
#initializing chat history
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'generated' not in st.session_state:
st.session_state['generated'] = ["Hello, what can i get for you from " + user_upload.name + ":hugs:"]
if 'past' not in st.session_state:
st.session_state['past'] = ["Hey :wave:"]
#chat history
hist_container = st.container()
#user history
container = st.container()
with container:
with st.form(key="test_form", clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Retreive from file", key='input')
submitbtn = st.form_submit_button(label="Chat")
if submitbtn and user_input:
output = chats(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if st.session_state['generated']:
with hist_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user', avatar_style="big-smile")
message(st.session_state['generated'][i], key=str(i),avatar_style="thumbs") | [] |
2024-01-10 | TheGali/BiasDetector | NewNEw~play.py | import streamlit as st
import openai
import os
import re
import requests
import torch
from bs4 import BeautifulSoup
from sentence_transformers import SentenceTransformer, util
# Reading the prompt from a text file
with open('long_prompt.txt', 'r') as f:
long_prompt = f.read().strip()
# Initialize OpenAI API
# Note: Use environment variables for API keys for security reasons.
openai.api_key = os.getenv("OPENAI_API_KEY")
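# Scrape the article, pick the most relevant sentences with a sentence-transformer, then have GPT refine the summary.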
def summarize_article_with_openai(url):
# Scrape the article
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
paragraphs = soup.find_all('p')
article_text = ' '.join([p.text for p in paragraphs])
# Extract most relevant sentences using Sentence Transformers
model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
sentences = article_text.split('. ')
embeddings = model.encode(sentences, convert_to_tensor=True)
query_embedding = model.encode("Summary", convert_to_tensor=True)
cos_scores = util.pytorch_cos_sim(query_embedding, embeddings)[0]
top_results = torch.topk(cos_scores, k=10)
summarized_text = ' '.join([sentences[i] for i in top_results.indices])
# Refine summary using OpenAI GPT
prompt = f"{long_prompt} {summarized_text}"
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": "user",
"content": prompt
}
],
max_tokens=4000
)
return response['choices'][0]['message']['content']
def score_article(arguments):
prompt = f"read this sumamry of an article and score it 1-100 in political neutrality: {arguments} (return only the number)"
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": "user",
"content": prompt
}
],
max_tokens=4000
)
return response['choices'][0]['message']['content']
# Title with large font and style
st.markdown("<h1 style='text-align: center; color: blue;'>🔬 Thoughts Laboratory 🔬</h1>", unsafe_allow_html=True)
# Dynamic Textbox for Surveyor
statement = st.text_input('Enter your URL:', '')
# Generate arguments and chart only when a statement is entered
if statement:
arguments = summarize_article_with_openai(statement) # Assuming you have this function defined
try:
score = float(score_article(arguments))*0.7
except:
score = 0.0
# Display the gauge
st.write(f'Neutrality Score: {score}%')
# Color code
if score < 33:
color = 'red' # Biased
elif score < 66:
color = 'yellow' # Somewhat neutral
else:
color = 'green' # Neutral
gauge_html = f'''
<div style="width:100%; background-color:lightgray;">
<div style="width:{score}%; background-color:{color}; text-align:center;">
{score}%
</div>
</div>
'''
st.markdown(gauge_html, unsafe_allow_html=True)
st.title("Summary:")
st.write(arguments) | [
"PLACEHOLDER PLACEHOLDER",
"read this sumamry of an article and score it 1-100 in political neutrality: PLACEHOLDER (return only the number)"
] |
2024-01-10 | TheGali/BiasDetector | NewNEw~Newer.py | import streamlit as st
import openai
import os
import re
# Initialize OpenAI API
# Note: Use environment variables for API keys for security reasons.
openai.api_key = os.getenv("OPENAI_API_KEY")
# Function to generate jokes based on the statement
def generate_joke(statement):
prompt = f"Write a joke in the style of the late Mitch Hedberg with this topic : {statement}"
response = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=[
{
"role": "user",
"content": prompt
}
],
temperature=0.7,
max_tokens=6850,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]['message']['content']
# Title with large font and style
st.markdown("<h1 style='text-align: center; color: blue;'>🔬 Thoughts Laboratory 🔬</h1>", unsafe_allow_html=True)
# Dynamic Textbox for Surveyor
statement = st.text_input('Enter your problem with society here:', '')
# Generate arguments and chart only when a statement is entered
if statement:
arguments = generate_joke(statement) # Assuming you have this function defined
st.title("Joke:")
st.write(arguments)
| [
"Write a joke in the style of the late Mitch Hedberg with this topic : PLACEHOLDER"
] |
2024-01-10 | TheGali/BiasDetector | play.py | import streamlit as st
import openai
import os
import re
import requests
import torch
from bs4 import BeautifulSoup
from sentence_transformers import SentenceTransformer, util
# Reading the prompt from a text file
with open('long_prompt.txt', 'r') as f:
long_prompt = f.read().strip()
# Initialize OpenAI API
# Note: Use environment variables for API keys for security reasons.
openai.api_key = os.getenv("OPENAI_API_KEY")
def summarize_article_with_openai(url):
# Scrape the article
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
paragraphs = soup.find_all('p')
article_text = ' '.join([p.text for p in paragraphs])
# Extract most relevant sentences using Sentence Transformers
model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
sentences = article_text.split('. ')
embeddings = model.encode(sentences, convert_to_tensor=True)
query_embedding = model.encode("Summary", convert_to_tensor=True)
cos_scores = util.pytorch_cos_sim(query_embedding, embeddings)[0]
top_results = torch.topk(cos_scores, k=10)
summarized_text = ' '.join([sentences[i] for i in top_results.indices])
# Refine summary using OpenAI GPT
prompt = f"{long_prompt} {summarized_text}"
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": "user",
"content": prompt
}
],
max_tokens=4000
)
return response['choices'][0]['message']['content']
def score_article(arguments):
prompt = f"read this sumamry of an article and score it 1-100 in political neutrality: {arguments} (return only the number)"
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": "user",
"content": prompt
}
],
max_tokens=4000
)
return response['choices'][0]['message']['content']
# Title with large font and style
st.markdown("<h1 style='text-align: center; color: blue;'>🔬 Thoughts Laboratory 🔬</h1>", unsafe_allow_html=True)
# Dynamic Textbox for Surveyor
statement = st.text_input('Enter your URL:', '')
# Generate arguments and chart only when a statement is entered
if statement:
arguments = summarize_article_with_openai(statement) # Assuming you have this function defined
try:
score = float(score_article(arguments))*0.7
except:
score = 0.0
# Display the gauge
st.write(f'Neutrality Score: {score}%')
# Color code
if score < 33:
color = 'red' # Biased
elif score < 66:
color = 'yellow' # Somewhat neutral
else:
color = 'green' # Neutral
gauge_html = f'''
<div style="width:100%; background-color:lightgray;">
<div style="width:{score}%; background-color:{color}; text-align:center;">
{score}%
</div>
</div>
'''
st.markdown(gauge_html, unsafe_allow_html=True)
st.title("Summary:")
st.write(arguments)
| [
"PLACEHOLDER PLACEHOLDER",
"read this sumamry of an article and score it 1-100 in political neutrality: PLACEHOLDER (return only the number)"
] |
2024-01-10 | TheGali/BiasDetector | NewNEw~test2.py | import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
import openai
import os
import random
import re
# Initialize OpenAI API
# Note: Use environment variables for API keys for security reasons.
openai.api_key = os.getenv("OPENAI_API_KEY")
# Function to generate counterarguments based on the statement
def generate_counterarguments(statement):
prompt = f"List the top 3 arguments that someone might use to prove the statement, rank them by the arguments strength (give the argument a Power-Score of 1-100 subtracting points for logical fallacies and cognitive distortions in the arguments, annotate the fallacies and cognitive distortions found, list the subtractions), steel man everything and assume the best of the opposing view : {statement}"
counter_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
temperature=1,
max_tokens=2500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
counterarguments_raw = counter_response['choices'][0]['message']['content'].strip()
counterarguments_list = counterarguments_raw.split('\n\n') # Assuming each counterargument is separated by two newlines
return counterarguments_list[:3] # Get only the top 3 counterarguments
# Function to generate random percentages
def generate_random_percentages(num):
return [random.randint(1, 100) for _ in range(num)]
# Title
st.title("Galileo's Nuance finder")
# Dynamic Textbox for Surveyor
statement = st.text_input('Enter your Statement here:', '')
# Generate counterarguments and chart only when a statement is entered
if statement:
counterarguments = generate_counterarguments(statement)
agreement_percentages = generate_random_percentages(len(counterarguments))
st.title("Here are some counterarguments:")
for counterargument, percentage in zip(counterarguments, agreement_percentages):
score_match = re.search(r'Score: (\d+)', counterargument)
if score_match:
logic_score = score_match.group(1)
modified_counterargument = counterargument + f" (Agreement: {percentage}%)"
else:
modified_counterargument = f"{counterargument} (Agreement: {percentage}%)"
st.write(modified_counterargument)
# Generate random survey responses stratified by age
total_responses = 100
categories = ['Strongly Disagree', 'Disagree', 'Neutral', 'Agree', 'Strongly Agree']
age_groups = ['18-24', '25-34', '35-44', '45-54', '55+']
colors = ['#ADD8E6', '#90EE90', '#FFFF00', '#FFA500', '#FF0000']
stratified_responses = np.zeros((len(age_groups), len(categories)))
for i in range(len(age_groups)):
stratified_responses[i] = np.random.multinomial(total_responses // len(age_groups), [1/len(categories)]*len(categories))
# Create a stacked bar chart
fig, ax = plt.subplots(figsize=(12, 8))
bottom_values = np.zeros(len(categories))
for i, age_group in enumerate(age_groups):
ax.bar(categories, stratified_responses[i], bottom=bottom_values, label=f'Age {age_group}', color=colors[i], alpha=0.8)
bottom_values += stratified_responses[i]
ax.set_ylabel('Number of Responses')
ax.set_xlabel('Response Categories')
ax.set_title('Stratified Distribution of Answers by Age Group (Stacked)')
ax.legend()
ax.grid(axis='y')
st.pyplot(fig)
| [
"List the top 3 arguments that someone might use to prove the statement, rank them by the arguments strength (give the argument a Power-Score of 1-100 subtracting points for logical fallacies and cognitive distortions in the arguments, annotate the fallacies and cognitive distortions found, list the subtractions), steel man everything and assume the best of the opposing view : PLACEHOLDER"
] |
2024-01-10 | TheGali/BiasDetector | NewNEw~testgali.py | import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
import openai
import os
# Initialize OpenAI API
openai.api_key = "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"
openai.api_key = os.getenv("OPENAI_API_KEY")
# Function to generate image prompt based on the statement
def generate_image_prompt(statement):
prompt = f"A one sentence description of a generic family friendly movie scene with this topic: {statement}"
image_prompt_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
temperature=0.7,
max_tokens=400,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
image_prompt = image_prompt_response['choices'][0]['message']['content'].strip()
return image_prompt
# Function to generate counterarguments based on the statement
def generate_counterarguments(statement):
prompt = f"Create a list of the top 3 counterarguments to the statement, rank them by the arguments logical strength (give the argument a score of 1-100 subtracting points for logical fallacies and cognitive distortions in the arguments, annotate the fallacies and distortions found), : {statement}"
counter_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
temperature=1,
max_tokens=2500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
counterarguments = counter_response['choices'][0]['message']['content'].strip().split('\n')
return counterarguments
# Title
st.title("Galileo's Nuance finder")
# Dynamic Textbox for Surveyor
statement = st.text_input('Enter your Statement here:', '')
# Generate counterarguments, image prompt, and AI-created image
if statement:
counterarguments = generate_counterarguments(statement)
st.title("Here are some counterarguments:")
for counterargument in counterarguments:
st.write(counterargument)
image_prompt = generate_image_prompt(statement)
image_response = openai.Image.create(
prompt=image_prompt,
n=1,
size="1024x1024"
)
image_url = image_response['data'][0]['url']
st.image(image_url, caption=f"AI-generated image related to the topic: {statement}")
# Generate random survey responses stratified by age
total_responses = 100
categories = ['Strongly Disagree', 'Disagree', 'Neutral', 'Agree', 'Strongly Agree']
age_groups = ['18-24', '25-34', '35-44', '45-54', '55+']
colors = ['#ADD8E6', '#90EE90', '#FFFF00', '#FFA500', '#FF0000']
stratified_responses = np.zeros((len(age_groups), len(categories)))
for i in range(len(age_groups)):
stratified_responses[i] = np.random.multinomial(total_responses // len(age_groups), [1/len(categories)]*len(categories))
# Create a stacked bar chart
fig, ax = plt.subplots(figsize=(12, 8))
bottom_values = np.zeros(len(categories))
for i, age_group in enumerate(age_groups):
ax.bar(categories, stratified_responses[i], bottom=bottom_values, label=f'Age {age_group}', color=colors[i], alpha=0.8)
bottom_values += stratified_responses[i]
ax.set_ylabel('Number of Responses')
ax.set_xlabel('Response Categories')
ax.set_title('Stratified Distribution of Answers by Age Group (Stacked)')
ax.legend()
ax.grid(axis='y')
st.pyplot(fig)
| [
"A one sentence description of a generic family friendly movie scene with this topic: PLACEHOLDER",
"content",
"Create a list of the top 3 counterarguments to the statement, rank them by the arguments logical strength (give the argument a score of 1-100 subtracting points for logical fallacies and cognitive distortions in the arguments, annotate the fallacies and distortions found), : PLACEHOLDER",
"gpt-3.5-turbo"
] |
2024-01-10 | bfreeman12/FortuneBot | fortune_main.py | import discord
from discord import app_commands
from discord.ext import commands
from dotenv import load_dotenv
from os import environ
from random import Random,randrange
import random
from PIL import Image, ImageDraw, ImageFont
from math import ceil
from subprocess import getoutput
import aiohttp
import openai
from openai import OpenAI
#gathers token for running the bot
load_dotenv('token.env')
token = environ["DISCORD_TOKEN"]
GPT_API_KEY = environ['OPENAI_API_KEY']
client = OpenAI(api_key=environ['OPENAI_API_KEY'])
#defines prefix and intents
bot = commands.Bot(command_prefix="!", intents= discord.Intents.all())
#defined vibes to be referenced later in the vibe function
vibes = ['Chill vibes','Good vibes','Bad vibes','Wack','Meh','This is fine','Could be better','Could be worse']
#defined responses for the magic 8 ball function
magic_ball_responses = ('It is certain', 'It is decidedly so', 'Without a doubt', 'Yes, definitely',
'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good',
'Signs point to yes', 'Yes', 'Reply hazy, try again', 'Ask again later',
'Better not tell you now', 'Cannot predict now', 'Concentrate and ask again',
"Don't bet on it", 'My reply is no', 'My sources say no', 'Outlook not so good',
'Very doubtful')
#defined rock paper scissors winning combos
winning_combo = {
'rock':'scissors',
'paper':'rock',
'scissors':'paper'
}
#initiates the bot
@bot.event
async def on_ready():
print('The Oracle has awoken')
try:
synced = await bot.tree.sync()
print(f"Synced {len(synced)} command(s)")
except Exception as e:
print(e)
#gets a random vibe from the above list
@bot.tree.command(name='vibe',description='Vibe check')
async def vibe(interaction: discord.Interaction):
vibes_length = len(vibes)
choice_index = random.randint(0,vibes_length-1)
embed = discord.Embed(title='Vibe check', description=vibes[choice_index], color=discord.Color.random())
await interaction.response.send_message(embed=embed)
#this will run the fortune - cowsay command in local terminal and send the output as a message
@bot.tree.command(name='fortune',description='Get your fortune!')
async def fortune(interaction: discord.Interaction):
cowTypes = getoutput('/usr/games/cowsay -l')[37:]
cowTypes = cowTypes.split() # split into cowsay animals
typechoice = cowTypes[randrange(0, len(cowTypes), 1)]
# Use our choice to generate a cowsay
msg = getoutput('/usr/games/fortune | /usr/games/cowsay -f {}'.format(typechoice))
# Image generation: calculate length and width of image and instantiate
msgFont = ImageFont.truetype("UbuntuMono-Regular.ttf", 12)
msgDim = msgFont.getsize_multiline(msg)
msgImg = Image.new('RGB', (ceil(
msgDim[0] + 0.1*msgDim[0]), ceil(msgDim[1] + 0.1*msgDim[1])), (54, 57, 62, 0))
msgDraw = ImageDraw.Draw(msgImg)
msgDraw.text((16, 0), msg, fill=(255, 255, 255, 255), font=msgFont)
msgImg.save('/tmp/fortune.png')
embed=discord.Embed(title='The Oracle Says:', color=discord.Color.random())
file = discord.File('/tmp/fortune.png', filename='fortune.png')
embed.set_image(url="attachment://fortune.png")
await interaction.response.send_message(embed=embed, file=file)
#this will run the magic 8 ball
@bot.tree.command(name='8ball',description='Consult the magic 8 ball!')
async def magic_ball(interaction: discord.Interaction):
magic_answer = random.choice(magic_ball_responses)
embed=discord.Embed(title='The Oracle Says: ', description=magic_answer, color=discord.Color.random())
await interaction.response.send_message(embed=embed)
@bot.tree.command(name='flip',description='Flip a coin heads or tails!')
async def coin_flip(interaction: discord.Interaction):
results = {
0 : 'Heads',
1 : 'Tails'
}
index = randrange(0,2)
flip_result = results[index]
embed=discord.Embed(title='Coinflip!', description=f'You flipped: {flip_result}!', color=discord.Color.random())
await interaction.response.send_message(embed=embed)
#help command to list current functions
@bot.tree.command(name='help',description='Display the help message')
async def help_func(interaction: discord.Interaction):
embed=discord.Embed (title='Help!', description='''I can only do a few things at the moment:
/fortune: Will run the cowsay fortunes command!
/flip: Will flip a coin heads or tails Style!
/8ball: Will give a magic 8ball response!
/rps: </rps @anyone> in the server and reply to the dm with Rock Paper or Scissors
/aidraw prompt to have Dall-e-3 generate an image
/askai question to get a response from chatGPT''', color=discord.Color.random())
await interaction.response.send_message(embed=embed)
@bot.tree.command(name='rps',description='@ another user and reply to the bots DM to play!')
async def rps(interaction: discord.Interaction, secondplayer:discord.Member):
#define styles for the embeds
embed_rps=discord.Embed(title='The Oracle says:',color=discord.Color.random())
embed_rps.set_thumbnail(url='https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSUNjdD-Vfq-_MZpu-KZpUdqmiXmqV4FcEr_lLmuCyyYsdA7r_MHhPh9dLVwSA2GQa9Bvg&usqp=CAU')
embed_dm=discord.Embed(title='The Oracle says:',color=discord.Color.random())
embed_dm.set_thumbnail(url='https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSUNjdD-Vfq-_MZpu-KZpUdqmiXmqV4FcEr_lLmuCyyYsdA7r_MHhPh9dLVwSA2GQa9Bvg&usqp=CAU')
embed_dm.add_field(name='Rock Paper Scissors!',value='Send Rock Paper or Scissors into chat below!')
try:
#define players and gather user id's
player1_name = interaction.user
player1 = interaction.user.id
player2 = secondplayer.id
#checks if both players are from the same instance
def check_player1(message):
return message.author.id == player1 and isinstance(message.channel, discord.DMChannel)
def check_player2(message):
return message.author.id == player2 and isinstance(message.channel, discord.DMChannel)
#this entire function is cancer and I don't know how to simplify
async def rps_game():
#send first player inital message and stores choice
player1_message = await player1_name.create_dm()
await player1_message.send(embed=embed_dm)
player1_choice = await bot.wait_for('message', check=check_player1)
player1_compare = player1_choice.content.lower()
#these while loops ensure the key being passed into the winner check is a valid key in the list
while player1_compare != 'rock' and player1_compare != 'paper' and player1_compare != 'scissors':
await player1_message.send('Choose again')
player1_choice = await bot.wait_for('message', check=check_player1)
player1_compare = player1_choice.content.lower()
if player1_compare == 'rock' or player1_compare == 'paper' or player1_compare == 'scissors':
break
#send second player initial message and stores choice
player2_message = await secondplayer.create_dm()
await player2_message.send(embed=embed_dm)
player2_choice = await bot.wait_for('message', check=check_player2)
player2_compare = player2_choice.content.lower()
#these while loops ensure the key being passed into the winner check is a valid key in the list
while player2_compare != 'rock' and player2_compare != 'paper' and player2_compare != 'scissors':
await player2_message.send('Choose again')
player2_choice = await bot.wait_for('message', check=check_player2)
player2_compare = player2_choice.content.lower()
if player2_compare == 'rock' or player2_compare == 'paper' or player2_compare == 'scissors':
break
#the actual game logic
if player1_compare == player2_compare:
embed_rps.add_field(name='Draw!', value="\u200b",inline=False)
elif player1_compare == winning_combo[player2_compare]:
embed_rps.add_field(name=f'{secondplayer} Won!', value="\u200b",inline=False)
elif player2_compare == winning_combo[player1_compare]:
embed_rps.add_field(name=f'{player1_name} Won!', value="\u200b",inline=False)
else:
print('error')
embed_rps.add_field(name=f'{player1_name}',value=f"<@{player1}>\nChose: {player1_compare}")
embed_rps.add_field(name=f'{secondplayer}',value=f"<@{player2}>\nChose: {player2_compare}")
await interaction.channel.send(embed=embed_rps)
await rps_game()
except Exception as e:
print(e)
@bot.tree.command(name='askai',description='Ask a question to ChatGPT!')
async def ask_ai(interaction: discord.Interaction, question:str):
channel = interaction.channel
msg_embed = discord.Embed(title='GPT4 Says:',description=f"{question}\n> Generating...")
msg = await interaction.response.send_message(embed=msg_embed)
completion = client.chat.completions.create(
model="gpt-4",
messages=[{"role":"user","content":question}]
)
response = completion.choices[0].message.content
embed = discord.Embed(title='ChatGPT Says:', description=response)
embed.set_footer(text=question)
await interaction.edit_original_response(embed=embed)
@bot.tree.command(name='aidraw',description='Provide a prompt to Dall-e-3 to generate an image!')
async def ai_draw(interaction:discord.Interaction, prompt:str):
try:
channel = interaction.channel
msg_embed = discord.Embed(title='Dall-e is drawing:', description=f"\n{prompt}\n> Generating...")
msg = await interaction.response.send_message(embed=msg_embed)
response = client.images.generate(
model="dall-e-3",
prompt=prompt,
size="1024x1024",
quality="standard",
n=1,
)
image_url = response.data[0].url
response_embed = discord.Embed(title='Your masterpiece',description=interaction.user.mention)
response_embed.set_image(url=image_url)
response_embed.set_footer(text=prompt)
await interaction.edit_original_response(embed=response_embed)
except Exception as e:
if str(e) == 'Your request was rejected as a result of our safety system. Your prompt may contain text that is not allowed by our safety system.':
await channel.send(interaction.user.mention+'\n'+str(e))
bot.run(token)
| [] |
2024-01-10 | prapti-ai/prapti | prapti~plugins~endpoints~local_openai_chat_responder.py | """
Generate responses using OpenAI-compatible chat APIs, tailored for local LLMs.
Unlike the `openai.chat` responder, this version does not use or expose the
OPENAI_API_KEY environment variable, and does not perform any operations specific
to OpenAI models, such as using OpenAI token counters.
"""
import datetime
import inspect
import json
from enum import Enum
from typing import AsyncGenerator, Any
import asyncio
from contextlib import suppress
from pydantic import BaseModel, Field, ConfigDict
from cancel_token import CancellationToken
import openai
from . import openai_globals # ensure openai globals are saved, no matter which module is loaded first
from ...core.plugin import Plugin, PluginCapabilities, PluginContext
from ...core.command_message import Message
from ...core.configuration import VarRef, resolve_var_refs
from ...core.responder import Responder, ResponderContext
from ...core.logger import DiagnosticsLogger
# openai chat API docs:
# https://platform.openai.com/docs/guides/chat/introduction
# basic example:
# https://gist.github.com/pszemraj/c643cfe422d3769fd13b97729cf517c5
# chat API parameters --------------------------------------------------------
class LocalOpenAIChatResponderConfiguration(BaseModel):
"""Configuration parameters for the local OpenAI-compatible API chat responder.
See the [OpenAI chat completions documentation](https://platform.openai.com/docs/api-reference/chat/create) for more information."""
model_config = ConfigDict(
validate_assignment=True,
protected_namespaces=()) # pydantic config: suppress warnings about `model` prefixed names
api_base: str = Field(default="http://localhost:4891/v1", description=inspect.cleandoc("""\
URL of the local OpenAI API endpont. Default values for some compatible servers:
GPT4ALL: http://localhost:4891/v1, vLLM: http://localhost:8000/v1, LocalAI: http://localhost:8080/v1 .
For others, check your server settings."""))
model: str = Field(default_factory=str, description=inspect.cleandoc("""\
ID of the model to use."""))
# messages: list|None = None (not available in configuration, injected by the responder)
temperature: float = Field(default=1.0, description=inspect.cleandoc("""\
What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more focused and deterministic.
We generally recommend altering this or `top_p` but not both."""))
top_p: float = Field(default=1.0, description=inspect.cleandoc("""\
An alternative to sampling with temperature, called nucleus sampling,
where the model considers the results of the tokens with top_p probability mass.
So 0.1 means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both."""))
n: int = Field(default=1, description=inspect.cleandoc("""\
How many chat completion choices to generate for each input message."""))
stream: bool = Field(default=False, description=inspect.cleandoc("""\
If set, partial message fragments will be returned progressively as they are generated."""))
stop: str|list[str]|None = Field(default=None, description=inspect.cleandoc("""\
Up to 4 sequences where the API will stop generating further tokens."""))
max_tokens: int|None = Field(default=None, description=inspect.cleandoc("""\
The maximum number of tokens to generate in the chat completion.
The total length of input tokens and generated tokens is limited by the model's context length.
(GPT 3.5 Turbo max context: 4096 tokens)
When `max_tokens` is set to `null` no limit will be placed on the number of generated tokens,
except for that imposed by the model's context length."""))
presence_penalty: float = Field(default=0.0, description=inspect.cleandoc("""\
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
increasing the model's likelihood to talk about new topics."""))
frequency_penalty: float = Field(default=0.0, description=inspect.cleandoc("""\
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim."""))
# logit_bias (not currently supported)
# probably want to allow a dict[str,float] with the str causing a lookup to the token id, which is what the api expects
user: str|None = Field(default=None, description=inspect.cleandoc("""\
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse."""))
# TODO: add vLLM beam search option
# ----------------------------------------------------------------------------
def convert_message_sequence_to_openai_messages(message_sequence: list[Message], log: DiagnosticsLogger) -> list[dict]:
result = []
for message in message_sequence:
if not message.is_enabled or message.is_hidden:
continue # skip disabled and hidden messages
if message.role not in ["system", "user", "assistant"]:
log.warning("unrecognised-public-role", f"message will not be included in LLM prompt. public role '{message.role}' is not recognised.", message.source_loc)
continue
assert len(message.content) == 1 and isinstance(message.content[0], str), "expected flattened message content"
m = {
"role": message.role,
"content": message.content[0]
}
if message.name: # only include name field if a name was specified
# NOTE: as of July 1 2023, this seems to only get passed to the LLM for system and user messages
m["name"] = message.name
result.append(m)
return result
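# Sentinel value pushed onto each per-choice queue to signal the end of a streamed response.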
class QueueSentinel(Enum):
END_OF_RESPONSE = 0
async def _cancel_async_generator(agen: AsyncGenerator) -> None:
# https://stackoverflow.com/questions/60226557/how-to-forcefully-close-an-async-generator
task = asyncio.create_task(agen.__anext__())
task.cancel()
with suppress(asyncio.CancelledError):
await task
async def _stream_chunks_task(chunks: AsyncGenerator[dict, None], queues: list[asyncio.Queue[str|QueueSentinel]]) -> None:
"""receive chunks from the async generator returned by openai.ChatCompletion.acreate
and route each chunk into the appropriate queue. this part of the receive process is cancelable.
"""
async for chunk in chunks:
if chunk["choices"]:
choices = chunk["choices"]
for choice in choices:
index = choice["index"]
delta = choice["delta"]
if "content" in delta:
if bool(delta_content := delta["content"]):
queues[index].put_nowait(delta_content)
async def _streaming_receive_task(chunks: AsyncGenerator[dict, None], queues: list[asyncio.Queue[str|QueueSentinel]], cancellation_token: CancellationToken) -> None:
"""stream chunks from the async generator returned by openai.ChatCompletion.acreate
and route to queues using _stream_chunks_task.
handle cancellation and ensure that all queues are terminated with a END_OF_RESPONSE item
both at the end of processing and if cancelled."""
try:
receive_chunks_task = asyncio.create_task(_stream_chunks_task(chunks, queues))
cancellation_token.on_cancel(lambda: _discard_arg(receive_chunks_task.cancel()))
with suppress(asyncio.CancelledError):
await receive_chunks_task
if cancellation_token.cancelled:
# cancel the source, i.e. the AsyncGenerator that openai.ChatCompletion.acreate gave us
with suppress(asyncio.CancelledError):
await _cancel_async_generator(chunks)
finally:
# terminate the queues. note that this needs to happen under all cancellation and error scenarios
for queue in queues:
queue.put_nowait(QueueSentinel.END_OF_RESPONSE)
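# Drain one per-choice queue, yielding text fragments until the end-of-response sentinel arrives.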
async def _dequeue_async_content(queue) -> AsyncGenerator[str, None]:
while True:
s = await queue.get()
if s == QueueSentinel.END_OF_RESPONSE:
return
yield s
def _discard_arg(arg: Any) -> None:
return None
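# Responder for OpenAI-compatible local servers; streamed chunks are fanned out to one queue per completion choice.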
class LocalOpenAIChatResponder(Responder):
def construct_configuration(self, context: ResponderContext) -> BaseModel|tuple[BaseModel, list[tuple[str,VarRef]]]|None:
return LocalOpenAIChatResponderConfiguration(), [
("api_base", VarRef("local_openai_api_base")),
("model", VarRef("model")),
("temperature", VarRef("temperature")),
("n", VarRef("n")),
("stream", VarRef("stream"))]
async def _async_response_generator(self, input_: list[Message], cancellation_token: CancellationToken, context: ResponderContext) -> AsyncGenerator[Message, None]:
config: LocalOpenAIChatResponderConfiguration = context.responder_config
context.log.debug(f"input: {config = }", context.state.input_file_path)
config = resolve_var_refs(config, context.root_config, context.log)
context.log.debug(f"resolved: {config = }", context.state.input_file_path)
openai.api_key = "NONE"
openai.organization = "NONE"
openai.api_base = config.api_base
openai.api_type = "open_ai"
openai.api_version = None
messages = convert_message_sequence_to_openai_messages(input_, context.log)
context.log.debug(f"final: {config = }")
chat_args = config.model_dump(exclude_none=True, exclude_defaults=True)
chat_args["model"] = config.model # include model, even if it was left at default
context.log.debug(f"{chat_args = }")
chat_args["messages"] = messages
if context.root_config.prapti.dry_run:
context.log.info("openai.chat-dry-run", "dry run: halting before calling the OpenAI API", context.state.input_file_path)
current_time = str(datetime.datetime.now())
chat_args["messages"] = ["..."] # avoid overly long output
yield Message(role="assistant", name=None, content=[f"dry run mode. {current_time}\nchat_args = {json.dumps(chat_args)}"])
return
if cancellation_token.cancelled:
return
try:
# docs: https://platform.openai.com/docs/api-reference/chat/create
if config.stream:
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
task = asyncio.create_task(openai.ChatCompletion.acreate(**chat_args))
cancellation_token.on_cancel(lambda: _discard_arg(task.cancel()))
chunks = None
with suppress(asyncio.CancelledError):
chunks = await task
if cancellation_token.cancelled or chunks is None:
return
queues: list[asyncio.Queue[str|QueueSentinel]] = [asyncio.Queue(maxsize=0) for _ in range(config.n)]
# ^^^ unbounded queues -- to avoid constraining caller's response iteration strategy
receive_task = asyncio.create_task(_streaming_receive_task(chunks, queues, cancellation_token))
if config.n == 1:
yield Message(role="assistant", name=None, content=[], async_content=_dequeue_async_content(queues[0]))
else:
for i, queue in enumerate(queues, start=1):
yield Message(role="assistant", name=str(i), content=[], async_content=_dequeue_async_content(queue))
await receive_task
else:
task = asyncio.create_task(openai.ChatCompletion.acreate(**chat_args))
cancellation_token.on_cancel(lambda: _discard_arg(task.cancel()))
response = None
with suppress(asyncio.CancelledError):
response = await task
if cancellation_token.cancelled or response is None:
return
assert isinstance(response, dict)
context.log.debug(f"{response = }", context.state.input_file_path)
if len(response["choices"]) == 1:
choice = response["choices"][0]
yield Message(role="assistant", name=None, content=[choice.message["content"]], async_content=None)
else:
for i, choice in enumerate(response["choices"], start=1):
yield Message(role="assistant", name=str(i), content=[choice.message["content"]], async_content=None)
except Exception as ex:
context.log.error("local-openai-chat-api-exception", f"exception while requesting a response from the API server: {repr(ex)}", context.state.input_file_path)
context.log.debug_exception(ex)
def generate_responses(self, input_: list[Message], cancellation_token: CancellationToken, context: ResponderContext) -> AsyncGenerator[Message, None]:
return self._async_response_generator(input_, cancellation_token, context)
class LocalOpenAIChatResponderPlugin(Plugin):
def __init__(self):
super().__init__(
api_version = "1.0.0",
name = "local.openai.chat",
version = "0.0.2",
description = "Responder for using local LLMs that offer the OpenAI-compatible Chat Completion API",
capabilities = PluginCapabilities.RESPONDER
)
def construct_responder(self, context: PluginContext) -> Responder|None:
return LocalOpenAIChatResponder()
prapti_plugin = LocalOpenAIChatResponderPlugin()
| [
"[]",
"content",
"None"
] |
2024-01-10 | RediatBrook/tezeta | tezeta~chats.py | from . import tokens
import pinecone
import openai
import os
import chromadb
import logging
vector_db_type = "chromadb"
pinecone_api_key = os.environ.get("PINECONE_API_KEY")
openai_api_key = os.environ.get("OPENAI_API_KEY")
chromadb_path = "./vector_db"
supported_vector_dbs = {"pinecone", "chromadb"}
pinecone_index_name = "tezeta-chats"
pinecone_environment = os.environ.get("PINECONE_ENVIRONMENT")
pinecone_dimensions = 1536
"""
This function ensures that messages are in the following format.
messages = [
{
"role" : "user" or "assistant" or "system",
"content" : "message content",
},
...
]
"""
def validate_messages(messages):
acceptable_roles = {"user", "assistant", "system", "function"}
if not isinstance(messages, list):
raise ValueError("messages must be a list")
for message in messages:
if not isinstance(message, dict):
raise ValueError("Each message must be a dictionary")
if "role" not in message or "content" not in message:
raise ValueError("Each message dictionary must have 'role' and 'content' keys")
if message["role"] not in acceptable_roles:
raise ValueError(f"Invalid role {message['role']}. Role must be one of {acceptable_roles}")
return True
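# Embed a piece of text with OpenAI's text-embedding-ada-002 model.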
def get_embedding(message):
content = message
response = openai.Embedding.create(
input = content,
model = "text-embedding-ada-002"
)
embedding = response['data'][0]['embedding']
return embedding
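# Rank stored messages by similarity to the new chat using Pinecone, keeping only as many as fit within the token budget.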
def rank_and_return_pinecone_messages(messages, new_chat):
# Initialize Pinecone
pinecone.init(api_key=pinecone_api_key, environment=pinecone_environment)
# Create a Pinecone Index instance
index = pinecone.Index(pinecone_index_name)
new_chat_length = tokens.count_text_tokens(new_chat['content'])
# Upsert vectors to Pinecone and keep a mapping from IDs to messages
id_to_message = {}
for i, msg in enumerate(messages):
id_to_message[str(i)] = msg
embedding = get_embedding(msg['content'])
index.upsert(vectors=[{
'id': str(i),
'values' : embedding,
'metadata':{'role': msg['role'], 'content': msg['content']},
}])
print(id_to_message)
# Query Pinecone for the top matches
new_chat_embedding = get_embedding(new_chat['content'])
query_response = index.query(vector=new_chat_embedding, top_k=len(messages))
# Filter results to fit within the max_context_window
sorted_results = []
total_tokens = 0
for res in query_response.matches:
if(res.metadata==None):
continue
original_message = res.metadata['content']
tokens_in_result = tokens.count_text_tokens(original_message)
if total_tokens + tokens_in_result + new_chat_length <= tokens.max_tokens:
role = res.metadata['role']
sorted_results.append({'role': role, 'content': original_message})
total_tokens += tokens_in_result
else:
break
# Sort results based on their original order
sorted_results = sorted(sorted_results, key=lambda x: messages.index(x))
sorted_results.append(new_chat)
return sorted_results
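# Same ranking strategy as above, but backed by a local persistent ChromaDB collection.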
def rank_and_return_chromadb_messages(messages, new_chat):
if not os.path.exists(chromadb_path):
os.mkdir(chromadb_path)
logging.info(f"Created chromadb directory at {chromadb_path}")
else:
logging.info(f"Chromadb directory already exists at {chromadb_path}")
client = chromadb.PersistentClient(path=chromadb_path)
collection = client.get_or_create_collection('tezeta-chats')
new_chat_length = tokens.count_text_tokens(new_chat['content'])
sorted_results = []
total_tokens = 0
for i, msg in enumerate(messages):
collection.upsert(
documents=[msg['content']],
ids=[str(i)],
metadatas={'role': msg['role']}
)
results = collection.query(
query_texts=[new_chat['content']],
n_results=len(messages)
)
for i,res in enumerate(results['documents'][0]):
original_message = res
tokens_in_result = tokens.count_text_tokens(original_message)
if total_tokens + tokens_in_result + new_chat_length <= tokens.max_tokens:
role = results['metadatas'][0][i]['role']
sorted_results.append({'role': role, 'content': original_message})
total_tokens += tokens_in_result
else:
break
sorted_results = sorted(sorted_results, key=lambda x: messages.index(x))
sorted_results.append(new_chat)
return sorted_results
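# Entry point: return the messages unchanged if they fit the context window, otherwise rank-and-trim the history.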
def fit_messages(messages):
if validate_messages(messages):
if(tokens.count_chat_tokens(messages) < tokens.max_tokens):
return messages
previous_messages = messages[:-1]
new_message = messages[-1]
if vector_db_type == "pinecone":
return rank_and_return_pinecone_messages(previous_messages, new_message)
elif vector_db_type == "chromadb":
return rank_and_return_chromadb_messages(previous_messages, new_message)
return messages | [
"content"
] |
2024-01-10 | levonrush/footy-tipper | pipeline~use-predictions~functions~sending_functions.py | import os
import pandas as pd
# for google
from google.oauth2 import service_account
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload
import gspread
from google.oauth2 import service_account
# for reg
from langchain.llms import OpenAI
# for emails
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# The 'get_tipper_picks' function calculates the odds threshold for both home and away teams and then selects the home and away teams based on their predicted results.
def get_tipper_picks(predictions, prod_run=False):
# Calculate odds thresholds for home and away teams
predictions['home_odds_thresh'] = 1 / predictions['home_team_win_prob']
predictions['away_odds_thresh'] = 1 / predictions['home_team_lose_prob']
# Select home teams that are predicted to win and rename the columns accordingly.
home_picks = predictions[predictions['home_team_result'] == 'Win'][['team_home', 'team_head_to_head_odds_home', 'home_odds_thresh']].copy()
home_picks.rename(columns={'team_home': 'team', 'team_head_to_head_odds_home': 'price', 'home_odds_thresh': 'price_min'}, inplace=True)
# Select away teams that are predicted to lose and rename the columns accordingly.
away_picks = predictions[predictions['home_team_result'] == 'Loss'][['team_away', 'team_head_to_head_odds_away', 'away_odds_thresh']].copy()
away_picks.rename(columns={'team_away': 'team', 'team_head_to_head_odds_away': 'price', 'away_odds_thresh': 'price_min'}, inplace=True)
# Concatenate the home and away picks and filter rows where 'price' is more than 15% of 'price_min'.
tipper_picks = pd.concat([home_picks, away_picks])
tipper_picks = tipper_picks[tipper_picks['price'] > (tipper_picks['price_min'] * 1.05)]
return tipper_picks
# The 'upload_df_to_drive' function uploads a pandas DataFrame to Google Drive as a CSV file.
def upload_df_to_drive(df, json_path, folder_id, filename):
# Load the credentials from the service_account.json
creds = service_account.Credentials.from_service_account_file(json_path)
# Build the Google Drive service
drive_service = build('drive', 'v3', credentials=creds)
# Save your dataframe to CSV
df.to_csv(filename, index=False)
# Upload the file
file_metadata = {
'name': f"round{df['round_id'].unique()[0]}_{df['competition_year'].unique()[0]}.csv",
'parents': [folder_id]
}
media = MediaFileUpload(filename, mimetype='text/csv')
file = drive_service.files().create(body=file_metadata, media_body=media, fields='id').execute()
print('File ID:', file.get('id'))
# Delete the local file after upload
os.remove(filename)
# The 'generate_reg_regan_email' function generates an email content with the help of an AI language model (OpenAI). The email contains a synopsis of NRL games and some value tips.
def generate_reg_regan_email(predictions, tipper_picks, api_key, folder_url):
# Set up the OpenAI model using provided API key and model parameters
llm = OpenAI(openai_api_key=api_key,
model_name="gpt-3.5-turbo-16k",
max_tokens=15000,
temperature=1.1)
# Generate input_predictions string by iterating over 'predictions' dataframe and formatting data into string
input_predictions = ""
for index, row in predictions.iterrows():
input_predictions += f"""
Round Name: {row['round_name']},
Home Team Result: {row['home_team_result']},
Home Team: {row['team_home']},
Home Team Position: {row['position_home']},
Home Team Head to Head Price: {row['team_head_to_head_odds_home']}
Away Team: {row['team_away']},
Away Team Position: {row['position_away']},
Away Team Head to Head Price: {row['team_head_to_head_odds_away']}
"""
# Generate input_picks string by iterating over 'tipper_picks' dataframe and formatting data into string
input_picks = ""
for index, row in tipper_picks.iterrows():
input_picks += f"""
Team: {row['team']},
Price: {row['price']}
"""
# Generate the prompt string to be used with the AI model
prompt = f"""
I have a set of predictions for NRL games in {predictions['round_name'].unique()[0]} {predictions['competition_year'].unique()[0]} made by a machine learning pipeline called the Footy Tipper: \n{input_predictions}\n
The description of the columns of interest is:
* Home Team Result: the predicted result of the home team
* Home Team: the home team
* Home Team Position: the home team's position on the NRL ladder
* Home Team Head to Head Price: the price bookies are offering for a home win
* Away Team: the away team
* Away Team Position: the away team's position on the NRL ladder
* Away Team Head to Head Price: the price bookies are offering for an away win
It also comes up with some good value tips for those interested in a punt in \n{input_picks}\n. If it is empty there isn't much value for punting in the round. The description of the columns of interest is:
* Team = Team that is a good value pick
* Price = what the bookies are offering them at
Could you write up an email to my mates from Reg Reagan, giving them a synopsis of the round along with the tips?
Accompany the tips with some smart arsed comments about the teams playing.
Remember to link everyone to the tips folder: {folder_url}
Also, tell everyone to bring back the biff at the end of the email.
Also also your favorite team is the Newcastle Knights.
Always sign off the email as Reg Reagan.
"""
# Use the AI model to generate the email content based on the prompt
reg_regan = llm.predict(prompt)
return reg_regan
# The 'send_emails' function sends an email to a list of recipients. The email details are prepared and the SMTP server is used to send the emails.
def send_emails(doc_name, subject, message, sender_email, sender_password, json_path):
    # Define the Google API scopes needed to read the mailing-list spreadsheet
scope = ["https://spreadsheets.google.com/feeds", 'https://www.googleapis.com/auth/spreadsheets',
"https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"]
# Authorize Google client using service account credentials to access Google Sheets
creds = service_account.Credentials.from_service_account_file(json_path, scopes=scope)
client = gspread.authorize(creds)
# Open the spreadsheet and get the data
sheet = client.open(doc_name).sheet1 # this is the spreadsheet with the emails
email_data = sheet.get_all_records() # gets all the data inside your Google Sheet
# Extract the recipient emails from the Google Sheet data
recipient_emails = [row['Email'] for row in email_data] # replace 'Email' with your actual column name
    # Prepare the email as a multipart message with a plain-text body
msg = MIMEMultipart()
msg['From'] = sender_email
msg['To'] = ', '.join(recipient_emails)
msg['Subject'] = subject
msg.attach(MIMEText(message, 'plain'))
# Setup the SMTP server for sending the email
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
# Login to the SMTP server using sender's email and password
server.login(sender_email, sender_password)
# Send the email to the list of recipients
text = msg.as_string()
server.sendmail(sender_email, recipient_emails, text)
# Close the SMTP server connection
server.quit()
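# Hedged usage sketch (added for illustration; not part of the original pipeline). It assumes
# 'predictions_df' and 'picks_df' are pandas DataFrames carrying the columns referenced in the
# prompt above, and that the OpenAI key, folder URL, sheet name, sender credentials and service
# account path are placeholders supplied by the caller.
def _example_send_round_email(predictions_df, picks_df, openai_key, folder_url,
                              sender_email, sender_app_password,
                              sheet_name="Footy Tipper Mailing List",
                              service_account_json="service_account.json"):
    # Generate the Reg Reagan write-up, then mail it to everyone listed on the Google Sheet
    email_body = generate_reg_regan_email(predictions_df, picks_df, openai_key, folder_url)
    send_emails(sheet_name, "Footy Tipper - Reg Reagan's Tips", email_body,
                sender_email, sender_app_password, service_account_json)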
| [
"competition_year",
"round_name"
] |
2024-01-10 | sdan/chroma | chromadb~utils~embedding_functions.py | from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
class SentenceTransformerEmbeddingFunction(EmbeddingFunction):
# If you have a beefier machine, try "gtr-t5-large".
# for a full list of options: https://huggingface.co./sentence-transformers, https://www.sbert.net/docs/pretrained_models.html
def __init__(self, model_name: str = "all-MiniLM-L6-v2"):
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ValueError(
"The sentence_transformers python package is not installed. Please install it with `pip install sentence_transformers`"
)
self._model = SentenceTransformer(model_name)
def __call__(self, texts: Documents) -> Embeddings:
return self._model.encode(list(texts), convert_to_numpy=True).tolist()
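# Hedged usage sketch (not part of the original module): each embedding function below is a
# plain callable that maps a list of documents to a list of embedding vectors, e.g.
#     ef = SentenceTransformerEmbeddingFunction()          # downloads all-MiniLM-L6-v2
#     vectors = ef(["hello world", "chroma embeddings"])   # two 384-dimensional float lists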
class OpenAIEmbeddingFunction(EmbeddingFunction):
def __init__(self, api_key: str, model_name: str = "text-embedding-ada-002"):
try:
import openai
except ImportError:
raise ValueError(
"The openai python package is not installed. Please install it with `pip install openai`"
)
openai.api_key = api_key
self._client = openai.Embedding
self._model_name = model_name
def __call__(self, texts: Documents) -> Embeddings:
# replace newlines, which can negatively affect performance.
texts = [t.replace("\n", " ") for t in texts]
        # Call the OpenAI Embedding API once with the full batch of documents
return [
result["embedding"]
for result in self._client.create(
input=texts,
engine=self._model_name,
)["data"]
]
class CohereEmbeddingFunction(EmbeddingFunction):
def __init__(self, api_key: str, model_name: str = "large"):
try:
import cohere
except ImportError:
raise ValueError(
"The cohere python package is not installed. Please install it with `pip install cohere`"
)
self._client = cohere.Client(api_key)
self._model_name = model_name
def __call__(self, texts: Documents) -> Embeddings:
# Call Cohere Embedding API for each document.
return [
embeddings for embeddings in self._client.embed(texts=texts, model=self._model_name)
]
class HuggingFaceEmbeddingFunction(EmbeddingFunction):
def __init__(self, api_key: str, model_name: str = "sentence-transformers/all-MiniLM-L6-v2"):
try:
import requests
except ImportError:
raise ValueError(
"The requests python package is not installed. Please install it with `pip install requests`"
)
self._api_url = (
f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_name}"
)
self._session = requests.Session()
self._session.headers.update({"Authorization": f"Bearer {api_key}"})
def __call__(self, texts: Documents) -> Embeddings:
        # Call the HuggingFace Inference API once with the full list of documents
return self._session.post(
self._api_url, json={"inputs": texts, "options": {"wait_for_model": True}}
).json()
class InstructorEmbeddingFunction(EmbeddingFunction):
# If you have a GPU with at least 6GB try model_name = "hkunlp/instructor-xl" and device = "cuda"
# for a full list of options: https://github.com/HKUNLP/instructor-embedding#model-list
def __init__(self, model_name: str = "hkunlp/instructor-base", device="cpu"):
try:
from InstructorEmbedding import INSTRUCTOR
except ImportError:
raise ValueError(
"The InstructorEmbedding python package is not installed. Please install it with `pip install InstructorEmbedding`"
)
self._model = INSTRUCTOR(model_name, device=device)
def __call__(self, texts: Documents) -> Embeddings:
return self._model.encode(texts).tolist()
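# Hedged end-to-end sketch (illustrative only; the collection name and documents are made up,
# and it assumes the chromadb client API of this era: create_collection() accepting an
# embedding_function, plus collection.add()/query()).
if __name__ == "__main__":
    import chromadb
    ef = SentenceTransformerEmbeddingFunction()
    client = chromadb.Client()
    collection = client.create_collection(name="demo_docs", embedding_function=ef)
    collection.add(documents=["chroma stores embeddings", "langchain calls chroma"], ids=["d1", "d2"])
    print(collection.query(query_texts=["what stores embeddings?"], n_results=1))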
| [] |
2024-01-10 | vladimirmagic/youtube-video-analysis | utilities.py | import google.auth
import openai, base64, os, cv2, json, io, re, whisper, time
import librosa, librosa.display
import matplotlib.pyplot as plt
import numpy as np
import speech_recognition as sr
import concurrent.futures
from googletrans import Translator
from pydub import AudioSegment
from wrapt_timeout_decorator import timeout
from googleapiclient.discovery import build
from youtube_transcript_api import YouTubeTranscriptApi
from isodate import parse_duration
from pytube import YouTube
from moviepy.video.io.VideoFileClip import VideoFileClip, AudioFileClip
from config import settings
import variables
def execute_function_wrapper(func, arg):
    '''This function takes a function and its argument and executes that function.'''
# Execute the provided function with its arguments
result = func(arg)
return result
#Functions to extract youtube vidoe content, subtitles and even download videos as well.
def search_videos_keyword(api_key, query, max_results=5):
    '''This function extracts the details of a list of YouTube videos given certain keywords.'''
# Set up YouTube API service
youtube = build('youtube', 'v3', developerKey=api_key)
# Search for videos based on keywords
request = youtube.search().list(
part='snippet',
q=query,
type='video',
maxResults=max_results)
response = request.execute() #Executes the query
if 'items' in response:
videos = response['items']
return videos
else:
print("No videos found.")
def search_videos_channel(api_key, channel_id, max_results=50):
    '''This function takes in the YouTube API key, a channel ID and a results threshold,
    and extracts up to that many video metadata entries from the channel's uploads playlist.'''
# Set up YouTube API service
youtube = build('youtube', 'v3', developerKey=api_key)
# Get the playlist ID of the uploads playlist for the channel
response = youtube.channels().list(part="contentDetails", id=channel_id).execute()
playlist_id = response["items"][0]["contentDetails"]["relatedPlaylists"]["uploads"]
# Get the video details from the uploads playlist
videos = []
next_page_token = None
while True:
playlist_items = youtube.playlistItems().list(
part="snippet",
playlistId=playlist_id,
maxResults=max_results,
pageToken=next_page_token,
).execute()
videos.extend(playlist_items["items"])
next_page_token = playlist_items.get("nextPageToken")
if (not next_page_token) | (len(videos) >= max_results):
break
return videos[:max_results]
def get_youtube_video_info(api_key, video_id):
'''This function extracts the details of a youtube video using its video ID'''
# Set up YouTube API service
youtube = build('youtube', 'v3', developerKey=api_key)
# Get video details
request = youtube.videos().list(part='snippet,contentDetails,statistics', id=video_id)
response = request.execute()
#Extract the important content from the response
if 'items' in response:
video_info = response['items'][0]
return video_info
else:
print("Video not found.")
return None
def convert_duration_to_seconds(duration):
    '''This function converts the video duration extracted from the YouTube details to seconds'''
# Parse the ISO 8601 duration format
duration_obj = parse_duration(duration)
# Calculate the total duration in seconds
total_seconds = duration_obj.total_seconds()
return int(total_seconds)
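# Worked example: the YouTube API reports durations in ISO 8601, e.g. "PT1H2M3S"
# (1 hour, 2 minutes, 3 seconds), which the helper above converts to 3723 seconds.
def _demo_duration():
    return convert_duration_to_seconds("PT1H2M3S")  # -> 3723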
def get_video_transcript(video_ids, languages):
'''This function extracts the subtitle of a youtube video using its video ID'''
transcript = YouTubeTranscriptApi.get_transcripts(video_ids, languages=languages)
transcript = transcript[0][video_ids[0]]
return transcript
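# Hedged usage sketch (the video ID is a placeholder): the transcript API expects the ID wrapped
# in a list plus a list of preferred subtitle languages, and returns a list of
# {'text', 'start', 'duration'} entries.
def _demo_transcript(video_id="VIDEO_ID_HERE"):
    return get_video_transcript([video_id], ["en"])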
def save_transcript_to_file(transcript, output_file):
'''This functions saves the subtitle extracted from the chosen video.'''
with open(output_file, 'w', encoding='utf-8') as file:
for entry in transcript:
file.write(f"{entry['start']} - {entry['start'] + entry['duration']}\n")
file.write(f"{entry['text']}\n\n")
def speech_speed(video_transcript, subtitle_language):
'''This function takes in the video transcript and calculates the speed of the speech in the video.'''
translator = Translator()
combined_duration = 0 #variable to store the total number of seconds of speech in the video
number_of_words = 0 #variable to store the total number of words in the video
speed_categories = {'Slow Speech':[0,110],'Normal Speech':[110,150],'Fast Speech':[150,200]}
music = 'Music'
if subtitle_language != 'en':
translated_text = translator.translate(music, src='en', dest=subtitle_language)
music = (translated_text.text).replace('\n',' ')
for text in video_transcript:
if (text['text'] != f"[{music}]") & (text['text'] != f"[Music]"): #Excludes the parts of the script that only contains just music
combined_duration += int(text['duration'])
number_of_words += int(len(text['text'].split(' ')))
#calculates the words per minute
words_per_minute = round(number_of_words/(combined_duration/60))
for categ in list(speed_categories.keys()):
#using the calculated words per minute, the speech speed category is determined.
if (words_per_minute >= speed_categories[categ][0]) & (words_per_minute < speed_categories[categ][1]):
audio_speed = categ
#print(combined_duration, number_of_words, f"{words_per_minute} WPM", audio_speed)
return combined_duration, words_per_minute, audio_speed
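# Worked example of the WPM arithmetic above: 120 words spoken over 60 seconds gives
# round(120 / (60/60)) = 120 WPM, which falls in the 110-150 'Normal Speech' band.
def _demo_speech_speed():
    fake_transcript = [{"text": " ".join(["word"] * 120), "duration": 60}]
    return speech_speed(fake_transcript, "en")  # -> (60, 120, 'Normal Speech')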
def subt_sing_translator(input_set):
'''This function takes in a part of an extracted subtitle and translates it using Google's API.'''
translator = Translator()
# Translates the chunk of the subtitle using the source languages.
index = input_set[0]
text = input_set[1]
source_language = input_set[2]
try:
translated_text = translator.translate(text, src=source_language, dest='en')
translated_subt = (translated_text.text).replace('\n',' ')
except Exception as e:
# print(f"Error: {e}")
# print(index, text)
translated_subt = ''
return (index, translated_subt)
def subt_set_translator(sublist):
    '''This function takes in a sublist containing parts of an extracted subtitle,
    and then translates them in parallel (multithreaded).'''
translated_dict = {}
#Multithreading (I/O bound)
with concurrent.futures.ThreadPoolExecutor() as executor:
arguments = sublist
futures = [executor.submit(subt_sing_translator, arg) for arg in arguments]
for future in concurrent.futures.as_completed(futures):
try:
result = future.result()
if result[1] != '':
translated_dict[int(result[0])] = result[1]
except Exception as e:
# print(f"Error: {e}")
continue
return translated_dict
def combine_transcript_translate(transcript, source_language):
    '''This function processes the extracted subtitle, translates it to English in parallel
    (if the text isn't already in English) and combines all its texts into one long string.'''
string = '' #Declares an initial empty string
if source_language == 'en':
# print("English")
#Loops through the extracted transcript to compile it for further processing
for subt in transcript:
if subt['text'] != '[Music]':
string = string+f" {subt['text']}"
return string
else:
# print("Not English")
        #The parts of the subtitle are enumerated to be processed in parallel
translator = Translator()
#Translates the word 'Music' using the video language so as to omit segments containing [Music] in the subtitles
translated_text = translator.translate('Music', src='en', dest=source_language)
music = (translated_text.text).replace('\n',' ')
list_of_subts = [(i, transcript[i]['text'], source_language) for i in range(len(transcript)) if (transcript[i]['text'] != f"[{music}]") & (transcript[i]['text'] != f"[Music]")]
        #The list of texts is further divided into sublists which are to be processed in parallel
len_of_sublists = int(round(len(list_of_subts)/4))
sublist_of_subts = [list_of_subts[i:i+len_of_sublists] for i in range(0, len(list_of_subts), len_of_sublists)]
translated_dict = {}
#Multiprocessing (CPU bound)
with concurrent.futures.ProcessPoolExecutor() as executor:
arguments = sublist_of_subts
results = executor.map(subt_set_translator, arguments)
for result in results:
for key in list(result.keys()):
translated_dict[int(key)] = result[key]
        #The translated text chunks are then rearranged into a meaningful order using their originally assigned index.
ordered_keys = sorted(list(translated_dict.keys()))
#Loops through the extracted and translated transcript to compile it for further processing
for ordered_index in ordered_keys:
string = string+f" {translated_dict[ordered_index]}"
return string
def subt_sing_punctuator(part_sub):
'''This function takes in a part of a combined subtitle and punctuates it using GPT's API.'''
print(f"Length of text being analysed: {len(part_sub[1])}")
try:
combined_subt_punct = gpt_punctuator(part_sub[1])
# print(combined_subt_punct[:10])
return (part_sub[0], combined_subt_punct)
except:
# print("No response")
return (part_sub[0], '')
def subt_set_punctuator(sublist):
    '''This function takes in a sublist containing parts of a combined subtitle,
    and then processes them in parallel (multithreaded).'''
punctuated_dict = {}
#Multithreading (I/O bound)
with concurrent.futures.ThreadPoolExecutor() as executor:
arguments = sublist
futures = [executor.submit(subt_sing_punctuator, arg) for arg in arguments]
for future in concurrent.futures.as_completed(futures):
try:
result = future.result()
if result[1] != '':
punctuated_dict[int(result[0])] = result[1]
except:
continue
return punctuated_dict
def subtitle_processing(combined_subt):
    '''This function takes the combined raw subtitle and punctuates it using GPT in parallel
    (multiprocessing).'''
def split_and_enumerate_1(combined_subt, trunc_threshold):
        '''This function splits the combined subtitle and enumerates each split
        to be processed in parallel, using the word-based method.'''
split_subtitle = combined_subt.split(' ')
print(f"Number of words: {len(split_subtitle)}\nTruncation threshold: {trunc_threshold}")
subtitle_list = []
combined_words, count, combined_count = '', 0, 0
        #This splits the entire combined subtitle using the threshold calculated from the available cores
for word in split_subtitle:
combined_words = combined_words + f" {word}"
count += len(word)
if count >= trunc_threshold:
                #The split texts are appended to a list with assigned indices
subtitle_list.append((combined_count, combined_words))
combined_words, count, combined_count = '', 0, combined_count+1
subtitle_list.append((combined_count, combined_words))
return subtitle_list
def split_and_enumerate_2(combined_subt, trunc_threshold):
        '''This function splits the combined subtitle and enumerates each split
        to be processed in parallel, using the character-based method.'''
start_index, end_index, index_list = 0, trunc_threshold, []
len_of_string = len(combined_subt)
while end_index < len_of_string:
#This loop demarcates the length of the combined string into intervals for indexing
index_list.append((start_index, end_index))
start_index = end_index
end_index = end_index + trunc_threshold
index_list.append((start_index, len(combined_subt)))
subtitle_list = [(i, combined_subt[index_list[i][0]:index_list[i][1]]) for i in range(len(index_list))]
return subtitle_list
    # Preprocesses the subtitle so that GPT can process it without truncating it.
len_of_combined_subt = len(combined_subt)
num_of_chunk_in_sublist = 2
num_of_cores = 4
trunc_threshold = round(len_of_combined_subt/(num_of_cores*num_of_chunk_in_sublist)) #Uses the number of available cores (4) to split the text for quick processing
#Calls the function to split and enumerate the combined subtitle
subtitle_list = split_and_enumerate_2(combined_subt, trunc_threshold)
    #The list of texts is further divided into sublists which are to be processed in parallel
len_of_sublists = int(round(len(subtitle_list)/num_of_cores))
sublist_of_subts = [subtitle_list[i:i+len_of_sublists] for i in range(0, len(subtitle_list), len_of_sublists)]
print(f"Number of sublists: {len(sublist_of_subts)}")
subt_dict = {}
#Multiprocessing (CPU bound)
with concurrent.futures.ProcessPoolExecutor() as executor:
arguments = sublist_of_subts
results = executor.map(subt_set_punctuator, arguments)
for result in results:
for key in list(result.keys()):
subt_dict[int(key)] = result[key]
#The punctuated text chunks are then rearranged into a meaningful order using their original assigned index.
ordered_keys = sorted(list(subt_dict.keys()))
punct_subt_list = [subt_dict[key] for key in ordered_keys]
#The list of punctuated subtitles are combined once more to form a whole.
for i in range(len(punct_subt_list)):
if i == 0:
final_combined_punct_subt = punct_subt_list[i]
else:
final_combined_punct_subt = final_combined_punct_subt + f" {punct_subt_list[i]}"
gpt_threshold = 13000
    #The punctuated subtitle is then truncated to fit GPT's token limit for processing.
trunc_string = final_combined_punct_subt[:gpt_threshold]
print(f"Length: of truncated punctuated subtitle: {len(trunc_string)}")
return final_combined_punct_subt, trunc_string
def download_youtube_video(video_url, output_path='.'):
'''This function downloads a given youtube video using its video url.'''
try:
# Create a YouTube object
yt = YouTube(video_url)
# Get the highest resolution stream
video_stream = yt.streams.get_highest_resolution()
# Download the video
video_stream.download(output_path)
print(f"Video downloaded successfully to {output_path}")
except Exception as e:
print(f"Error: {e}")
#Functions for executing text analysis and processing (classification, summarization, topic modelling).
def gpt_punctuator(information):
'''Function is responsible for querying the GPT-3.5 model for analysis of a given content.'''
import openai
openai_obj = openai
openai_obj.api_key = settings.openai_apikey
#Prompt engineering message to be fed to the GPT model.
messages = [
{"role":"system","content":"you are a text analyst assistant. Your job is to punctuate a given text and output only the resulting punctuated text without omiting a single word."}]
#Creates the prompt to punctuate the subtitle extracted from the given video
prompt_1 = f"{information}"
prompt_2 = "Please properly punctuate the given text (without omitting a single word) and output only the resulting punctuated text. Please do not omit a single word from the original text."
#Adds the prompts to the chat memory
messages.append({"role": "user", "content": prompt_1},)
messages.append({"role": "user", "content": prompt_2},)
#GPT model is triggered and response is generated.
chat = openai_obj.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0,
#timeout=5
)
#Response is extracted
response = chat.choices[0].message.content
return (response)
def gpt_categorizer(information):
'''Function is responsible for querying the GPT-3.5 model for analysis of a given content.'''
import openai
openai_obj = openai
openai_obj.api_key = settings.openai_apikey
#Prompt engineering message to be fed to the GPT model.
messages = [
{"role":"system","content":"you are a text analyst assistant. Given a text to analyze, you're to only respond with 'Basic','Medium', or 'Advanced'."}]
    #Creates the prompts asking GPT to categorise the difficulty level of the transcript
prompt_1 = f"{information}"
prompt_2 = "Given the text which is a transcript of a language tutorial video, which category of difficulty (Basic, Medium and Advanced) best describes what is being taught? Output only the category and nothing else."
#Adds the prompts to the chat memory
messages.append({"role": "user", "content": prompt_1},)
messages.append({"role": "user", "content": prompt_2},)
#GPT model is triggered and response is generated.
chat = openai_obj.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0,
#timeout=5
)
#Response is extracted
response = chat.choices[0].message.content
return (response)
def gpt_summarizer(information):
'''Function is responsible for querying the GPT-3.5 model for analysis of a given content.'''
import openai
openai_obj = openai
openai_obj.api_key = settings.openai_apikey
#Prompt engineering message to be fed to the GPT model.
messages = [
{"role":"system","content":"you are a text analyst assistant. Given a text to analyze, you're to summarize the content in a few sentences."}]
    #Creates the prompts asking GPT to summarise the transcript
prompt_1 = f"{information}"
prompt_2 = "Given the text which is a transcript of a language tutorial video, please summarize the content in 5 to 10 sentences."
#Adds the prompts to the chat memory
messages.append({"role": "user", "content": prompt_1},)
messages.append({"role": "user", "content": prompt_2},)
#GPT model is triggered and response is generated.
chat = openai_obj.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0,
#timeout=5
)
#Response is extracted
response = chat.choices[0].message.content
return (response)
def gpt_topicmodeller(information):
'''Function is responsible for querying the GPT-3.5 model for analysis of a given content.'''
import openai
openai_obj = openai
openai_obj.api_key = settings.openai_apikey
#Prompt engineering message to be fed to the GPT model.
messages = [
{"role":"system","content":"you are a text analyst assistant. Given a text to analyze, you're to generate a single topic that best represent the contents within, and output only this topic with no additional write up."}]
    #Creates the prompts asking GPT to generate a single topic for the transcript
prompt_1 = f"{information}"
prompt_2 = "Given the text which is a transcript of a language tutorial video, please generate a single topic that describes the content being taught. Output only this topic and nothing else (no additional write up)."
#Adds the prompts to the chat memory
messages.append({"role": "user", "content": prompt_1},)
messages.append({"role": "user", "content": prompt_2},)
#GPT model is triggered and response is generated.
chat = openai_obj.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0,
#timeout=5
)
#Response is extracted
response = chat.choices[0].message.content
return (response)
def gpt_qualitycheck(information):
'''Function is responsible for querying the GPT-3.5 model for analysis of a given content.'''
import openai
openai_obj = openai
openai_obj.api_key = settings.openai_apikey
#Prompt engineering message to be fed to the GPT model.
messages = [
{"role":"system","content":"you are a text analyst assistant. Given a text to analyze, you're to respond with only 'Poorly articulated','Moderately articulated' or 'Very articulated'."}]
    #Creates the prompts asking GPT to rate how well the content is articulated
prompt_1 = f"{information}"
prompt_2 = "Given the text which is a transcript of a language tutorial video, is the content 'Poorly articulated', 'Moderately articulated', or 'Very articulated'? Output only the category and nothing else."
#Adds the prompts to the chat memory
messages.append({"role": "user", "content": prompt_1},)
messages.append({"role": "user", "content": prompt_2},)
#GPT model is triggered and response is generated.
chat = openai_obj.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0,
#timeout=5
)
#Response is extracted
response = chat.choices[0].message.content
return (response)
def gpt_vocabularycheck(information):
'''Function is responsible for querying the GPT-3.5 model for analysis of a given content.'''
import openai
openai_obj = openai
openai_obj.api_key = settings.openai_apikey
#Prompt engineering message to be fed to the GPT model.
messages = [
{"role":"system","content":"you are a text analyst assistant. Given a text to analyze, you're to respond with only 'Basic','Intermediate' or 'Advanced'."}]
    #Creates the prompts asking GPT to rate the vocabulary level of the transcript
prompt_1 = f"{information}"
prompt_2 = "Given the text which is a transcript of a language tutorial video, is the vocabulary level 'Basic', 'Intermediate', or 'Advanced'? Output only the category and nothing else."
#Adds the prompts to the chat memory
messages.append({"role": "user", "content": prompt_1},)
messages.append({"role": "user", "content": prompt_2},)
#GPT model is triggered and response is generated.
chat = openai_obj.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0,
#timeout=5
)
#Response is extracted
response = chat.choices[0].message.content
return (response)
def gpt_sentenceconstruct(information):
'''Function is responsible for querying the GPT-3.5 model for analysis of a given content.'''
import openai
openai_obj = openai
openai_obj.api_key = settings.openai_apikey
#Prompt engineering message to be fed to the GPT model.
messages = [
{"role":"system","content":"you are a text analyst assistant. Given a text to analyze, you're to respond with only 'Basic','Intermediate' or 'Advanced'."}]
    #Creates the prompts asking GPT to rate the sentence structure of the transcript
prompt_1 = f"{information}"
prompt_2 = "Given the text which is a transcript of a language tutorial video, is the sentence structure 'Basic', 'Intermediate', or 'Advanced'? Output only the category and nothing else."
#Adds the prompts to the chat memory
messages.append({"role": "user", "content": prompt_1},)
messages.append({"role": "user", "content": prompt_2},)
#GPT model is triggered and response is generated.
chat = openai_obj.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0,
#timeout=5
)
#Response is extracted
response = chat.choices[0].message.content
return (response)
def gpt_dialogue(information):
'''Function is responsible for querying the GPT-3.5 model for analysis of a given content.'''
import openai
openai_obj = openai
openai_obj.api_key = settings.openai_apikey
#Prompt engineering message to be fed to the GPT model.
messages = [
{"role":"system","content":"you are a text analyst assistant. Given a text to analyze, you're to respond with only 'Present', or 'Not Present'."}]
    #Creates the prompts asking GPT to check whether any dialogue is present
prompt_1 = f"{information}"
prompt_2 = "Given the text which is a transcript of a language tutorial video, is there any dialogue present? Output only the response and nothing else."
#Adds the prompts to the chat memory
messages.append({"role": "user", "content": prompt_1},)
messages.append({"role": "user", "content": prompt_2},)
#GPT model is triggered and response is generated.
chat = openai_obj.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0,
#timeout=5
)
#Response is extracted
response = chat.choices[0].message.content
return (response)
def text_sing_analyzer(input_set):
'''This function takes in a category and a truncated string and conducts a particular
type of analysis based on the category'''
#Extracts the contents of the input
category = input_set[0]
trunc_string = input_set[1]
    #Uses this dictionary to map a given category to its corresponding GPT function.
textanalysis_dict = {'category':gpt_categorizer,'summary':gpt_summarizer,'topic':gpt_topicmodeller,
'quality':gpt_qualitycheck,'vocabulary':gpt_vocabularycheck,
'sentence_construct':gpt_sentenceconstruct,'dialogue':gpt_dialogue}
print(f"Category of Text Anlysis: {category}.")
try:
gpt_response = execute_function_wrapper(textanalysis_dict[category], trunc_string)
except:
gpt_response = ''
return (category, gpt_response)
def text_set_analyzer(sublist):
'''This function takes in a sublist of categories of text analysis
and then processes it in a parallel manner (Multithreaded).'''
test_analysis_dict = {}
#Multithreading (I/O bound)
with concurrent.futures.ThreadPoolExecutor() as executor:
arguments = sublist
futures = [executor.submit(text_sing_analyzer, arg) for arg in arguments]
for future in concurrent.futures.as_completed(futures):
try:
result = future.result()
test_analysis_dict[result[0]] = result[1]
except:
continue
return test_analysis_dict
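# Hedged usage sketch: the analysis categories are fanned out as (category, text) tuples, so a
# single truncated transcript can be summarised and topic-modelled in one threaded batch
# (requires a valid OpenAI key in settings).
def _demo_text_analysis(trunc_string):
    return text_set_analyzer([("summary", trunc_string), ("topic", trunc_string)])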
#Functions for extracting the audio from the downloaded video and analyzing this audio.
def extract_audio_from_video(video_path, audio_path):
'''This function extracts the audio file from the downloaded youtube video.'''
video_clip = VideoFileClip(video_path) #Loads the downloaded video
audio_clip = video_clip.audio #Extracts the audio from the video
audio_clip.write_audiofile(audio_path, fps=44100) # Set the desired sample rate
def download_audio(video_id, output_path='audio_files'):
    '''This function downloads the video's audio track using the video ID and returns the
    file paths.'''
try:
# Construct the YouTube video URL
video_url = f'https://www.youtube.com/watch?v={video_id}'
# Create a YouTube object
yt = YouTube(video_url)
# Get the highest quality audio stream
audio_stream = yt.streams.filter(only_audio=True, file_extension='mp4').first()
# Remove invalid characters from the title to create a valid filename
video_title = yt.title
valid_filename = "".join(c for c in video_title if c.isalnum() or c in (' ', '.', '_'))
        # Download the audio stream into the output folder (default: 'audio_files')
audio_stream.download(output_path, filename=f"{valid_filename}.mp4")
# Get the downloaded audio file path
mp4_path = f"{output_path}/{valid_filename}.mp4"
        # Convert the downloaded audio to WAV
audio_clip = AudioFileClip(mp4_path)
wav_path = (f"{output_path}/{valid_filename}.wav")
audio_clip.write_audiofile(wav_path, fps=20000)
print(f"Audio downloaded and converted to MP3 successfully.")
return mp4_path, wav_path
except Exception as e:
print(f"Error: {e}")
return None
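# Hedged usage sketch (the video ID is a placeholder): downloads the audio track of a video and
# returns the .mp4 and .wav paths, or None if anything fails.
def _demo_download_audio(video_id="VIDEO_ID_HERE"):
    return download_audio(video_id, output_path="audio_files")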
def analyze_audio_speed(audio_path):
'''This function analyses the speed of the audio file.'''
try:
y, sr = librosa.load(audio_path) #Loads the extracted and stored audio
# Compute the tempo
tempo, _ = librosa.beat.beat_track(y=y, sr=sr)
print(f'Tempo: {tempo} BPM')
return tempo
except Exception as e:
print(f"Error: {e}")
return None
def analyze_sing_audio_speed(input_set):
    '''This function takes in a set containing the segment index, the audio time interval and
    the audio file path, analyzes the tempo of that segment and returns the estimated
    tempo in BPM.'''
    # Extract the index and audio time interval, and load the audio file
index = input_set[0]
time_interval = input_set[1]
audio = AudioSegment.from_file(input_set[2])
segment = audio[time_interval[0]:time_interval[1]]
# Save the segment to a temporary file
temp_file_path = f"audio_files/temp_segment_{index}.wav"
segment.export(temp_file_path, format="wav")
try:
y, sr = librosa.load(temp_file_path) #Loads the extracted and stored audio
# Compute the tempo
tempo, _ = librosa.beat.beat_track(y=y, sr=sr)
except Exception as e:
print(f"Error: {e}")
tempo = None
os.remove(temp_file_path) #Deletes the audio segment after processing to free up space
return tempo
def analyze_set_audio_speed(audio_path):
'''This function analyses the speed of the audio file.'''
try:
# Load the entire audio file
audio = AudioSegment.from_file(audio_path)
segment_duration_ms = len(audio)/4
# Calculate the number of segments
num_segments = len(audio) // segment_duration_ms
print(int(num_segments))
list_of_segments = []
for i in range(int(num_segments)):
# Calculate start and end time for each segment
start_time = i * segment_duration_ms
end_time = (i + 1) * (segment_duration_ms/1.5)
list_of_segments.append((i, [start_time, end_time], audio_path,))
bpm = []
#Multiprocessing (CPU bound)
with concurrent.futures.ProcessPoolExecutor() as executor:
arguments = list_of_segments
results = executor.map(analyze_sing_audio_speed, arguments)
for result in results:
if result != None:
bpm.append(result)
average_tempo = round(sum(bpm)/len(bpm))
print(f'Tempo: {average_tempo} BPM')
return average_tempo
except Exception as e:
print(f"Error: {e}")
return None
def audiolang_sing_processor_google(input_set):
    '''This function takes in the set of inputs necessary to process one audio segment,
    in order to execute a parallel process'''
count_overall, count_transcribed, count_firstlang, count_secondlang = 1, 0, 0, 0
recognizer = sr.Recognizer()
# Extract the segment, audio time interval and the languages
index = input_set[0]
time_interval = input_set[1]
audio = AudioSegment.from_file(input_set[2])
language_list = input_set[3]
segment = audio[time_interval[0]:time_interval[1]]
# Save the segment to a temporary file
temp_file_path = f"audio_files/temp_segment_{index}.wav"
segment.export(temp_file_path, format="wav")
try:
# Transcribe the segment while trying the first language
with sr.AudioFile(temp_file_path) as audio_file:
audio_data = recognizer.record(audio_file)
text = recognizer.recognize_google(audio_data, language=language_list[0])
# print(f"Segment {index + 1} Transcription:", text)
count_transcribed = 1
count_firstlang = 1
except sr.UnknownValueError:
try:
# Transcribe the segment while trying the second language
with sr.AudioFile(temp_file_path) as audio_file:
audio_data = recognizer.record(audio_file)
text = recognizer.recognize_google(audio_data, language=language_list[1])
# print(f"Segment {index + 1} Transcription:", text)
count_transcribed = 1
count_secondlang = 1
except sr.UnknownValueError:
# print(f"Segment {index + 1} - Speech Recognition could not understand audio")
pass
except sr.RequestError as e:
# print(f"Segment {index + 1} - Could not request results from Google Speech Recognition service; {e}")
pass
os.remove(temp_file_path) #Deletes the audio segment after processing to free up space
return (count_overall, count_transcribed, count_firstlang, count_secondlang)
def audiolang_set_processor_google(sublist):
    '''This function takes in a sublist of audio segment details and processes
    them in parallel (multithreaded).'''
count_overall, count_transcribed, count_firstlang, count_secondlang = [], [], [], []
#Multithreading (I/O bound)
with concurrent.futures.ThreadPoolExecutor() as executor:
arguments = sublist
futures = [executor.submit(audiolang_sing_processor_google, arg) for arg in arguments]
for future in concurrent.futures.as_completed(futures):
try:
result = future.result() # Get the result, may raise an exception
count_overall.append(result[0])
count_transcribed.append(result[1])
count_firstlang.append(result[2])
count_secondlang.append(result[3])
except:
count_overall.append(1)
                count_transcribed.append(0)
                count_firstlang.append(0)
                count_secondlang.append(0)
count_overall, count_transcribed = sum(count_overall), sum(count_transcribed)
count_firstlang, count_secondlang = sum(count_firstlang), sum(count_secondlang)
#print(count_overall, count_transcribed, count_firstlang, count_secondlang)
return (count_overall, count_transcribed, count_firstlang, count_secondlang)
def analyze_audio_languages_google(audio_path, first_language, second_language, segment_duration_ms=4000):
    '''This function splits the downloaded audio into fixed-length segments and estimates what
    percentage of the recognised speech is in the first versus the second language.'''
language_isocode = variables.language_isocode
try:
language_list = []
for language in [first_language.lower(), second_language.lower()]:
language_list.append(language_isocode[language])
print(language_list)
# Load the entire audio file
audio = AudioSegment.from_file(audio_path)
# Calculate the number of segments
num_segments = len(audio) // segment_duration_ms + 1
print(num_segments)
list_of_segments = []
for i in range(num_segments):
# Calculate start and end time for each segment
start_time = i * segment_duration_ms
end_time = (i + 1) * segment_duration_ms
list_of_segments.append((i, [start_time, end_time], audio_path, language_list))
len_of_sublists = int(round(len(list_of_segments)/4))
segments_sublist = [list_of_segments[i:i+len_of_sublists] for i in range(0, len(list_of_segments), len_of_sublists)]
count_overall, count_transcribed, count_firstlang, count_secondlang = [], [], [], []
#Multiprocessing (CPU bound)
with concurrent.futures.ProcessPoolExecutor() as executor:
arguments = segments_sublist
results = executor.map(audiolang_set_processor_google, arguments)
for result in results:
count_overall.append(result[0])
count_transcribed.append(result[1])
count_firstlang.append(result[2])
count_secondlang.append(result[3])
count_overall, count_transcribed = sum(count_overall), sum(count_transcribed)
count_firstlang, count_secondlang = sum(count_firstlang), sum(count_secondlang)
print(count_overall, count_transcribed, count_firstlang, count_secondlang)
        #Computes the percentage distribution of the languages using the extracted information
percentage_transcribed = round((count_transcribed/count_overall)*100)
percentage_firstlang = round((count_firstlang/count_transcribed)*100)
percentage_secondlang = 100-percentage_firstlang
# print(f"Percentage transcribed: {percentage_transcribed}%, {first_language}: {percentage_firstlang}%, {second_language}: {percentage_secondlang}%")
return percentage_transcribed, percentage_firstlang, percentage_secondlang
except Exception as e:
print(f"Error: {e}")
return None
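# Hedged usage sketch: chops a downloaded .wav into 4-second chunks and estimates how much of
# the speech Google recognises as the first versus the second language. The language names are
# assumed to be keys of variables.language_isocode.
def _demo_language_split(wav_path):
    return analyze_audio_languages_google(wav_path, "English", "Italian", segment_duration_ms=4000)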
def openai_whisper_api(path):
    '''This function takes in a file path, runs the locally loaded Whisper base model on the
    audio file and returns the detected language.'''
model = whisper.load_model("base")
result = model.transcribe(path)
#print(result['text'])
language = result['language']
return language
def audiolang_sing_processor_openai(input_set):
    '''This function takes in the set of inputs necessary to process one audio segment,
    in order to execute a parallel process.'''
count_overall, count_transcribed, language = 1, 0, None
    # Extract the index and audio time interval, and load the audio file
index = input_set[0]
time_interval = input_set[1]
audio = AudioSegment.from_file(input_set[2])
segment = audio[time_interval[0]:time_interval[1]]
# Save the segment to a temporary file
temp_file_path = f"./audio_files/temp_segment_{index}.wav"
segment.export(temp_file_path, format="wav")
try:
language = openai_whisper_api(temp_file_path)
count_transcribed = 1
except Exception as e:
print(f"Segment {index + 1} - Speech Recognition could not understand audio: {e}")
os.remove(temp_file_path) #Deletes the audio segment after processing to free up space
return (count_overall, count_transcribed, language)
def audiolang_set_processor_openai(sublist):
    '''This function takes in a sublist of audio segment details and processes
    them in parallel (multithreaded).'''
count_overall, count_transcribed, lang_dict = [], [], {}
#Multithreading (I/O bound)
with concurrent.futures.ThreadPoolExecutor() as executor:
arguments = sublist
futures = [executor.submit(audiolang_sing_processor_openai, arg) for arg in arguments]
for future in concurrent.futures.as_completed(futures):
result = future.result() # Get the result, may raise an exception
count_overall.append(result[0])
count_transcribed.append(result[1])
if result[2] != None:
if result[2] in list(lang_dict.keys()):
lang_dict[str(result[2])].append(1)
else:
lang_dict[str(result[2])] = [1]
count_overall, count_transcribed = sum(count_overall), sum(count_transcribed)
langs_dict = {str(key):sum(lang_dict[key]) for key in list(lang_dict.keys())}
#print(count_overall, count_transcribed, langs_dict)
return (count_overall, count_transcribed, langs_dict)
def analyze_audio_languages_openai(audio_path, segment_duration_ms=4000):
    '''This function splits the downloaded audio into fixed-length segments, runs Whisper language
    detection on each one and returns the percentage distribution of the detected languages.'''
language_isocode = {'english':'en-US', 'italian':'it-IT', 'french':'fr-FR'}
try:
# Load the entire audio file
audio = AudioSegment.from_file(audio_path)
# Calculate the number of segments
num_segments = len(audio) // segment_duration_ms + 1
print(num_segments)
list_of_segments = []
for i in range(num_segments):
# Calculate start and end time for each segment
start_time = i * segment_duration_ms
end_time = (i + 1) * segment_duration_ms
list_of_segments.append((i, [start_time, end_time], audio_path))
len_of_sublists = int(round(len(list_of_segments)/4))
segments_sublist = [list_of_segments[i:i+len_of_sublists] for i in range(0, len(list_of_segments), len_of_sublists)]
count_overall, count_transcribed, language_dict = [], [], {}
#Multiprocessing (CPU bound)
with concurrent.futures.ProcessPoolExecutor() as executor:
arguments = segments_sublist
results = executor.map(audiolang_set_processor_openai, arguments)
for result in results:
count_overall.append(result[0])
count_transcribed.append(result[1])
for key in list(result[2].keys()):
if key in (language_dict.keys()):
language_dict[key].append(result[2][key])
else:
language_dict[key] = [result[2][key]]
count_overall, count_transcribed = sum(count_overall), sum(count_transcribed)
languages_dict = {str(key):((sum(language_dict[key])/count_transcribed)*100) for key in list(language_dict.keys())}
languages_dict = {k:f"{v}%" for k, v in sorted(languages_dict.items(), key=lambda item: item[1], reverse=True)}
print(count_overall, count_transcribed, languages_dict)
        #Computes the percentage distribution of the languages using the extracted information
percentage_transcribed = round((count_transcribed/count_overall)*100)
print(f"Percentage transcribed: {percentage_transcribed}%,\n{languages_dict}")
return percentage_transcribed, languages_dict
except Exception as e:
print(f"Error: {e}")
return None
def delete_audios(path_list):
'''This function takes in audio file paths and deletes them from the system.'''
for audio_path in path_list:
os.remove(audio_path)
print(f"Removed {audio_path} from the repository!")
#Functions to analyze the image frames extracted from the downloaded video
def extract_frames(video_path, output_folder):
'''This function extract image frames from the downloaded youtube video.'''
cap = cv2.VideoCapture(video_path) # Open the video file
# Create the output folder if it doesn't exist
os.makedirs(output_folder, exist_ok=True)
# Read and save frames
frame_count = 0
while True:
if frame_count == 5:
#Limits the number of images extracted to 5
break
else:
ret, frame = cap.read()
if not ret:
break
# Save the frame as an image file
frame_path = os.path.join(output_folder, f"frame_{frame_count:04d}.png")
cv2.imwrite(frame_path, frame)
frame_count += 1
# Release the video capture object
cap.release()
def list_files_in_folder(folder_path):
    '''This function adds the names of the image frames extracted from the downloaded video to a list.'''
list_of_contents = [] #Creates an empty list to populate with the contents of the selected folder
try:
# Get the list of files and directories in the specified folder
contents = os.listdir(folder_path)
# Print the list of contents
print(f"Contents of {folder_path}:")
for entry in contents:
list_of_contents.append(str(entry))
print(entry)
return list_of_contents
except FileNotFoundError:
print(f"The folder '{folder_path}' does not exist.")
except PermissionError:
print(f"Permission denied to access '{folder_path}'.")
def gpt_v_image_analyser(image_name):
    '''This function converts an extracted image frame to base64 and analyzes its content using GPT-4V'''
import openai
openai_obj = openai
openai_obj.api_key = settings.openai_apikey
# Updated file path to a JPEG image
image_path_base = r".\output_frames\\"
image_path = image_path_base + image_name
# Read and encode the image in base64
with open(image_path, "rb") as image_file:
encoded_image = base64.b64encode(image_file.read()).decode("utf-8")
# Craft the prompt for GPT
prompt_messages = [{"role": "user",
"content": [{"type": "text", "text": "Does this image contain any infographics? Reply with only 'Yes' or 'No' and no added punctuations."},
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encoded_image}"}}]
}]
# Send a request to GPT
params = {
"model": "gpt-4-vision-preview",
"messages": prompt_messages,
"api_key": settings.openai_api,
# "response_format": {"type": "json_object"},
"headers": {"Openai-Version": "2020-11-07"},
"max_tokens": 4096,
}
# result = openai.ChatCompletion.create(**params)
result = openai_obj.chat.completions.create(**params)
print(result.choices[0].message.content)
return result.choices[0].message.content
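# Hedged usage sketch: run the GPT-4V yes/no infographic check over every frame previously
# extracted into the output_frames folder (requires a valid OpenAI key in settings).
def _demo_infographic_check():
    frames = list_files_in_folder("output_frames") or []
    return {name: gpt_v_image_analyser(name) for name in frames}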
| [
"Given the text which is a transcript of a language tutorial video, is the vocabulary level 'Basic', 'Intermediate', or 'Advanced'? Output only the category and nothing else.",
"Given the text which is a transcript of a language tutorial video, is the content 'Poorly articulated', 'Moderately articulated', or 'Very articulated'? Output only the category and nothing else.",
"you are a text analyst assistant. Given a text to analyze, you're to only respond with 'Basic','Medium', or 'Advanced'.",
"Given the text which is a transcript of a language tutorial video, is there any dialogue present? Output only the response and nothing else.",
"you are a text analyst assistant. Your job is to punctuate a given text and output only the resulting punctuated text without omiting a single word.",
"you are a text analyst assistant. Given a text to analyze, you're to summarize the content in a few sentences.",
"Given the text which is a transcript of a language tutorial video, please generate a single topic that describes the content being taught. Output only this topic and nothing else (no additional write up).",
"PLACEHOLDER",
"you are a text analyst assistant. Given a text to analyze, you're to generate a single topic that best represent the contents within, and output only this topic with no additional write up.",
"you are a text analyst assistant. Given a text to analyze, you're to respond with only 'Basic','Intermediate' or 'Advanced'.",
"Given the text which is a transcript of a language tutorial video, please summarize the content in 5 to 10 sentences.",
"Given the text which is a transcript of a language tutorial video, which category of difficulty (Basic, Medium and Advanced) best describes what is being taught? Output only the category and nothing else.",
"you are a text analyst assistant. Given a text to analyze, you're to respond with only 'Present', or 'Not Present'.",
"[{'type': 'text', 'text': \"Does this image contain any infographics? Reply with only 'Yes' or 'No' and no added punctuations.\"}, {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER'}}]",
"Given the text which is a transcript of a language tutorial video, is the sentence structure 'Basic', 'Intermediate', or 'Advanced'? Output only the category and nothing else.",
"you are a text analyst assistant. Given a text to analyze, you're to respond with only 'Poorly articulated','Moderately articulated' or 'Very articulated'.",
"Please properly punctuate the given text (without omitting a single word) and output only the resulting punctuated text. Please do not omit a single word from the original text."
] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~utilities~tavily_search.py | """Util that calls Tavily Search API.
In order to set this up, follow instructions at:
"""
import json
from typing import Dict, List, Optional
import aiohttp
import requests
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
TAVILY_API_URL = "https://api.tavily.com"
class TavilySearchAPIWrapper(BaseModel):
"""Wrapper for Tavily Search API."""
tavily_api_key: str
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and endpoint exists in environment."""
tavily_api_key = get_from_dict_or_env(
values, "tavily_api_key", "TAVILY_API_KEY"
)
values["tavily_api_key"] = tavily_api_key
return values
def raw_results(
self,
query: str,
max_results: Optional[int] = 5,
search_depth: Optional[str] = "advanced",
include_domains: Optional[List[str]] = [],
exclude_domains: Optional[List[str]] = [],
include_answer: Optional[bool] = False,
include_raw_content: Optional[bool] = False,
include_images: Optional[bool] = False,
) -> Dict:
params = {
"api_key": self.tavily_api_key,
"query": query,
"max_results": max_results,
"search_depth": search_depth,
"include_domains": include_domains,
"exclude_domains": exclude_domains,
"include_answer": include_answer,
"include_raw_content": include_raw_content,
"include_images": include_images,
}
response = requests.post(
# type: ignore
f"{TAVILY_API_URL}/search",
json=params,
)
response.raise_for_status()
return response.json()
def results(
self,
query: str,
max_results: Optional[int] = 5,
search_depth: Optional[str] = "advanced",
include_domains: Optional[List[str]] = [],
exclude_domains: Optional[List[str]] = [],
include_answer: Optional[bool] = False,
include_raw_content: Optional[bool] = False,
include_images: Optional[bool] = False,
) -> List[Dict]:
"""Run query through Tavily Search and return metadata.
Args:
query: The query to search for.
max_results: The maximum number of results to return.
search_depth: The depth of the search. Can be "basic" or "advanced".
include_domains: A list of domains to include in the search.
exclude_domains: A list of domains to exclude from the search.
include_answer: Whether to include the answer in the results.
include_raw_content: Whether to include the raw content in the results.
include_images: Whether to include images in the results.
Returns:
query: The query that was searched for.
follow_up_questions: A list of follow up questions.
response_time: The response time of the query.
answer: The answer to the query.
images: A list of images.
results: A list of dictionaries containing the results:
title: The title of the result.
url: The url of the result.
content: The content of the result.
score: The score of the result.
raw_content: The raw content of the result.
""" # noqa: E501
raw_search_results = self.raw_results(
query,
max_results=max_results,
search_depth=search_depth,
include_domains=include_domains,
exclude_domains=exclude_domains,
include_answer=include_answer,
include_raw_content=include_raw_content,
include_images=include_images,
)
return self.clean_results(raw_search_results["results"])
async def raw_results_async(
self,
query: str,
max_results: Optional[int] = 5,
search_depth: Optional[str] = "advanced",
include_domains: Optional[List[str]] = [],
exclude_domains: Optional[List[str]] = [],
include_answer: Optional[bool] = False,
include_raw_content: Optional[bool] = False,
include_images: Optional[bool] = False,
) -> Dict:
"""Get results from the Tavily Search API asynchronously."""
# Function to perform the API call
async def fetch() -> str:
params = {
"api_key": self.tavily_api_key,
"query": query,
"max_results": max_results,
"search_depth": search_depth,
"include_domains": include_domains,
"exclude_domains": exclude_domains,
"include_answer": include_answer,
"include_raw_content": include_raw_content,
"include_images": include_images,
}
async with aiohttp.ClientSession() as session:
async with session.post(f"{TAVILY_API_URL}/search", json=params) as res:
if res.status == 200:
data = await res.text()
return data
else:
raise Exception(f"Error {res.status}: {res.reason}")
results_json_str = await fetch()
return json.loads(results_json_str)
async def results_async(
self,
query: str,
max_results: Optional[int] = 5,
search_depth: Optional[str] = "advanced",
include_domains: Optional[List[str]] = [],
exclude_domains: Optional[List[str]] = [],
include_answer: Optional[bool] = False,
include_raw_content: Optional[bool] = False,
include_images: Optional[bool] = False,
) -> List[Dict]:
results_json = await self.raw_results_async(
query=query,
max_results=max_results,
search_depth=search_depth,
include_domains=include_domains,
exclude_domains=exclude_domains,
include_answer=include_answer,
include_raw_content=include_raw_content,
include_images=include_images,
)
return self.clean_results(results_json["results"])
def clean_results(self, results: List[Dict]) -> List[Dict]:
"""Clean results from Tavily Search API."""
clean_results = []
for result in results:
clean_results.append(
{
"url": result["url"],
"content": result["content"],
}
)
return clean_results
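# Hedged usage sketch (not part of the library): with TAVILY_API_KEY exported in the
# environment, the validator picks the key up automatically and `results` returns a list of
# {"url", "content"} dicts.
if __name__ == "__main__":
    wrapper = TavilySearchAPIWrapper()
    for hit in wrapper.results("LangChain Tavily integration", max_results=3):
        print(hit["url"], "->", hit["content"][:80])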
| [
"content"
] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~vectorstores~dingo.py | from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class Dingo(VectorStore):
"""`Dingo` vector store.
To use, you should have the ``dingodb`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Dingo
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
dingo = Dingo(embeddings, "text")
"""
def __init__(
self,
embedding: Embeddings,
text_key: str,
*,
client: Any = None,
index_name: Optional[str] = None,
dimension: int = 1024,
host: Optional[List[str]] = None,
user: str = "root",
password: str = "123123",
self_id: bool = False,
):
"""Initialize with Dingo client."""
try:
import dingodb
except ImportError:
raise ImportError(
"Could not import dingo python package. "
"Please install it with `pip install dingodb."
)
host = host if host is not None else ["172.20.31.10:13000"]
# collection
if client is not None:
dingo_client = client
else:
try:
# connect to dingo db
dingo_client = dingodb.DingoDB(user, password, host)
except ValueError as e:
raise ValueError(f"Dingo failed to connect: {e}")
self._text_key = text_key
self._client = dingo_client
if (
index_name is not None
and index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
if self_id is True:
dingo_client.create_index(
index_name, dimension=dimension, auto_id=False
)
else:
dingo_client.create_index(index_name, dimension=dimension)
self._index_name = index_name
self._embedding = embedding
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
text_key: str = "text",
batch_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
# Embed and create the documents
ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts]
metadatas_list = []
texts = list(texts)
embeds = self._embedding.embed_documents(texts)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
metadata[self._text_key] = text
metadatas_list.append(metadata)
# upsert to Dingo
for i in range(0, len(list(texts)), batch_size):
j = i + batch_size
add_res = self._client.vector_add(
self._index_name, metadatas_list[i:j], embeds[i:j], ids[i:j]
)
if not add_res:
raise Exception("vector add fail")
return ids
def similarity_search(
self,
query: str,
k: int = 4,
search_params: Optional[dict] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(
query, k=k, search_params=search_params
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
search_params: Optional[dict] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs = []
query_obj = self._embedding.embed_query(query)
results = self._client.vector_search(
self._index_name, xq=query_obj, top_k=k, search_params=search_params
)
if not results:
return []
for res in results[0]["vectorWithDistances"]:
metadatas = res["scalarData"]
id = res["id"]
score = res["distance"]
text = metadatas[self._text_key]["fields"][0]["data"]
metadata = {"id": id, "text": text, "score": score}
for meta_key in metadatas.keys():
metadata[meta_key] = metadatas[meta_key]["fields"][0]["data"]
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
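    # Illustrative sketch (not executed): the scored search above returns
    # (Document, distance) pairs, so a caller might unpack it roughly like this.
    # ``store`` is assumed to be a populated Dingo instance as in the sketch above.
    #
    #     for doc, score in store.similarity_search_with_score("vector database", k=2):
    #         print(score, doc.page_content)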
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
search_params: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self._client.vector_search(
            self._index_name, [embedding], search_params=search_params, top_k=fetch_k
)
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
[
item["vector"]["floatValues"]
for item in results[0]["vectorWithDistances"]
],
k=k,
lambda_mult=lambda_mult,
)
selected = []
for i in mmr_selected:
meta_data = {}
            scalar_data = results[0]["vectorWithDistances"][i]["scalarData"]
            for key, value in scalar_data.items():
                meta_data.update({str(key): value["fields"][0]["data"]})
selected.append(meta_data)
return [
Document(page_content=metadata.pop(self._text_key), metadata=metadata)
for metadata in selected
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
search_params: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult, search_params
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
text_key: str = "text",
index_name: Optional[str] = None,
dimension: int = 1024,
client: Any = None,
host: List[str] = ["172.20.31.10:13000"],
user: str = "root",
password: str = "123123",
batch_size: int = 500,
**kwargs: Any,
) -> Dingo:
"""Construct Dingo wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Dingo index
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Dingo
from langchain.embeddings import OpenAIEmbeddings
import dingodb
embeddings = OpenAIEmbeddings()
dingo = Dingo.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
"""
try:
import dingodb
except ImportError:
raise ImportError(
"Could not import dingo python package. "
"Please install it with `pip install dingodb`."
)
if client is not None:
dingo_client = client
else:
try:
# connect to dingo db
dingo_client = dingodb.DingoDB(user, password, host)
except ValueError as e:
raise ValueError(f"Dingo failed to connect: {e}")
if kwargs is not None and kwargs.get("self_id") is True:
if (
index_name is not None
and index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
dingo_client.create_index(
index_name, dimension=dimension, auto_id=False
)
else:
if (
index_name is not None
and index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
dingo_client.create_index(index_name, dimension=dimension)
# Embed and create the documents
ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts]
metadatas_list = []
texts = list(texts)
embeds = embedding.embed_documents(texts)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
metadata[text_key] = text
metadatas_list.append(metadata)
# upsert to Dingo
for i in range(0, len(list(texts)), batch_size):
j = i + batch_size
add_res = dingo_client.vector_add(
index_name, metadatas_list[i:j], embeds[i:j], ids[i:j]
)
if not add_res:
                raise Exception("vector add failed")
return cls(embedding, text_key, client=dingo_client, index_name=index_name)
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Any:
"""Delete by vector IDs or filter.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
return self._client.vector_delete(self._index_name, ids=ids)
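    # Illustrative sketch (not executed): ``delete`` removes vectors by id, so the ids
    # returned from ``add_texts``/``from_texts`` are worth keeping if later cleanup is
    # needed. ``store`` and ``ids`` are assumed from the earlier sketches.
    #
    #     store.delete(ids=ids)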
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~chat_models~azureml_endpoint.py | import json
from typing import Any, Dict, List, Optional, cast
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import SimpleChatModel
from langchain.llms.azureml_endpoint import AzureMLEndpointClient, ContentFormatterBase
from langchain.pydantic_v1 import SecretStr, validator
from langchain.schema.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain.utils import convert_to_secret_str, get_from_dict_or_env
class LlamaContentFormatter(ContentFormatterBase):
"""Content formatter for `LLaMA`."""
SUPPORTED_ROLES: List[str] = ["user", "assistant", "system"]
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> Dict:
"""Converts message to a dict according to role"""
content = cast(str, message.content)
if isinstance(message, HumanMessage):
return {
"role": "user",
"content": ContentFormatterBase.escape_special_characters(content),
}
elif isinstance(message, AIMessage):
return {
"role": "assistant",
"content": ContentFormatterBase.escape_special_characters(content),
}
elif isinstance(message, SystemMessage):
return {
"role": "system",
"content": ContentFormatterBase.escape_special_characters(content),
}
elif (
isinstance(message, ChatMessage)
and message.role in LlamaContentFormatter.SUPPORTED_ROLES
):
return {
"role": message.role,
"content": ContentFormatterBase.escape_special_characters(content),
}
else:
supported = ",".join(
[role for role in LlamaContentFormatter.SUPPORTED_ROLES]
)
raise ValueError(
f"""Received unsupported role.
Supported roles for the LLaMa Foundation Model: {supported}"""
)
def _format_request_payload(
self, messages: List[BaseMessage], model_kwargs: Dict
) -> bytes:
chat_messages = [
LlamaContentFormatter._convert_message_to_dict(message)
for message in messages
]
prompt = json.dumps(
{"input_data": {"input_string": chat_messages, "parameters": model_kwargs}}
)
return self.format_request_payload(prompt=prompt, model_kwargs=model_kwargs)
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request according to the chosen api"""
return str.encode(prompt)
def format_response_payload(self, output: bytes) -> str:
"""Formats response"""
return json.loads(output)["output"]
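# Illustrative sketch (not executed): for a short conversation, the request payload
# produced by ``LlamaContentFormatter._format_request_payload`` above is a JSON
# document shaped roughly as below. The ``parameters`` values are illustrative
# model_kwargs only, not defaults of this module.
#
#     {
#         "input_data": {
#             "input_string": [
#                 {"role": "system", "content": "You are a helpful assistant."},
#                 {"role": "user", "content": "Tell me a joke."}
#             ],
#             "parameters": {"temperature": 0.8, "max_new_tokens": 128}
#         }
#     }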
class AzureMLChatOnlineEndpoint(SimpleChatModel):
"""`AzureML` Chat models API.
Example:
.. code-block:: python
azure_chat = AzureMLChatOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_key="my-api-key",
content_formatter=content_formatter,
)
"""
endpoint_url: str = ""
"""URL of pre-existing Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_URL`."""
endpoint_api_key: SecretStr = convert_to_secret_str("")
"""Authentication Key for Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_API_KEY`."""
http_client: Any = None #: :meta private:
content_formatter: Any = None
"""The content formatter that provides an input and output
transform function to handle formats between the LLM and
the endpoint"""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
@validator("http_client", always=True, allow_reuse=True)
@classmethod
def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient:
"""Validate that api key and python package exist in environment."""
values["endpoint_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "endpoint_api_key", "AZUREML_ENDPOINT_API_KEY")
)
endpoint_url = get_from_dict_or_env(
values, "endpoint_url", "AZUREML_ENDPOINT_URL"
)
http_client = AzureMLEndpointClient(
endpoint_url, values["endpoint_api_key"].get_secret_value()
)
return http_client
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azureml_chat_endpoint"
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to an AzureML Managed Online endpoint.
Args:
messages: The messages in the conversation with the chat model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = azureml_model("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
request_payload = self.content_formatter._format_request_payload(
messages, _model_kwargs
)
response_payload = self.http_client.call(request_payload, **kwargs)
generated_text = self.content_formatter.format_response_payload(
response_payload
)
return generated_text
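# Illustrative sketch (not executed): wiring the formatter above into the chat model.
# The endpoint URL and key are placeholders and must point at a deployed AzureML
# managed online endpoint serving a LLaMA chat model.
#
#     from langchain.schema.messages import HumanMessage
#
#     chat = AzureMLChatOnlineEndpoint(
#         endpoint_url="https://<endpoint>.<region>.inference.ml.azure.com/score",
#         endpoint_api_key="<api-key>",
#         content_formatter=LlamaContentFormatter(),
#     )
#     reply = chat([HumanMessage(content="Tell me a joke.")])
#     print(reply.content)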
| [
"input_string",
"parameters",
"input_data"
] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~tools~render.py | """Different methods for rendering Tools to be passed to LLMs.
Depending on the LLM you are using and the prompting strategy you are using,
you may want Tools to be rendered in a different way.
This module contains various ways to render tools.
"""
from typing import List
from langchain.tools.base import BaseTool
from langchain.utils.openai_functions import (
FunctionDescription,
ToolDescription,
convert_pydantic_to_openai_function,
)
def render_text_description(tools: List[BaseTool]) -> str:
"""Render the tool name and description in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search
calculator: This tool is used for math
"""
return "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
def render_text_description_and_args(tools: List[BaseTool]) -> str:
"""Render the tool name, description, and args in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, \
args: {"expression": {"type": "string"}}
"""
tool_strings = []
for tool in tools:
args_schema = str(tool.args)
tool_strings.append(f"{tool.name}: {tool.description}, args: {args_schema}")
return "\n".join(tool_strings)
def format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
"""Format tool into the OpenAI function API."""
if tool.args_schema:
return convert_pydantic_to_openai_function(
tool.args_schema, name=tool.name, description=tool.description
)
else:
return {
"name": tool.name,
"description": tool.description,
"parameters": {
# This is a hack to get around the fact that some tools
# do not expose an args_schema, and expect an argument
# which is a string.
# And Open AI does not support an array type for the
# parameters.
"properties": {
"__arg1": {"title": "__arg1", "type": "string"},
},
"required": ["__arg1"],
"type": "object",
},
}
def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
"""Format tool into the OpenAI function API."""
function = format_tool_to_openai_function(tool)
return {"type": "function", "function": function}
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~chat_models~konko.py | """KonkoAI chat wrapper."""
from __future__ import annotations
import logging
import os
from typing import (
Any,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
import requests
from langchain.adapters.openai import convert_dict_to_message, convert_message_to_dict
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel, _generate_from_stream
from langchain.chat_models.openai import _convert_delta_to_message_chunk
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import AIMessageChunk, BaseMessage
from langchain.schema.output import ChatGenerationChunk
from langchain.utils import get_from_dict_or_env
DEFAULT_API_BASE = "https://api.konko.ai/v1"
DEFAULT_MODEL = "meta-llama/Llama-2-13b-chat-hf"
logger = logging.getLogger(__name__)
class ChatKonko(BaseChatModel):
"""`ChatKonko` Chat large language models API.
To use, you should have the ``konko`` python package installed, and the
    environment variables ``KONKO_API_KEY`` and ``OPENAI_API_KEY`` set with your API keys.
Any parameters that are valid to be passed to the konko.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatKonko
llm = ChatKonko(model="meta-llama/Llama-2-13b-chat-hf")
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"konko_api_key": "KONKO_API_KEY", "openai_api_key": "OPENAI_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
client: Any = None #: :meta private:
model: str = Field(default=DEFAULT_MODEL, alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
konko_api_key: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to Konko completion API."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: int = 20
"""Maximum number of tokens to generate."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["konko_api_key"] = get_from_dict_or_env(
values, "konko_api_key", "KONKO_API_KEY"
)
try:
import konko
except ImportError:
raise ValueError(
"Could not import konko python package. "
"Please install it with `pip install konko`."
)
try:
values["client"] = konko.ChatCompletion
except AttributeError:
raise ValueError(
"`konko` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the konko package. Try upgrading it "
"with `pip install --upgrade konko`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Konko API."""
return {
"model": self.model,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
@staticmethod
def get_available_models(
konko_api_key: Optional[str] = None,
openai_api_key: Optional[str] = None,
konko_api_base: str = DEFAULT_API_BASE,
) -> Set[str]:
"""Get available models from Konko API."""
# Try to retrieve the OpenAI API key if it's not passed as an argument
if not openai_api_key:
try:
openai_api_key = os.environ["OPENAI_API_KEY"]
except KeyError:
pass # It's okay if it's not set, we just won't use it
# Try to retrieve the Konko API key if it's not passed as an argument
if not konko_api_key:
try:
konko_api_key = os.environ["KONKO_API_KEY"]
except KeyError:
raise ValueError(
"Konko API key must be passed as keyword argument or "
"set in environment variable KONKO_API_KEY."
)
models_url = f"{konko_api_base}/models"
headers = {
"Authorization": f"Bearer {konko_api_key}",
}
if openai_api_key:
headers["X-OpenAI-Api-Key"] = openai_api_key
models_response = requests.get(models_url, headers=headers)
if models_response.status_code != 200:
raise ValueError(
f"Error getting models from {models_url}: "
f"{models_response.status_code}"
)
return {model["id"] for model in models_response.json()["data"]}
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
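        # Note: despite its name, this helper currently forwards straight to
        # ``self.client.create`` without adding any retry logic of its own.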
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
token_usage = response.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model}, **self._default_params}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the konko client."""
return {**self._default_params}
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
return {
"model": self.model,
**super()._get_invocation_params(stop=stop),
**self._default_params,
**kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "konko-chat"
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~vectorstores~momento_vector_index.py | from typing import (
TYPE_CHECKING,
Any,
Iterable,
List,
Optional,
Tuple,
Type,
TypeVar,
cast,
)
from uuid import uuid4
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_env
from langchain.vectorstores.utils import DistanceStrategy
VST = TypeVar("VST", bound="VectorStore")
if TYPE_CHECKING:
from momento import PreviewVectorIndexClient
class MomentoVectorIndex(VectorStore):
"""`Momento Vector Index` (MVI) vector store.
Momento Vector Index is a serverless vector index that can be used to store and
search vectors. To use you should have the ``momento`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import MomentoVectorIndex
from momento import (
CredentialProvider,
PreviewVectorIndexClient,
VectorIndexConfigurations,
)
vectorstore = MomentoVectorIndex(
embedding=OpenAIEmbeddings(),
client=PreviewVectorIndexClient(
VectorIndexConfigurations.Default.latest(),
credential_provider=CredentialProvider.from_environment_variable(
"MOMENTO_API_KEY"
),
),
index_name="my-index",
)
"""
def __init__(
self,
embedding: Embeddings,
client: "PreviewVectorIndexClient",
index_name: str = "default",
distance_strategy: DistanceStrategy = DistanceStrategy.COSINE,
text_field: str = "text",
ensure_index_exists: bool = True,
**kwargs: Any,
):
"""Initialize a Vector Store backed by Momento Vector Index.
Args:
embedding (Embeddings): The embedding function to use.
            client (PreviewVectorIndexClient): The Momento Vector Index client
                to use.
index_name (str, optional): The name of the index to store the documents in.
Defaults to "default".
distance_strategy (DistanceStrategy, optional): The distance strategy to
use. Defaults to DistanceStrategy.COSINE. If you select
DistanceStrategy.EUCLIDEAN_DISTANCE, Momento uses the squared
Euclidean distance.
text_field (str, optional): The name of the metadata field to store the
original text in. Defaults to "text".
ensure_index_exists (bool, optional): Whether to ensure that the index
exists before adding documents to it. Defaults to True.
"""
try:
from momento import PreviewVectorIndexClient
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
self._client: PreviewVectorIndexClient = client
self._embedding = embedding
self.index_name = index_name
self.__validate_distance_strategy(distance_strategy)
self.distance_strategy = distance_strategy
self.text_field = text_field
self._ensure_index_exists = ensure_index_exists
@staticmethod
def __validate_distance_strategy(distance_strategy: DistanceStrategy) -> None:
if distance_strategy not in [
DistanceStrategy.COSINE,
DistanceStrategy.MAX_INNER_PRODUCT,
            DistanceStrategy.EUCLIDEAN_DISTANCE,
]:
raise ValueError(f"Distance strategy {distance_strategy} not implemented.")
@property
def embeddings(self) -> Embeddings:
return self._embedding
def _create_index_if_not_exists(self, num_dimensions: int) -> bool:
"""Create index if it does not exist."""
from momento.requests.vector_index import SimilarityMetric
from momento.responses.vector_index import CreateIndex
similarity_metric = None
if self.distance_strategy == DistanceStrategy.COSINE:
similarity_metric = SimilarityMetric.COSINE_SIMILARITY
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
similarity_metric = SimilarityMetric.INNER_PRODUCT
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
similarity_metric = SimilarityMetric.EUCLIDEAN_SIMILARITY
else:
raise ValueError(
f"Distance strategy {self.distance_strategy} not implemented."
)
response = self._client.create_index(
self.index_name, num_dimensions, similarity_metric
)
if isinstance(response, CreateIndex.Success):
return True
elif isinstance(response, CreateIndex.IndexAlreadyExists):
return False
elif isinstance(response, CreateIndex.Error):
raise response.inner_exception
else:
raise Exception(f"Unexpected response: {response}")
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Iterable of strings to add to the vectorstore.
metadatas (Optional[List[dict]]): Optional list of metadatas associated with
the texts.
kwargs (Any): Other optional parameters. Specifically:
- ids (List[str], optional): List of ids to use for the texts.
Defaults to None, in which case uuids are generated.
Returns:
List[str]: List of ids from adding the texts into the vectorstore.
"""
from momento.requests.vector_index import Item
from momento.responses.vector_index import UpsertItemBatch
texts = list(texts)
if len(texts) == 0:
return []
if metadatas is not None:
for metadata, text in zip(metadatas, texts):
metadata[self.text_field] = text
else:
metadatas = [{self.text_field: text} for text in texts]
try:
embeddings = self._embedding.embed_documents(texts)
except NotImplementedError:
embeddings = [self._embedding.embed_query(x) for x in texts]
# Create index if it does not exist.
# We assume that if it does exist, then it was created with the desired number
# of dimensions and similarity metric.
if self._ensure_index_exists:
self._create_index_if_not_exists(len(embeddings[0]))
if "ids" in kwargs:
ids = kwargs["ids"]
if len(ids) != len(embeddings):
raise ValueError("Number of ids must match number of texts")
else:
ids = [str(uuid4()) for _ in range(len(embeddings))]
batch_size = 128
for i in range(0, len(embeddings), batch_size):
start = i
end = min(i + batch_size, len(embeddings))
items = [
Item(id=id, vector=vector, metadata=metadata)
for id, vector, metadata in zip(
ids[start:end],
embeddings[start:end],
metadatas[start:end],
)
]
response = self._client.upsert_item_batch(self.index_name, items)
if isinstance(response, UpsertItemBatch.Success):
pass
elif isinstance(response, UpsertItemBatch.Error):
raise response.inner_exception
else:
raise Exception(f"Unexpected response: {response}")
return ids
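    # Illustrative sketch (not executed): ``add_texts`` accepts optional explicit ids,
    # which makes later upserts and deletes deterministic. ``vectorstore`` is assumed
    # to be a MomentoVectorIndex built as in the class docstring.
    #
    #     ids = vectorstore.add_texts(
    #         ["Momento Vector Index is serverless."],
    #         metadatas=[{"source": "docs"}],
    #         ids=["doc-0001"],
    #     )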
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector ID.
Args:
ids (List[str]): List of ids to delete.
kwargs (Any): Other optional parameters (unused)
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
from momento.responses.vector_index import DeleteItemBatch
if ids is None:
return True
response = self._client.delete_item_batch(self.index_name, ids)
return isinstance(response, DeleteItemBatch.Success)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
List[Document]: A list of documents that are similar to the query.
"""
res = self.similarity_search_with_score(query=query, k=k, **kwargs)
return [doc for doc, _ in res]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
kwargs (Any): Vector Store specific search parameters. The following are
forwarded to the Momento Vector Index:
- top_k (int, optional): The number of results to return.
Returns:
List[Tuple[Document, float]]: A list of tuples of the form
(Document, score).
"""
embedding = self._embedding.embed_query(query)
results = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, **kwargs
)
return results
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Search for similar documents to the query vector.
Args:
embedding (List[float]): The query vector to search for.
k (int, optional): The number of results to return. Defaults to 4.
kwargs (Any): Vector Store specific search parameters. The following are
forwarded to the Momento Vector Index:
- top_k (int, optional): The number of results to return.
Returns:
List[Tuple[Document, float]]: A list of tuples of the form
(Document, score).
"""
from momento.requests.vector_index import ALL_METADATA
from momento.responses.vector_index import Search
if "top_k" in kwargs:
k = kwargs["k"]
response = self._client.search(
self.index_name, embedding, top_k=k, metadata_fields=ALL_METADATA
)
if not isinstance(response, Search.Success):
return []
results = []
for hit in response.hits:
text = cast(str, hit.metadata.pop(self.text_field))
doc = Document(page_content=text, metadata=hit.metadata)
pair = (doc, hit.score)
results.append(pair)
return results
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Search for similar documents to the query vector.
Args:
embedding (List[float]): The query vector to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
List[Document]: A list of documents that are similar to the query.
"""
results = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, **kwargs
)
return [doc for doc, _ in results]
@classmethod
def from_texts(
cls: Type[VST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> VST:
"""Return the Vector Store initialized from texts and embeddings.
Args:
cls (Type[VST]): The Vector Store class to use to initialize
the Vector Store.
texts (List[str]): The texts to initialize the Vector Store with.
embedding (Embeddings): The embedding function to use.
metadatas (Optional[List[dict]], optional): The metadata associated with
the texts. Defaults to None.
kwargs (Any): Vector Store specific parameters. The following are forwarded
to the Vector Store constructor and required:
- index_name (str, optional): The name of the index to store the documents
in. Defaults to "default".
- text_field (str, optional): The name of the metadata field to store the
original text in. Defaults to "text".
- distance_strategy (DistanceStrategy, optional): The distance strategy to
use. Defaults to DistanceStrategy.COSINE. If you select
DistanceStrategy.EUCLIDEAN_DISTANCE, Momento uses the squared
Euclidean distance.
- ensure_index_exists (bool, optional): Whether to ensure that the index
exists before adding documents to it. Defaults to True.
Additionally you can either pass in a client or an API key
- client (PreviewVectorIndexClient): The Momento Vector Index client to use.
- api_key (Optional[str]): The configuration to use to initialize
the Vector Index with. Defaults to None. If None, the configuration
is initialized from the environment variable `MOMENTO_API_KEY`.
Returns:
VST: Momento Vector Index vector store initialized from texts and
embeddings.
"""
from momento import (
CredentialProvider,
PreviewVectorIndexClient,
VectorIndexConfigurations,
)
if "client" in kwargs:
client = kwargs.pop("client")
else:
supplied_api_key = kwargs.pop("api_key", None)
api_key = supplied_api_key or get_from_env("api_key", "MOMENTO_API_KEY")
client = PreviewVectorIndexClient(
configuration=VectorIndexConfigurations.Default.latest(),
credential_provider=CredentialProvider.from_string(api_key),
)
vector_db = cls(embedding=embedding, client=client, **kwargs) # type: ignore
vector_db.add_texts(texts=texts, metadatas=metadatas, **kwargs)
return vector_db
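    # Illustrative sketch (not executed): ``from_texts`` with an API key instead of a
    # pre-built client. Requires MOMENTO_API_KEY in the environment (or pass
    # ``api_key=...``); the index name is a placeholder.
    #
    #     from langchain.embeddings import OpenAIEmbeddings
    #
    #     vectorstore = MomentoVectorIndex.from_texts(
    #         ["Momento Vector Index is serverless."],
    #         embedding=OpenAIEmbeddings(),
    #         index_name="my-index",
    #     )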
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~llms~ollama.py | import json
from typing import Any, Dict, Iterator, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import BaseLLM
from langchain.pydantic_v1 import Extra
from langchain.schema import LLMResult
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.output import GenerationChunk
def _stream_response_to_generation_chunk(
stream_response: str,
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
parsed_response = json.loads(stream_response)
generation_info = parsed_response if parsed_response.get("done") is True else None
return GenerationChunk(
text=parsed_response.get("response", ""), generation_info=generation_info
)
class _OllamaCommon(BaseLanguageModel):
base_url: str = "http://localhost:11434"
"""Base url the model is hosted under."""
model: str = "llama2"
"""Model name to use."""
mirostat: Optional[int] = None
"""Enable Mirostat sampling for controlling perplexity.
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
mirostat_eta: Optional[float] = None
"""Influences how quickly the algorithm responds to feedback
from the generated text. A lower learning rate will result in
slower adjustments, while a higher learning rate will make
the algorithm more responsive. (Default: 0.1)"""
mirostat_tau: Optional[float] = None
"""Controls the balance between coherence and diversity
of the output. A lower value will result in more focused and
coherent text. (Default: 5.0)"""
num_ctx: Optional[int] = None
"""Sets the size of the context window used to generate the
next token. (Default: 2048) """
num_gpu: Optional[int] = None
"""The number of GPUs to use. On macOS it defaults to 1 to
enable metal support, 0 to disable."""
num_thread: Optional[int] = None
"""Sets the number of threads to use during computation.
By default, Ollama will detect this for optimal performance.
It is recommended to set this value to the number of physical
CPU cores your system has (as opposed to the logical number of cores)."""
repeat_last_n: Optional[int] = None
"""Sets how far back for the model to look back to prevent
repetition. (Default: 64, 0 = disabled, -1 = num_ctx)"""
repeat_penalty: Optional[float] = None
"""Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
will penalize repetitions more strongly, while a lower value (e.g., 0.9)
will be more lenient. (Default: 1.1)"""
temperature: Optional[float] = None
"""The temperature of the model. Increasing the temperature will
make the model answer more creatively. (Default: 0.8)"""
stop: Optional[List[str]] = None
"""Sets the stop tokens to use."""
tfs_z: Optional[float] = None
"""Tail free sampling is used to reduce the impact of less probable
tokens from the output. A higher value (e.g., 2.0) will reduce the
impact more, while a value of 1.0 disables this setting. (default: 1)"""
top_k: Optional[int] = None
"""Reduces the probability of generating nonsense. A higher value (e.g. 100)
will give more diverse answers, while a lower value (e.g. 10)
will be more conservative. (Default: 40)"""
    top_p: Optional[float] = None
"""Works together with top-k. A higher value (e.g., 0.95) will lead
to more diverse text, while a lower value (e.g., 0.5) will
generate more focused and conservative text. (Default: 0.9)"""
system: Optional[str] = None
"""system prompt (overrides what is defined in the Modelfile)"""
template: Optional[str] = None
"""full prompt or prompt template (overrides what is defined in the Modelfile)"""
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Ollama."""
return {
"model": self.model,
"options": {
"mirostat": self.mirostat,
"mirostat_eta": self.mirostat_eta,
"mirostat_tau": self.mirostat_tau,
"num_ctx": self.num_ctx,
"num_gpu": self.num_gpu,
"num_thread": self.num_thread,
"repeat_last_n": self.repeat_last_n,
"repeat_penalty": self.repeat_penalty,
"temperature": self.temperature,
"stop": self.stop,
"tfs_z": self.tfs_z,
"top_k": self.top_k,
"top_p": self.top_p,
},
"system": self.system,
"template": self.template,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
def _create_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[str]:
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
elif stop is None:
stop = []
params = self._default_params
if "model" in kwargs:
params["model"] = kwargs["model"]
if "options" in kwargs:
params["options"] = kwargs["options"]
else:
params["options"] = {
**params["options"],
"stop": stop,
**kwargs,
}
response = requests.post(
url=f"{self.base_url}/api/generate/",
headers={"Content-Type": "application/json"},
json={"prompt": prompt, **params},
stream=True,
)
response.encoding = "utf-8"
if response.status_code != 200:
optional_detail = response.json().get("error")
raise ValueError(
f"Ollama call failed with status code {response.status_code}."
f" Details: {optional_detail}"
)
return response.iter_lines(decode_unicode=True)
def _stream_with_aggregation(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
verbose: bool = False,
**kwargs: Any,
) -> GenerationChunk:
final_chunk: Optional[GenerationChunk] = None
for stream_resp in self._create_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=verbose,
)
if final_chunk is None:
raise ValueError("No data received from Ollama stream.")
return final_chunk
class Ollama(BaseLLM, _OllamaCommon):
"""Ollama locally runs large language models.
To use, follow the instructions at https://ollama.ai/.
Example:
.. code-block:: python
from langchain.llms import Ollama
ollama = Ollama(model="llama2")
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ollama-llm"
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to Ollama's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ollama("Tell me a joke.")
"""
# TODO: add caching here.
generations = []
for prompt in prompts:
final_chunk = super()._stream_with_aggregation(
prompt,
stop=stop,
run_manager=run_manager,
verbose=self.verbose,
**kwargs,
)
generations.append([final_chunk])
return LLMResult(generations=generations)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
for stream_resp in self._create_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
| [
"None"
] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~chat_models~anyscale.py | """Anyscale Endpoints chat wrapper. Relies heavily on ChatOpenAI."""
from __future__ import annotations
import logging
import os
import sys
from typing import TYPE_CHECKING, Dict, Optional, Set
import requests
from langchain.adapters.openai import convert_message_to_dict
from langchain.chat_models.openai import (
ChatOpenAI,
_import_tiktoken,
)
from langchain.pydantic_v1 import Field, SecretStr, root_validator
from langchain.schema.messages import BaseMessage
from langchain.utils import convert_to_secret_str, get_from_dict_or_env
from langchain.utils.openai import is_openai_v1
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf"
class ChatAnyscale(ChatOpenAI):
"""`Anyscale` Chat large language models.
See https://www.anyscale.com/ for information about Anyscale.
To use, you should have the ``openai`` python package installed, and the
environment variable ``ANYSCALE_API_KEY`` set with your API key.
Alternatively, you can use the anyscale_api_key keyword argument.
Any parameters that are valid to be passed to the `openai.create` call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatAnyscale
chat = ChatAnyscale(model_name="meta-llama/Llama-2-7b-chat-hf")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anyscale-chat"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"anyscale_api_key": "ANYSCALE_API_KEY"}
anyscale_api_key: SecretStr
"""AnyScale Endpoints API keys."""
model_name: str = Field(default=DEFAULT_MODEL, alias="model")
"""Model name to use."""
anyscale_api_base: str = Field(default=DEFAULT_API_BASE)
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
anyscale_proxy: Optional[str] = None
"""To support explicit proxy for Anyscale."""
available_models: Optional[Set[str]] = None
"""Available models from Anyscale API."""
@staticmethod
def get_available_models(
anyscale_api_key: Optional[str] = None,
anyscale_api_base: str = DEFAULT_API_BASE,
) -> Set[str]:
"""Get available models from Anyscale API."""
try:
anyscale_api_key = anyscale_api_key or os.environ["ANYSCALE_API_KEY"]
except KeyError as e:
raise ValueError(
"Anyscale API key must be passed as keyword argument or "
"set in environment variable ANYSCALE_API_KEY.",
) from e
models_url = f"{anyscale_api_base}/models"
models_response = requests.get(
models_url,
headers={
"Authorization": f"Bearer {anyscale_api_key}",
},
)
if models_response.status_code != 200:
raise ValueError(
f"Error getting models from {models_url}: "
f"{models_response.status_code}",
)
return {model["id"] for model in models_response.json()["data"]}
@root_validator(pre=True)
def validate_environment_override(cls, values: dict) -> dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values,
"anyscale_api_key",
"ANYSCALE_API_KEY",
)
values["anyscale_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"anyscale_api_key",
"ANYSCALE_API_KEY",
)
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"anyscale_api_base",
"ANYSCALE_API_BASE",
default=DEFAULT_API_BASE,
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"anyscale_proxy",
"ANYSCALE_PROXY",
default="",
)
try:
import openai
except ImportError as e:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`.",
) from e
try:
if is_openai_v1():
client_params = {
"api_key": values["openai_api_key"],
"base_url": values["openai_api_base"],
# To do: future support
# "organization": values["openai_organization"],
# "timeout": values["request_timeout"],
# "max_retries": values["max_retries"],
# "default_headers": values["default_headers"],
# "default_query": values["default_query"],
# "http_client": values["http_client"],
}
values["client"] = openai.OpenAI(**client_params).chat.completions
else:
values["client"] = openai.ChatCompletion
except AttributeError as exc:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`.",
) from exc
if "model_name" not in values.keys():
values["model_name"] = DEFAULT_MODEL
model_name = values["model_name"]
available_models = cls.get_available_models(
values["openai_api_key"],
values["openai_api_base"],
)
if model_name not in available_models:
raise ValueError(
f"Model name {model_name} not found in available models: "
f"{available_models}.",
)
values["available_models"] = available_models
return values
def _get_encoding_model(self) -> tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
# Returns the number of tokens used by a list of messages.
try:
encoding = tiktoken_.encoding_for_model("gpt-3.5-turbo-0301")
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
def get_num_tokens_from_messages(self, messages: list[BaseMessage]) -> int:
"""Calculate num tokens with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
tokens_per_message = 3
tokens_per_name = 1
num_tokens = 0
messages_dict = [convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
# Cast str(value) in case the message value is not a string
# This occurs with function messages
num_tokens += len(encoding.encode(str(value)))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens
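    # Illustrative sketch (not executed): counting prompt tokens with the method above.
    # Assumes ANYSCALE_API_KEY is set so the model check during validation passes.
    #
    #     from langchain.schema.messages import HumanMessage
    #
    #     chat = ChatAnyscale(model_name="meta-llama/Llama-2-7b-chat-hf")
    #     print(chat.get_num_tokens_from_messages([HumanMessage(content="hello")]))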
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~agents~load_tools.py | # flake8: noqa
"""Tools provide access to various resources and services.
LangChain has a large ecosystem of integrations with various external resources
like local and remote file systems, APIs and databases.
These integrations allow developers to create versatile applications that combine the
power of LLMs with the ability to access, interact with and manipulate external
resources.
When developing an application, developers should inspect the capabilities and
permissions of the tools that underlie the given agent toolkit, and determine
whether permissions of the given toolkit are appropriate for the application.
See [Security](https://python.langchain.com/docs/security) for more information.
"""
import warnings
from typing import Any, Dict, List, Optional, Callable, Tuple
from mypy_extensions import Arg, KwArg
from langchain.agents.tools import Tool
from langchain.schema.language_model import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs
from langchain.chains.api.base import APIChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.utilities.dalle_image_generator import DallEAPIWrapper
from langchain.utilities.requests import TextRequestsWrapper
from langchain.tools.arxiv.tool import ArxivQueryRun
from langchain.tools.golden_query.tool import GoldenQueryRun
from langchain.tools.pubmed.tool import PubmedQueryRun
from langchain.tools.base import BaseTool
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain.tools.google_cloud.texttospeech import GoogleCloudTextToSpeechTool
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.google_scholar.tool import GoogleScholarQueryRun
from langchain.tools.metaphor_search.tool import MetaphorSearchResults
from langchain.tools.google_serper.tool import GoogleSerperResults, GoogleSerperRun
from langchain.tools.searchapi.tool import SearchAPIResults, SearchAPIRun
from langchain.tools.graphql.tool import BaseGraphQLTool
from langchain.tools.human.tool import HumanInputRun
from langchain.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool
from langchain.tools.scenexplain.tool import SceneXplainTool
from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun
from langchain.tools.shell.tool import ShellTool
from langchain.tools.sleep.tool import SleepTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.tools.openweathermap.tool import OpenWeatherMapQueryRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchResults
from langchain.tools.memorize.tool import Memorize
from langchain.utilities.arxiv import ArxivAPIWrapper
from langchain.utilities.golden_query import GoldenQueryAPIWrapper
from langchain.utilities.pubmed import PubMedAPIWrapper
from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.google_scholar import GoogleScholarAPIWrapper
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper
from langchain.utilities.awslambda import LambdaWrapper
from langchain.utilities.graphql import GraphQLAPIWrapper
from langchain.utilities.searchapi import SearchApiAPIWrapper
from langchain.utilities.searx_search import SearxSearchWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
from langchain.utilities.twilio import TwilioAPIWrapper
from langchain.utilities.wikipedia import WikipediaAPIWrapper
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
def _get_python_repl() -> BaseTool:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def _get_tools_requests_get() -> BaseTool:
return RequestsGetTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_post() -> BaseTool:
return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_patch() -> BaseTool:
return RequestsPatchTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_put() -> BaseTool:
return RequestsPutTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_delete() -> BaseTool:
return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
def _get_terminal() -> BaseTool:
return ShellTool()
def _get_sleep() -> BaseTool:
return SleepTool()
_BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = {
"python_repl": _get_python_repl,
"requests": _get_tools_requests_get, # preserved for backwards compatibility
"requests_get": _get_tools_requests_get,
"requests_post": _get_tools_requests_post,
"requests_patch": _get_tools_requests_patch,
"requests_put": _get_tools_requests_put,
"requests_delete": _get_tools_requests_delete,
"terminal": _get_terminal,
"sleep": _get_sleep,
}
def _get_llm_math(llm: BaseLanguageModel) -> BaseTool:
return Tool(
name="Calculator",
description="Useful for when you need to answer questions about math.",
func=LLMMathChain.from_llm(llm=llm).run,
coroutine=LLMMathChain.from_llm(llm=llm).arun,
)
def _get_open_meteo_api(llm: BaseLanguageModel) -> BaseTool:
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS)
return Tool(
name="Open-Meteo-API",
description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
_LLM_TOOLS: Dict[str, Callable[[BaseLanguageModel], BaseTool]] = {
"llm-math": _get_llm_math,
"open-meteo-api": _get_open_meteo_api,
}
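# Illustrative sketch (not executed): these registries are what the module's
# ``load_tools`` helper consults when a caller asks for tools by name, roughly:
#
#     from langchain.agents import load_tools
#     from langchain.llms import OpenAI
#
#     tools = load_tools(["llm-math", "requests_get"], llm=OpenAI())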
def _get_news_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
news_api_key = kwargs["news_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key}
)
return Tool(
name="News-API",
description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_tmdb_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
tmdb_bearer_token = kwargs["tmdb_bearer_token"]
chain = APIChain.from_llm_and_api_docs(
llm,
tmdb_docs.TMDB_DOCS,
headers={"Authorization": f"Bearer {tmdb_bearer_token}"},
)
return Tool(
name="TMDB-API",
description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_podcast_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
listen_api_key = kwargs["listen_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm,
podcast_docs.PODCAST_DOCS,
headers={"X-ListenAPI-Key": listen_api_key},
)
return Tool(
name="Podcast-API",
description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_lambda_api(**kwargs: Any) -> BaseTool:
return Tool(
name=kwargs["awslambda_tool_name"],
description=kwargs["awslambda_tool_description"],
func=LambdaWrapper(**kwargs).run,
)
def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
def _get_google_search(**kwargs: Any) -> BaseTool:
return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_wikipedia(**kwargs: Any) -> BaseTool:
return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs))
def _get_arxiv(**kwargs: Any) -> BaseTool:
return ArxivQueryRun(api_wrapper=ArxivAPIWrapper(**kwargs))
def _get_golden_query(**kwargs: Any) -> BaseTool:
return GoldenQueryRun(api_wrapper=GoldenQueryAPIWrapper(**kwargs))
def _get_pubmed(**kwargs: Any) -> BaseTool:
return PubmedQueryRun(api_wrapper=PubMedAPIWrapper(**kwargs))
def _get_google_serper(**kwargs: Any) -> BaseTool:
return GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_scholar(**kwargs: Any) -> BaseTool:
return GoogleScholarQueryRun(api_wrapper=GoogleScholarAPIWrapper(**kwargs))
def _get_google_serper_results_json(**kwargs: Any) -> BaseTool:
return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_search_results_json(**kwargs: Any) -> BaseTool:
return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_searchapi(**kwargs: Any) -> BaseTool:
return SearchAPIRun(api_wrapper=SearchApiAPIWrapper(**kwargs))
def _get_searchapi_results_json(**kwargs: Any) -> BaseTool:
return SearchAPIResults(api_wrapper=SearchApiAPIWrapper(**kwargs))
def _get_serpapi(**kwargs: Any) -> BaseTool:
return Tool(
name="Search",
description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SerpAPIWrapper(**kwargs).run,
coroutine=SerpAPIWrapper(**kwargs).arun,
)
def _get_dalle_image_generator(**kwargs: Any) -> Tool:
return Tool(
"Dall-E-Image-Generator",
DallEAPIWrapper(**kwargs).run,
"A wrapper around OpenAI DALL-E API. Useful for when you need to generate images from a text description. Input should be an image description.",
)
def _get_twilio(**kwargs: Any) -> BaseTool:
return Tool(
name="Text-Message",
description="Useful for when you need to send a text message to a provided phone number.",
func=TwilioAPIWrapper(**kwargs).run,
)
def _get_searx_search(**kwargs: Any) -> BaseTool:
return SearxSearchRun(wrapper=SearxSearchWrapper(**kwargs))
def _get_searx_search_results_json(**kwargs: Any) -> BaseTool:
wrapper_kwargs = {k: v for k, v in kwargs.items() if k != "num_results"}
return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **kwargs)
def _get_bing_search(**kwargs: Any) -> BaseTool:
return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
def _get_metaphor_search(**kwargs: Any) -> BaseTool:
return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs))
def _get_ddg_search(**kwargs: Any) -> BaseTool:
return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs))
def _get_human_tool(**kwargs: Any) -> BaseTool:
return HumanInputRun(**kwargs)
def _get_scenexplain(**kwargs: Any) -> BaseTool:
return SceneXplainTool(**kwargs)
def _get_graphql_tool(**kwargs: Any) -> BaseTool:
graphql_endpoint = kwargs["graphql_endpoint"]
wrapper = GraphQLAPIWrapper(graphql_endpoint=graphql_endpoint)
return BaseGraphQLTool(graphql_wrapper=wrapper)
def _get_openweathermap(**kwargs: Any) -> BaseTool:
return OpenWeatherMapQueryRun(api_wrapper=OpenWeatherMapAPIWrapper(**kwargs))
def _get_dataforseo_api_search(**kwargs: Any) -> BaseTool:
return DataForSeoAPISearchRun(api_wrapper=DataForSeoAPIWrapper(**kwargs))
def _get_dataforseo_api_search_json(**kwargs: Any) -> BaseTool:
return DataForSeoAPISearchResults(api_wrapper=DataForSeoAPIWrapper(**kwargs))
def _get_eleven_labs_text2speech(**kwargs: Any) -> BaseTool:
return ElevenLabsText2SpeechTool(**kwargs)
def _get_memorize(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
return Memorize(llm=llm)
def _get_google_cloud_texttospeech(**kwargs: Any) -> BaseTool:
return GoogleCloudTextToSpeechTool(**kwargs)
_EXTRA_LLM_TOOLS: Dict[
str,
Tuple[Callable[[Arg(BaseLanguageModel, "llm"), KwArg(Any)], BaseTool], List[str]],
] = {
"news-api": (_get_news_api, ["news_api_key"]),
"tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
"podcast-api": (_get_podcast_api, ["listen_api_key"]),
"memorize": (_get_memorize, []),
}
_EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = {
"wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
"google-search": (_get_google_search, ["google_api_key", "google_cse_id"]),
"google-search-results-json": (
_get_google_search_results_json,
["google_api_key", "google_cse_id", "num_results"],
),
"searx-search-results-json": (
_get_searx_search_results_json,
["searx_host", "engines", "num_results", "aiosession"],
),
"bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]),
"metaphor-search": (_get_metaphor_search, ["metaphor_api_key"]),
"ddg-search": (_get_ddg_search, []),
"google-serper": (_get_google_serper, ["serper_api_key", "aiosession"]),
"google-scholar": (
_get_google_scholar,
["top_k_results", "hl", "lr", "serp_api_key"],
),
"google-serper-results-json": (
_get_google_serper_results_json,
["serper_api_key", "aiosession"],
),
"searchapi": (_get_searchapi, ["searchapi_api_key", "aiosession"]),
"searchapi-results-json": (
_get_searchapi_results_json,
["searchapi_api_key", "aiosession"],
),
"serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]),
"dalle-image-generator": (_get_dalle_image_generator, ["openai_api_key"]),
"twilio": (_get_twilio, ["account_sid", "auth_token", "from_number"]),
"searx-search": (_get_searx_search, ["searx_host", "engines", "aiosession"]),
"wikipedia": (_get_wikipedia, ["top_k_results", "lang"]),
"arxiv": (
_get_arxiv,
["top_k_results", "load_max_docs", "load_all_available_meta"],
),
"golden-query": (_get_golden_query, ["golden_api_key"]),
"pubmed": (_get_pubmed, ["top_k_results"]),
"human": (_get_human_tool, ["prompt_func", "input_func"]),
"awslambda": (
_get_lambda_api,
["awslambda_tool_name", "awslambda_tool_description", "function_name"],
),
"sceneXplain": (_get_scenexplain, []),
"graphql": (_get_graphql_tool, ["graphql_endpoint"]),
"openweathermap-api": (_get_openweathermap, ["openweathermap_api_key"]),
"dataforseo-api-search": (
_get_dataforseo_api_search,
["api_login", "api_password", "aiosession"],
),
"dataforseo-api-search-json": (
_get_dataforseo_api_search_json,
["api_login", "api_password", "aiosession"],
),
"eleven_labs_text2speech": (_get_eleven_labs_text2speech, ["eleven_api_key"]),
"google_cloud_texttospeech": (_get_google_cloud_texttospeech, []),
}
def _handle_callbacks(
callback_manager: Optional[BaseCallbackManager], callbacks: Callbacks
) -> Callbacks:
if callback_manager is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
if callbacks is not None:
raise ValueError(
"Cannot specify both callback_manager and callbacks arguments."
)
return callback_manager
return callbacks
def load_huggingface_tool(
task_or_repo_id: str,
model_repo_id: Optional[str] = None,
token: Optional[str] = None,
remote: bool = False,
**kwargs: Any,
) -> BaseTool:
"""Loads a tool from the HuggingFace Hub.
Args:
task_or_repo_id: Task or model repo id.
model_repo_id: Optional model repo id.
token: Optional token.
remote: Optional remote. Defaults to False.
**kwargs:
Returns:
A tool.
"""
try:
from transformers import load_tool
except ImportError:
        raise ImportError(
            "HuggingFace tools require the libraries `transformers>=4.29.0`"
            " and `huggingface_hub>=0.14.1` to be installed."
            " Please install them with"
            " `pip install --upgrade transformers huggingface_hub`."
)
hf_tool = load_tool(
task_or_repo_id,
model_repo_id=model_repo_id,
token=token,
remote=remote,
**kwargs,
)
outputs = hf_tool.outputs
if set(outputs) != {"text"}:
raise NotImplementedError("Multimodal outputs not supported yet.")
inputs = hf_tool.inputs
if set(inputs) != {"text"}:
raise NotImplementedError("Multimodal inputs not supported yet.")
return Tool.from_function(
hf_tool.__call__, name=hf_tool.name, description=hf_tool.description
)
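# A brief, hedged sketch of how load_huggingface_tool can be used. The repo id
# below is only an illustrative placeholder, and the example assumes
# transformers>=4.29.0 and huggingface_hub>=0.14.1 are installed.
def _example_load_huggingface_tool() -> BaseTool:
    # Loads a text-to-text tool from the Hub and wraps it as a LangChain Tool.
    return load_huggingface_tool("lysandre/hf-model-downloads")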
def load_tools(
tool_names: List[str],
llm: Optional[BaseLanguageModel] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> List[BaseTool]:
"""Load tools based on their name.
Tools allow agents to interact with various resources and services like
APIs, databases, file systems, etc.
    Please scope the permissions of each tool to the minimum required for the
application.
For example, if an application only needs to read from a database,
    the database tool should not be given write permissions. Moreover,
    consider scoping the permissions to only allow access to specific
    tables and imposing user-level quotas to limit resource usage.
Please read the APIs of the individual tools to determine which configuration
they support.
See [Security](https://python.langchain.com/docs/security) for more information.
Args:
tool_names: name of tools to load.
llm: An optional language model, may be needed to initialize certain tools.
callbacks: Optional callback manager or list of callback handlers.
If not provided, default global callback manager will be used.
Returns:
List of tools.
"""
tools = []
callbacks = _handle_callbacks(
callback_manager=kwargs.get("callback_manager"), callbacks=callbacks
)
for name in tool_names:
if name == "requests":
warnings.warn(
"tool name `requests` is deprecated - "
"please use `requests_all` or specify the requests method"
)
if name == "requests_all":
# expand requests into various methods
requests_method_tools = [
_tool for _tool in _BASE_TOOLS if _tool.startswith("requests_")
]
tool_names.extend(requests_method_tools)
elif name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
tool = _LLM_TOOLS[name](llm)
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f"Tool {name} requires some parameters that were not "
f"provided: {missing_keys}"
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
tools.append(tool)
else:
raise ValueError(f"Got unknown tool {name}")
if callbacks is not None:
for tool in tools:
tool.callbacks = callbacks
return tools
def get_all_tool_names() -> List[str]:
"""Get a list of all possible tool names."""
return (
list(_BASE_TOOLS)
+ list(_EXTRA_OPTIONAL_TOOLS)
+ list(_EXTRA_LLM_TOOLS)
+ list(_LLM_TOOLS)
)
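# A minimal usage sketch of load_tools, assuming the optional
# `duckduckgo-search` and `wikipedia` packages are installed; the tool names
# are taken directly from the _EXTRA_OPTIONAL_TOOLS registry above.
def _example_load_tools() -> List[str]:
    tools = load_tools(["ddg-search", "wikipedia"])
    # Each entry is a BaseTool exposing a name, description and run() method.
    return [tool.name for tool in tools]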
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~chat_models~minimax.py | """Wrapper around Minimax chat models."""
import logging
from typing import Any, Dict, List, Optional, cast
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.minimax import MinimaxCommon
from langchain.llms.utils import enforce_stop_tokens
from langchain.schema import (
AIMessage,
BaseMessage,
    ChatGeneration,
    ChatResult,
HumanMessage,
)
logger = logging.getLogger(__name__)
def _parse_message(msg_type: str, text: str) -> Dict:
return {"sender_type": msg_type, "text": text}
def _parse_chat_history(history: List[BaseMessage]) -> List:
"""Parse a sequence of messages into history."""
chat_history = []
for message in history:
content = cast(str, message.content)
if isinstance(message, HumanMessage):
chat_history.append(_parse_message("USER", content))
if isinstance(message, AIMessage):
chat_history.append(_parse_message("BOT", content))
return chat_history
class MiniMaxChat(MinimaxCommon, BaseChatModel):
"""Wrapper around Minimax large language models.
    To use, you should have the environment variables ``MINIMAX_GROUP_ID`` and
    ``MINIMAX_API_KEY`` set with your API token, or pass them as named parameters
    to the constructor.
Example:
.. code-block:: python
from langchain.chat_models import MiniMaxChat
llm = MiniMaxChat(model_name="abab5-chat")
"""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate next turn in the conversation.
Args:
            messages: The history of the conversation as a list of messages.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
if not messages:
raise ValueError(
"You should provide at least one message to start the chat!"
)
history = _parse_chat_history(messages)
payload = self._default_params
payload["messages"] = history
        text = self._client.post(payload)
        # This is required since the stop tokens are not enforced by the model
        # parameters.
        text = text if stop is None else enforce_stop_tokens(text, stop)
        return ChatResult(
            generations=[ChatGeneration(message=AIMessage(content=text))]
        )
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
raise NotImplementedError(
"""Minimax AI doesn't support async requests at the moment."""
)
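# A short, hedged usage sketch: it assumes MINIMAX_GROUP_ID and MINIMAX_API_KEY
# are set in the environment (see the class docstring); the prompt text is
# illustrative only.
def _example_minimax_chat() -> BaseMessage:
    chat = MiniMaxChat()
    return chat([HumanMessage(content="Translate 'hello world' into French.")])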
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~callbacks~infino_callback.py | import time
from typing import Any, Dict, List, Optional, cast
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
from langchain.schema.messages import BaseMessage
def import_infino() -> Any:
"""Import the infino client."""
try:
from infinopy import InfinoClient
except ImportError:
raise ImportError(
"To use the Infino callbacks manager you need to have the"
" `infinopy` python package installed."
"Please install it with `pip install infinopy`"
)
return InfinoClient()
def import_tiktoken() -> Any:
"""Import tiktoken for counting tokens for OpenAI models."""
try:
import tiktoken
except ImportError:
raise ImportError(
"To use the ChatOpenAI model with Infino callback manager, you need to "
"have the `tiktoken` python package installed."
"Please install it with `pip install tiktoken`"
)
return tiktoken
def get_num_tokens(string: str, openai_model_name: str) -> int:
    """Calculate num tokens for OpenAI with tiktoken package.
    Official documentation:
    https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
    """
tiktoken = import_tiktoken()
encoding = tiktoken.encoding_for_model(openai_model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
class InfinoCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs to Infino."""
def __init__(
self,
model_id: Optional[str] = None,
model_version: Optional[str] = None,
verbose: bool = False,
) -> None:
# Set Infino client
self.client = import_infino()
self.model_id = model_id
self.model_version = model_version
self.verbose = verbose
self.is_chat_openai_model = False
self.chat_openai_model_name = "gpt-3.5-turbo"
def _send_to_infino(
self,
key: str,
value: Any,
is_ts: bool = True,
) -> None:
"""Send the key-value to Infino.
Parameters:
key (str): the key to send to Infino.
value (Any): the value to send to Infino.
is_ts (bool): if True, the value is part of a time series, else it
is sent as a log message.
"""
payload = {
"date": int(time.time()),
key: value,
"labels": {
"model_id": self.model_id,
"model_version": self.model_version,
},
}
if self.verbose:
print(f"Tracking {key} with Infino: {payload}")
# Append to Infino time series only if is_ts is True, otherwise
# append to Infino log.
if is_ts:
self.client.append_ts(payload)
else:
self.client.append_log(payload)
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> None:
"""Log the prompts to Infino, and set start time and error flag."""
for prompt in prompts:
self._send_to_infino("prompt", prompt, is_ts=False)
# Set the error flag to indicate no error (this will get overridden
# in on_llm_error if an error occurs).
self.error = 0
# Set the start time (so that we can calculate the request
# duration in on_llm_end).
self.start_time = time.time()
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing when a new token is generated."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Log the latency, error, token usage, and response to Infino."""
# Calculate and track the request latency.
self.end_time = time.time()
duration = self.end_time - self.start_time
self._send_to_infino("latency", duration)
# Track success or error flag.
self._send_to_infino("error", self.error)
# Track prompt response.
for generations in response.generations:
for generation in generations:
self._send_to_infino("prompt_response", generation.text, is_ts=False)
# Track token usage (for non-chat models).
if (response.llm_output is not None) and isinstance(response.llm_output, Dict):
token_usage = response.llm_output["token_usage"]
if token_usage is not None:
prompt_tokens = token_usage["prompt_tokens"]
total_tokens = token_usage["total_tokens"]
completion_tokens = token_usage["completion_tokens"]
self._send_to_infino("prompt_tokens", prompt_tokens)
self._send_to_infino("total_tokens", total_tokens)
self._send_to_infino("completion_tokens", completion_tokens)
# Track completion token usage (for openai chat models).
if self.is_chat_openai_model:
messages = " ".join(
generation.message.content # type: ignore[attr-defined]
for generation in generations
)
completion_tokens = get_num_tokens(
messages, openai_model_name=self.chat_openai_model_name
)
self._send_to_infino("completion_tokens", completion_tokens)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Set the error flag."""
self.error = 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Do nothing when LLM chain starts."""
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Do nothing when LLM chain ends."""
pass
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Need to log the error."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
pass
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
pass
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> None:
"""Run when LLM starts running."""
# Currently, for chat models, we only support input prompts for ChatOpenAI.
# Check if this model is a ChatOpenAI model.
values = serialized.get("id")
if values:
for value in values:
if value == "ChatOpenAI":
self.is_chat_openai_model = True
break
# Track prompt tokens for ChatOpenAI model.
if self.is_chat_openai_model:
invocation_params = kwargs.get("invocation_params")
if invocation_params:
model_name = invocation_params.get("model_name")
if model_name:
self.chat_openai_model_name = model_name
prompt_tokens = 0
for message_list in messages:
message_string = " ".join(
cast(str, msg.content) for msg in message_list
)
num_tokens = get_num_tokens(
message_string,
openai_model_name=self.chat_openai_model_name,
)
prompt_tokens += num_tokens
self._send_to_infino("prompt_tokens", prompt_tokens)
if self.verbose:
print(
f"on_chat_model_start: is_chat_openai_model= \
{self.is_chat_openai_model}, \
chat_openai_model_name={self.chat_openai_model_name}"
)
# Send the prompt to infino
prompt = " ".join(
cast(str, msg.content) for sublist in messages for msg in sublist
)
self._send_to_infino("prompt", prompt, is_ts=False)
# Set the error flag to indicate no error (this will get overridden
# in on_llm_error if an error occurs).
self.error = 0
# Set the start time (so that we can calculate the request
# duration in on_llm_end).
self.start_time = time.time()
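# A hedged usage sketch: it assumes a running Infino server reachable by
# InfinoClient and an OpenAI API key in the environment; the model_id and
# model_version values are illustrative placeholders.
def _example_infino_callback() -> str:
    from langchain.llms import OpenAI
    handler = InfinoCallbackHandler(model_id="demo-model", model_version="0.1")
    llm = OpenAI(temperature=0, callbacks=[handler])
    # Latency, errors, token usage and the prompt/response pair are tracked.
    return llm("Tell me a one-line fact about observability.")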
| [
"0",
" ",
"prompt_tokens"
] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~schema~runnable~retry.py | from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from tenacity import (
AsyncRetrying,
RetryCallState,
RetryError,
Retrying,
retry_if_exception_type,
stop_after_attempt,
wait_exponential_jitter,
)
from langchain.schema.runnable.base import Input, Output, RunnableBindingBase
from langchain.schema.runnable.config import RunnableConfig, patch_config
if TYPE_CHECKING:
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
T = TypeVar("T", CallbackManagerForChainRun, AsyncCallbackManagerForChainRun)
U = TypeVar("U")
class RunnableRetry(RunnableBindingBase[Input, Output]):
"""Retry a Runnable if it fails.
    A RunnableRetry can be used to add retry logic to any object
    that subclasses the base Runnable.
Such retries are especially useful for network calls that may fail
due to transient errors.
The RunnableRetry is implemented as a RunnableBinding. The easiest
way to use it is through the `.with_retry()` method on all Runnables.
Example:
Here's an example that uses a RunnableLambda to raise an exception
.. code-block:: python
import time
def foo(input) -> None:
'''Fake function that raises an exception.'''
                    raise ValueError(f"Invoking foo failed. At time {time.time()}")
runnable = RunnableLambda(foo)
runnable_with_retries = runnable.with_retry(
retry_exception_types=(ValueError,), # Retry only on ValueError
wait_exponential_jitter=True, # Add jitter to the exponential backoff
max_attempt_number=2, # Try twice
)
# The method invocation above is equivalent to the longer form below:
runnable_with_retries = RunnableRetry(
bound=runnable,
retry_exception_types=(ValueError,),
max_attempt_number=2,
wait_exponential_jitter=True
)
This logic can be used to retry any Runnable, including a chain of Runnables,
but in general it's best practice to keep the scope of the retry as small as
possible. For example, if you have a chain of Runnables, you should only retry
the Runnable that is likely to fail, not the entire chain.
Example:
.. code-block:: python
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
template = PromptTemplate.from_template("tell me a joke about {topic}.")
model = ChatOpenAI(temperature=0.5)
# Good
chain = template | model.with_retry()
# Bad
chain = template | model
retryable_chain = chain.with_retry()
"""
retry_exception_types: Tuple[Type[BaseException], ...] = (Exception,)
"""The exception types to retry on. By default all exceptions are retried.
In general you should only retry on exceptions that are likely to be
transient, such as network errors.
Good exceptions to retry are all server errors (5xx) and selected client
errors (4xx) such as 429 Too Many Requests.
"""
wait_exponential_jitter: bool = True
"""Whether to add jitter to the exponential backoff."""
max_attempt_number: int = 3
"""The maximum number of attempts to retry the runnable."""
@property
def _kwargs_retrying(self) -> Dict[str, Any]:
kwargs: Dict[str, Any] = dict()
if self.max_attempt_number:
kwargs["stop"] = stop_after_attempt(self.max_attempt_number)
if self.wait_exponential_jitter:
kwargs["wait"] = wait_exponential_jitter()
if self.retry_exception_types:
kwargs["retry"] = retry_if_exception_type(self.retry_exception_types)
return kwargs
def _sync_retrying(self, **kwargs: Any) -> Retrying:
return Retrying(**self._kwargs_retrying, **kwargs)
def _async_retrying(self, **kwargs: Any) -> AsyncRetrying:
return AsyncRetrying(**self._kwargs_retrying, **kwargs)
def _patch_config(
self,
config: RunnableConfig,
run_manager: "T",
retry_state: RetryCallState,
) -> RunnableConfig:
attempt = retry_state.attempt_number
tag = "retry:attempt:{}".format(attempt) if attempt > 1 else None
return patch_config(config, callbacks=run_manager.get_child(tag))
def _patch_config_list(
self,
config: List[RunnableConfig],
run_manager: List["T"],
retry_state: RetryCallState,
) -> List[RunnableConfig]:
return [
self._patch_config(c, rm, retry_state) for c, rm in zip(config, run_manager)
]
def _invoke(
self,
input: Input,
run_manager: "CallbackManagerForChainRun",
config: RunnableConfig,
**kwargs: Any,
) -> Output:
for attempt in self._sync_retrying(reraise=True):
with attempt:
result = super().invoke(
input,
self._patch_config(config, run_manager, attempt.retry_state),
**kwargs,
)
if attempt.retry_state.outcome and not attempt.retry_state.outcome.failed:
attempt.retry_state.set_result(result)
return result
def invoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
return self._call_with_config(self._invoke, input, config, **kwargs)
async def _ainvoke(
self,
input: Input,
run_manager: "AsyncCallbackManagerForChainRun",
config: RunnableConfig,
**kwargs: Any,
) -> Output:
async for attempt in self._async_retrying(reraise=True):
with attempt:
result = await super().ainvoke(
input,
self._patch_config(config, run_manager, attempt.retry_state),
**kwargs,
)
if attempt.retry_state.outcome and not attempt.retry_state.outcome.failed:
attempt.retry_state.set_result(result)
return result
async def ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
return await self._acall_with_config(self._ainvoke, input, config, **kwargs)
def _batch(
self,
inputs: List[Input],
run_manager: List["CallbackManagerForChainRun"],
config: List[RunnableConfig],
**kwargs: Any,
) -> List[Union[Output, Exception]]:
results_map: Dict[int, Output] = {}
def pending(iterable: List[U]) -> List[U]:
return [item for idx, item in enumerate(iterable) if idx not in results_map]
try:
for attempt in self._sync_retrying():
with attempt:
# Get the results of the inputs that have not succeeded yet.
result = super().batch(
pending(inputs),
self._patch_config_list(
pending(config), pending(run_manager), attempt.retry_state
),
return_exceptions=True,
**kwargs,
)
# Register the results of the inputs that have succeeded.
first_exception = None
for i, r in enumerate(result):
if isinstance(r, Exception):
if not first_exception:
first_exception = r
continue
results_map[i] = r
# If any exception occurred, raise it, to retry the failed ones
if first_exception:
raise first_exception
if (
attempt.retry_state.outcome
and not attempt.retry_state.outcome.failed
):
attempt.retry_state.set_result(result)
except RetryError as e:
try:
result
except UnboundLocalError:
result = cast(List[Output], [e] * len(inputs))
outputs: List[Union[Output, Exception]] = []
for idx, _ in enumerate(inputs):
if idx in results_map:
outputs.append(results_map[idx])
else:
outputs.append(result.pop(0))
return outputs
def batch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> List[Output]:
return self._batch_with_config(
self._batch, inputs, config, return_exceptions=return_exceptions, **kwargs
)
async def _abatch(
self,
inputs: List[Input],
run_manager: List["AsyncCallbackManagerForChainRun"],
config: List[RunnableConfig],
**kwargs: Any,
) -> List[Union[Output, Exception]]:
results_map: Dict[int, Output] = {}
def pending(iterable: List[U]) -> List[U]:
return [item for idx, item in enumerate(iterable) if idx not in results_map]
try:
async for attempt in self._async_retrying():
with attempt:
# Get the results of the inputs that have not succeeded yet.
result = await super().abatch(
pending(inputs),
self._patch_config_list(
pending(config), pending(run_manager), attempt.retry_state
),
return_exceptions=True,
**kwargs,
)
# Register the results of the inputs that have succeeded.
first_exception = None
for i, r in enumerate(result):
if isinstance(r, Exception):
if not first_exception:
first_exception = r
continue
results_map[i] = r
# If any exception occurred, raise it, to retry the failed ones
if first_exception:
raise first_exception
if (
attempt.retry_state.outcome
and not attempt.retry_state.outcome.failed
):
attempt.retry_state.set_result(result)
except RetryError as e:
try:
result
except UnboundLocalError:
result = cast(List[Output], [e] * len(inputs))
outputs: List[Union[Output, Exception]] = []
for idx, _ in enumerate(inputs):
if idx in results_map:
outputs.append(results_map[idx])
else:
outputs.append(result.pop(0))
return outputs
async def abatch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> List[Output]:
return await self._abatch_with_config(
self._abatch, inputs, config, return_exceptions=return_exceptions, **kwargs
)
# stream() and transform() are not retried because retrying a stream
# is not very intuitive.
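# A small, self-contained sketch of the behaviour described in the class
# docstring: a flaky function that fails twice and succeeds on the third
# attempt, wrapped in a RunnableRetry. No external services are involved.
def _example_runnable_retry() -> str:
    from langchain.schema.runnable.base import RunnableLambda
    attempts = {"count": 0}
    def flaky(_: str) -> str:
        attempts["count"] += 1
        if attempts["count"] < 3:
            raise ValueError("transient failure")
        return f"succeeded on attempt {attempts['count']}"
    retrying = RunnableRetry(
        bound=RunnableLambda(flaky),
        retry_exception_types=(ValueError,),
        max_attempt_number=3,
    )
    return retrying.invoke("ping")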
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~document_loaders~parsers~language~cobol.py | import re
from typing import Callable, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
SECTION_PATTERN = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
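# A tiny illustrative sketch: the COBOL snippet below is made up and simply
# exercises is_valid() and extract_functions_classes() on an in-memory string.
def _example_cobol_segmenter() -> List[str]:
    code = "\n".join(
        [
            "IDENTIFICATION DIVISION.",
            "PROGRAM-ID. HELLO.",
            "PROCEDURE DIVISION.",
            "MAIN-PARA.",
            "    DISPLAY 'HELLO, WORLD'.",
            "    STOP RUN.",
        ]
    )
    segmenter = CobolSegmenter(code)
    # Returns the PROCEDURE DIVISION paragraphs as separate code segments.
    return segmenter.extract_functions_classes() if segmenter.is_valid() else []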
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~vectorstores~matching_engine.py | from __future__ import annotations
import json
import logging
import time
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type
from langchain.schema.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.cloud import storage
from google.cloud.aiplatform import MatchingEngineIndex, MatchingEngineIndexEndpoint
from google.cloud.aiplatform.matching_engine.matching_engine_index_endpoint import (
Namespace,
)
from google.oauth2.service_account import Credentials
from langchain.embeddings import TensorflowHubEmbeddings
logger = logging.getLogger()
class MatchingEngine(VectorStore):
"""`Google Vertex AI Matching Engine` vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in docs/modules/indexes/vectorstores/examples/matchingengine.ipynb
    Note that this implementation is mostly suited to read-heavy, real-time
    use cases: while reading is a real-time operation, updating the index
    takes close to one hour."""
def __init__(
self,
project_id: str,
index: MatchingEngineIndex,
endpoint: MatchingEngineIndexEndpoint,
embedding: Embeddings,
gcs_client: storage.Client,
gcs_bucket_name: str,
credentials: Optional[Credentials] = None,
):
"""Vertex Matching Engine implementation of the vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in
docs/modules/indexes/vectorstores/examples/matchingengine.ipynb.
        Note that this implementation is mostly suited to read-heavy, real-time
        use cases: while reading is a real-time operation, updating the index
        takes close to one hour.
Attributes:
project_id: The GCS project id.
index: The created index class. See
~:func:`MatchingEngine.from_components`.
endpoint: The created endpoint class. See
~:func:`MatchingEngine.from_components`.
embedding: A :class:`Embeddings` that will be used for
embedding the text sent. If none is sent, then the
multilingual Tensorflow Universal Sentence Encoder will be used.
gcs_client: The GCS client.
gcs_bucket_name: The GCS bucket name.
credentials (Optional): Created GCP credentials.
"""
super().__init__()
self._validate_google_libraries_installation()
self.project_id = project_id
self.index = index
self.endpoint = endpoint
self.embedding = embedding
self.gcs_client = gcs_client
self.credentials = credentials
self.gcs_bucket_name = gcs_bucket_name
@property
def embeddings(self) -> Embeddings:
return self.embedding
def _validate_google_libraries_installation(self) -> None:
"""Validates that Google libraries that are needed are installed."""
try:
from google.cloud import aiplatform, storage # noqa: F401
from google.oauth2 import service_account # noqa: F401
except ImportError:
raise ImportError(
"You must run `pip install --upgrade "
"google-cloud-aiplatform google-cloud-storage`"
"to use the MatchingEngine Vectorstore."
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters.
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts = list(texts)
if metadatas is not None and len(texts) != len(metadatas):
raise ValueError(
"texts and metadatas do not have the same length. Received "
f"{len(texts)} texts and {len(metadatas)} metadatas."
)
logger.debug("Embedding documents.")
embeddings = self.embedding.embed_documents(texts)
jsons = []
ids = []
# Could be improved with async.
for idx, (embedding, text) in enumerate(zip(embeddings, texts)):
id = str(uuid.uuid4())
ids.append(id)
json_: dict = {"id": id, "embedding": embedding}
if metadatas is not None:
json_["metadata"] = metadatas[idx]
jsons.append(json_)
self._upload_to_gcs(text, f"documents/{id}")
logger.debug(f"Uploaded {len(ids)} documents to GCS.")
# Creating json lines from the embedded documents.
result_str = "\n".join([json.dumps(x) for x in jsons])
filename_prefix = f"indexes/{uuid.uuid4()}"
filename = f"{filename_prefix}/{time.time()}.json"
self._upload_to_gcs(result_str, filename)
logger.debug(
f"Uploaded updated json with embeddings to "
f"{self.gcs_bucket_name}/{filename}."
)
self.index = self.index.update_embeddings(
contents_delta_uri=f"gs://{self.gcs_bucket_name}/{filename_prefix}/"
)
logger.debug("Updated index with new configuration.")
return ids
def _upload_to_gcs(self, data: str, gcs_location: str) -> None:
"""Uploads data to gcs_location.
Args:
data: The data that will be stored.
gcs_location: The location where the data will be stored.
"""
bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
blob = bucket.blob(gcs_location)
blob.upload_from_string(data)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[List[Namespace]] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query and their cosine distance from the query.
Args:
query: String query look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Optional. A list of Namespaces for filtering
the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
logger.debug(f"Embedding query {query}.")
embedding_query = self.embedding.embed_query(query)
return self.similarity_search_by_vector_with_score(
embedding_query, k=k, filter=filter
)
def similarity_search_by_vector_with_score(
self,
embedding: List[float],
k: int = 4,
filter: Optional[List[Namespace]] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to the embedding and their cosine distance.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Optional. A list of Namespaces for filtering
the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
filter = filter or []
# If the endpoint is public we use the find_neighbors function.
if self.endpoint._public_match_client:
response = self.endpoint.find_neighbors(
deployed_index_id=self._get_index_id(),
queries=[embedding],
num_neighbors=k,
filter=filter,
)
else:
response = self.endpoint.match(
deployed_index_id=self._get_index_id(),
queries=[embedding],
num_neighbors=k,
filter=filter,
)
logger.debug(f"Found {len(response)} matches.")
if len(response) == 0:
return []
results = []
        # Only the first element of the response is used: `queries` accepts an
        # array, but similarity_search handles a single query, so the match
        # method always returns an array with exactly one element.
for doc in response[0]:
page_content = self._download_from_gcs(f"documents/{doc.id}")
results.append((Document(page_content=page_content), doc.distance))
logger.debug("Downloaded documents for query.")
return results
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[List[Namespace]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: The string that will be used to search for similar documents.
k: The amount of neighbors that will be retrieved.
filter: Optional. A list of Namespaces for filtering the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
A list of k matching documents.
"""
docs_and_scores = self.similarity_search_with_score(
query, k=k, filter=filter, **kwargs
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[List[Namespace]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to the embedding.
Args:
embedding: Embedding to look up documents similar to.
k: The amount of neighbors that will be retrieved.
filter: Optional. A list of Namespaces for filtering the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
A list of k matching documents.
"""
docs_and_scores = self.similarity_search_by_vector_with_score(
embedding, k=k, filter=filter, **kwargs
)
return [doc for doc, _ in docs_and_scores]
def _get_index_id(self) -> str:
"""Gets the correct index id for the endpoint.
Returns:
The index id if found (which should be found) or throws
ValueError otherwise.
"""
for index in self.endpoint.deployed_indexes:
if index.index == self.index.resource_name:
return index.id
raise ValueError(
f"No index with id {self.index.resource_name} "
f"deployed on endpoint "
f"{self.endpoint.display_name}."
)
def _download_from_gcs(self, gcs_location: str) -> str:
"""Downloads from GCS in text format.
Args:
gcs_location: The location where the file is located.
Returns:
The string contents of the file.
"""
bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
blob = bucket.blob(gcs_location)
return blob.download_as_string()
@classmethod
def from_texts(
cls: Type["MatchingEngine"],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> "MatchingEngine":
"""Use from components instead."""
raise NotImplementedError(
"This method is not implemented. Instead, you should initialize the class"
" with `MatchingEngine.from_components(...)` and then call "
"`add_texts`"
)
@classmethod
def from_components(
cls: Type["MatchingEngine"],
project_id: str,
region: str,
gcs_bucket_name: str,
index_id: str,
endpoint_id: str,
credentials_path: Optional[str] = None,
embedding: Optional[Embeddings] = None,
) -> "MatchingEngine":
"""Takes the object creation out of the constructor.
Args:
project_id: The GCP project id.
region: The default location making the API calls. It must have
the same location as the GCS bucket and must be regional.
gcs_bucket_name: The location where the vectors will be stored in
order for the index to be created.
index_id: The id of the created index.
endpoint_id: The id of the created endpoint.
credentials_path: (Optional) The path of the Google credentials on
the local file system.
embedding: The :class:`Embeddings` that will be used for
embedding the texts.
Returns:
A configured MatchingEngine with the texts added to the index.
"""
gcs_bucket_name = cls._validate_gcs_bucket(gcs_bucket_name)
credentials = cls._create_credentials_from_file(credentials_path)
index = cls._create_index_by_id(index_id, project_id, region, credentials)
endpoint = cls._create_endpoint_by_id(
endpoint_id, project_id, region, credentials
)
gcs_client = cls._get_gcs_client(credentials, project_id)
cls._init_aiplatform(project_id, region, gcs_bucket_name, credentials)
return cls(
project_id=project_id,
index=index,
endpoint=endpoint,
embedding=embedding or cls._get_default_embeddings(),
gcs_client=gcs_client,
credentials=credentials,
gcs_bucket_name=gcs_bucket_name,
)
@classmethod
def _validate_gcs_bucket(cls, gcs_bucket_name: str) -> str:
"""Validates the gcs_bucket_name as a bucket name.
Args:
gcs_bucket_name: The received bucket uri.
Returns:
A valid gcs_bucket_name or throws ValueError if full path is
provided.
"""
gcs_bucket_name = gcs_bucket_name.replace("gs://", "")
if "/" in gcs_bucket_name:
raise ValueError(
f"The argument gcs_bucket_name should only be "
f"the bucket name. Received {gcs_bucket_name}"
)
return gcs_bucket_name
@classmethod
def _create_credentials_from_file(
cls, json_credentials_path: Optional[str]
) -> Optional[Credentials]:
"""Creates credentials for GCP.
Args:
json_credentials_path: The path on the file system where the
credentials are stored.
Returns:
An optional of Credentials or None, in which case the default
will be used.
"""
from google.oauth2 import service_account
credentials = None
if json_credentials_path is not None:
credentials = service_account.Credentials.from_service_account_file(
json_credentials_path
)
return credentials
@classmethod
def _create_index_by_id(
cls, index_id: str, project_id: str, region: str, credentials: "Credentials"
) -> MatchingEngineIndex:
"""Creates a MatchingEngineIndex object by id.
Args:
index_id: The created index id.
project_id: The project to retrieve index from.
region: Location to retrieve index from.
credentials: GCS credentials.
Returns:
A configured MatchingEngineIndex.
"""
from google.cloud import aiplatform
logger.debug(f"Creating matching engine index with id {index_id}.")
return aiplatform.MatchingEngineIndex(
index_name=index_id,
project=project_id,
location=region,
credentials=credentials,
)
@classmethod
def _create_endpoint_by_id(
cls, endpoint_id: str, project_id: str, region: str, credentials: "Credentials"
) -> MatchingEngineIndexEndpoint:
"""Creates a MatchingEngineIndexEndpoint object by id.
Args:
endpoint_id: The created endpoint id.
project_id: The project to retrieve index from.
region: Location to retrieve index from.
credentials: GCS credentials.
Returns:
A configured MatchingEngineIndexEndpoint.
"""
from google.cloud import aiplatform
logger.debug(f"Creating endpoint with id {endpoint_id}.")
return aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=endpoint_id,
project=project_id,
location=region,
credentials=credentials,
)
@classmethod
def _get_gcs_client(
cls, credentials: "Credentials", project_id: str
) -> "storage.Client":
"""Lazily creates a GCS client.
Returns:
A configured GCS client.
"""
from google.cloud import storage
return storage.Client(
credentials=credentials,
project=project_id,
client_info=get_client_info(module="vertex-ai-matching-engine"),
)
@classmethod
def _init_aiplatform(
cls,
project_id: str,
region: str,
gcs_bucket_name: str,
credentials: "Credentials",
) -> None:
"""Configures the aiplatform library.
Args:
project_id: The GCP project id.
region: The default location making the API calls. It must have
the same location as the GCS bucket and must be regional.
gcs_bucket_name: GCS staging location.
credentials: The GCS Credentials object.
"""
from google.cloud import aiplatform
logger.debug(
f"Initializing AI Platform for project {project_id} on "
f"{region} and for {gcs_bucket_name}."
)
aiplatform.init(
project=project_id,
location=region,
staging_bucket=gcs_bucket_name,
credentials=credentials,
)
@classmethod
def _get_default_embeddings(cls) -> "TensorflowHubEmbeddings":
"""This function returns the default embedding.
Returns:
Default TensorflowHubEmbeddings to use.
"""
from langchain.embeddings import TensorflowHubEmbeddings
return TensorflowHubEmbeddings()
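# A hedged end-to-end sketch of from_components(), add_texts() and
# similarity_search(). Every identifier below (project, region, bucket, index,
# endpoint) is a placeholder, and the Google Cloud libraries plus credentials
# are assumed to be configured already.
def _example_matching_engine() -> List[Document]:
    engine = MatchingEngine.from_components(
        project_id="my-project",
        region="us-central1",
        gcs_bucket_name="my-bucket",
        index_id="1234567890",
        endpoint_id="0987654321",
    )
    engine.add_texts(["Vertex AI Matching Engine stores embeddings."])
    # Remember that index updates are not instantaneous (close to one hour).
    return engine.similarity_search("Where are embeddings stored?", k=1)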
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~memory~chat_message_histories~dynamodb.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Dict, List, Optional
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import (
BaseMessage,
_message_to_dict,
messages_from_dict,
messages_to_dict,
)
if TYPE_CHECKING:
from boto3.session import Session
logger = logging.getLogger(__name__)
class DynamoDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in AWS DynamoDB.
This class expects that a DynamoDB table exists with name `table_name`
Args:
table_name: name of the DynamoDB table
session_id: arbitrary key that is used to store the messages
of a single chat session.
endpoint_url: URL of the AWS endpoint to connect to. This argument
is optional and useful for test purposes, like using Localstack.
If you plan to use AWS cloud service, you normally don't have to
worry about setting the endpoint_url.
primary_key_name: name of the primary key of the DynamoDB table. This argument
is optional, defaulting to "SessionId".
key: an optional dictionary with a custom primary and secondary key.
This argument is optional, but useful when using composite dynamodb keys, or
isolating records based off of application details such as a user id.
This may also contain global and local secondary index keys.
kms_key_id: an optional AWS KMS Key ID, AWS KMS Key ARN, or AWS KMS Alias for
client-side encryption
"""
def __init__(
self,
table_name: str,
session_id: str,
endpoint_url: Optional[str] = None,
primary_key_name: str = "SessionId",
key: Optional[Dict[str, str]] = None,
boto3_session: Optional[Session] = None,
kms_key_id: Optional[str] = None,
):
if boto3_session:
client = boto3_session.resource("dynamodb", endpoint_url=endpoint_url)
else:
try:
import boto3
except ImportError as e:
raise ImportError(
"Unable to import boto3, please install with `pip install boto3`."
) from e
if endpoint_url:
client = boto3.resource("dynamodb", endpoint_url=endpoint_url)
else:
client = boto3.resource("dynamodb")
self.table = client.Table(table_name)
self.session_id = session_id
self.key: Dict = key or {primary_key_name: session_id}
if kms_key_id:
try:
from dynamodb_encryption_sdk.encrypted.table import EncryptedTable
from dynamodb_encryption_sdk.identifiers import CryptoAction
from dynamodb_encryption_sdk.material_providers.aws_kms import (
AwsKmsCryptographicMaterialsProvider,
)
from dynamodb_encryption_sdk.structures import AttributeActions
except ImportError as e:
raise ImportError(
"Unable to import dynamodb_encryption_sdk, please install with "
"`pip install dynamodb-encryption-sdk`."
) from e
actions = AttributeActions(
default_action=CryptoAction.DO_NOTHING,
attribute_actions={"History": CryptoAction.ENCRYPT_AND_SIGN},
)
aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=kms_key_id)
self.table = EncryptedTable(
table=self.table,
materials_provider=aws_kms_cmp,
attribute_actions=actions,
auto_refresh_table_indexes=False,
)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from DynamoDB"""
try:
from botocore.exceptions import ClientError
except ImportError as e:
raise ImportError(
"Unable to import botocore, please install with `pip install botocore`."
) from e
response = None
try:
response = self.table.get_item(Key=self.key)
except ClientError as error:
if error.response["Error"]["Code"] == "ResourceNotFoundException":
logger.warning("No record found with session id: %s", self.session_id)
else:
logger.error(error)
if response and "Item" in response:
items = response["Item"]["History"]
else:
items = []
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in DynamoDB"""
try:
from botocore.exceptions import ClientError
except ImportError as e:
raise ImportError(
"Unable to import botocore, please install with `pip install botocore`."
) from e
messages = messages_to_dict(self.messages)
_message = _message_to_dict(message)
messages.append(_message)
try:
self.table.put_item(Item={**self.key, "History": messages})
except ClientError as err:
logger.error(err)
def clear(self) -> None:
"""Clear session memory from DynamoDB"""
try:
from botocore.exceptions import ClientError
except ImportError as e:
raise ImportError(
"Unable to import botocore, please install with `pip install botocore`."
) from e
try:
self.table.delete_item(Key=self.key)
except ClientError as err:
logger.error(err)
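# A short usage sketch: it assumes a DynamoDB table named "SessionTable" with a
# "SessionId" primary key already exists and that AWS credentials are
# configured; the session id is illustrative.
def _example_dynamodb_history() -> List[BaseMessage]:
    from langchain.schema.messages import AIMessage, HumanMessage
    history = DynamoDBChatMessageHistory(
        table_name="SessionTable", session_id="user-123"
    )
    history.add_message(HumanMessage(content="Hi there!"))
    history.add_message(AIMessage(content="Hello! How can I help you today?"))
    return history.messages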
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~chat_models~yandex.py | """Wrapper around YandexGPT chat models."""
import logging
from typing import Any, Dict, List, Optional, Tuple, cast
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.utils import enforce_stop_tokens
from langchain.llms.yandex import _BaseYandexGPT
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatResult,
HumanMessage,
SystemMessage,
)
logger = logging.getLogger(__name__)
def _parse_message(role: str, text: str) -> Dict:
return {"role": role, "text": text}
def _parse_chat_history(history: List[BaseMessage]) -> Tuple[List[Dict[str, str]], str]:
"""Parse a sequence of messages into history.
Returns:
A tuple of a list of parsed messages and an instruction message for the model.
"""
chat_history = []
instruction = ""
for message in history:
content = cast(str, message.content)
if isinstance(message, HumanMessage):
chat_history.append(_parse_message("user", content))
if isinstance(message, AIMessage):
chat_history.append(_parse_message("assistant", content))
if isinstance(message, SystemMessage):
instruction = content
return chat_history, instruction
class ChatYandexGPT(_BaseYandexGPT, BaseChatModel):
"""Wrapper around YandexGPT large language models.
There are two authentication options for the service account
with the ``ai.languageModels.user`` role:
- You can specify the token in a constructor parameter `iam_token`
or in an environment variable `YC_IAM_TOKEN`.
- You can specify the key in a constructor parameter `api_key`
or in an environment variable `YC_API_KEY`.
Example:
.. code-block:: python
from langchain.chat_models import ChatYandexGPT
chat_model = ChatYandexGPT(iam_token="t1.9eu...")
"""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
try:
import grpc
from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value
from yandex.cloud.ai.llm.v1alpha.llm_pb2 import GenerationOptions, Message
from yandex.cloud.ai.llm.v1alpha.llm_service_pb2 import ChatRequest
from yandex.cloud.ai.llm.v1alpha.llm_service_pb2_grpc import (
TextGenerationServiceStub,
)
except ImportError as e:
raise ImportError(
"Please install YandexCloud SDK" " with `pip install yandexcloud`."
) from e
if not messages:
raise ValueError(
"You should provide at least one message to start the chat!"
)
message_history, instruction = _parse_chat_history(messages)
channel_credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(self.url, channel_credentials)
request = ChatRequest(
model=self.model_name,
generation_options=GenerationOptions(
temperature=DoubleValue(value=self.temperature),
max_tokens=Int64Value(value=self.max_tokens),
),
instruction_text=instruction,
messages=[Message(**message) for message in message_history],
)
stub = TextGenerationServiceStub(channel)
if self.iam_token:
metadata = (("authorization", f"Bearer {self.iam_token}"),)
else:
metadata = (("authorization", f"Api-Key {self.api_key}"),)
res = stub.Chat(request, metadata=metadata)
text = list(res)[0].message.text
text = text if stop is None else enforce_stop_tokens(text, stop)
message = AIMessage(content=text)
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
raise NotImplementedError(
"""YandexGPT doesn't support async requests at the moment."""
)
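# A hedged usage sketch: it assumes a valid IAM token (or YC_API_KEY) for a
# service account with the ai.languageModels.user role and the `yandexcloud`
# package installed; the token and messages are illustrative placeholders.
def _example_yandexgpt_chat() -> BaseMessage:
    chat_model = ChatYandexGPT(iam_token="t1.9eu...")
    return chat_model(
        [
            SystemMessage(content="You are a helpful translator."),
            HumanMessage(content="Translate 'good morning' into Russian."),
        ]
    )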
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~llms~symblai_nebula.py | import json
import logging
from typing import Any, Callable, Dict, List, Mapping, Optional
import requests
from requests import ConnectTimeout, ReadTimeout, RequestException
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, SecretStr, root_validator
from langchain.utils import convert_to_secret_str
from langchain.utils.env import get_from_dict_or_env
DEFAULT_NEBULA_SERVICE_URL = "https://api-nebula.symbl.ai"
DEFAULT_NEBULA_SERVICE_PATH = "/v1/model/generate"
logger = logging.getLogger(__name__)
class Nebula(LLM):
"""Nebula Service models.
    To use, you should have the environment variables ``NEBULA_SERVICE_URL``,
    ``NEBULA_SERVICE_PATH`` and ``NEBULA_API_KEY`` set for your Nebula
    Service, or pass them as named parameters to the constructor.
Example:
.. code-block:: python
from langchain.llms import Nebula
nebula = Nebula(
nebula_service_url="NEBULA_SERVICE_URL",
nebula_service_path="NEBULA_SERVICE_PATH",
nebula_api_key="NEBULA_API_KEY",
)
""" # noqa: E501
"""Key/value arguments to pass to the model. Reserved for future use"""
model_kwargs: Optional[dict] = None
"""Optional"""
nebula_service_url: Optional[str] = None
nebula_service_path: Optional[str] = None
nebula_api_key: Optional[SecretStr] = None
model: Optional[str] = None
max_new_tokens: Optional[int] = 128
temperature: Optional[float] = 0.6
top_p: Optional[float] = 0.95
repetition_penalty: Optional[float] = 1.0
top_k: Optional[int] = 0
penalty_alpha: Optional[float] = 0.0
stop_sequences: Optional[List[str]] = None
max_retries: Optional[int] = 10
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
nebula_service_url = get_from_dict_or_env(
values,
"nebula_service_url",
"NEBULA_SERVICE_URL",
DEFAULT_NEBULA_SERVICE_URL,
)
nebula_service_path = get_from_dict_or_env(
values,
"nebula_service_path",
"NEBULA_SERVICE_PATH",
DEFAULT_NEBULA_SERVICE_PATH,
)
nebula_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "nebula_api_key", "NEBULA_API_KEY", None)
)
if nebula_service_url.endswith("/"):
nebula_service_url = nebula_service_url[:-1]
if not nebula_service_path.startswith("/"):
nebula_service_path = "/" + nebula_service_path
values["nebula_service_url"] = nebula_service_url
values["nebula_service_path"] = nebula_service_path
values["nebula_api_key"] = nebula_api_key
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"repetition_penalty": self.repetition_penalty,
"penalty_alpha": self.penalty_alpha,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
"nebula_service_url": self.nebula_service_url,
"nebula_service_path": self.nebula_service_path,
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nebula"
def _invocation_params(
self, stop_sequences: Optional[List[str]], **kwargs: Any
) -> dict:
params = self._default_params
if self.stop_sequences is not None and stop_sequences is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop_sequences is not None:
params["stop_sequences"] = self.stop_sequences
else:
params["stop_sequences"] = stop_sequences
return {**params, **kwargs}
@staticmethod
def _process_response(response: Any, stop: Optional[List[str]]) -> str:
text = response["output"]["text"]
if stop:
text = enforce_stop_tokens(text, stop)
return text
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Nebula Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nebula("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
prompt = prompt.strip()
if "\n" in prompt:
instruction = prompt.split("\n")[0]
conversation = "\n".join(prompt.split("\n")[1:])
else:
raise ValueError("Prompt must contain instruction and conversation.")
response = completion_with_retry(
self,
instruction=instruction,
conversation=conversation,
params=params,
url=f"{self.nebula_service_url}{self.nebula_service_path}",
)
_stop = params.get("stop_sequences")
return self._process_response(response, _stop)
def make_request(
self: Nebula,
instruction: str,
conversation: str,
url: str = f"{DEFAULT_NEBULA_SERVICE_URL}{DEFAULT_NEBULA_SERVICE_PATH}",
params: Optional[Dict] = None,
) -> Any:
"""Generate text from the model."""
params = params or {}
api_key = None
if self.nebula_api_key is not None:
api_key = self.nebula_api_key.get_secret_value()
headers = {
"Content-Type": "application/json",
"ApiKey": f"{api_key}",
}
body = {
"prompt": {
"instruction": instruction,
"conversation": {"text": f"{conversation}"},
}
}
# add params to body
for key, value in params.items():
body[key] = value
# make request
response = requests.post(url, headers=headers, json=body)
if response.status_code != 200:
raise Exception(
f"Request failed with status code {response.status_code}"
f" and message {response.text}"
)
return json.loads(response.text)
def _create_retry_decorator(llm: Nebula) -> Callable[[Any], Any]:
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterward
max_retries = llm.max_retries if llm.max_retries is not None else 3
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type((RequestException, ConnectTimeout, ReadTimeout))
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: Nebula, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) -> Any:
return make_request(llm, **_kwargs)
return _completion_with_retry(**kwargs)
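# A minimal usage sketch for the Nebula wrapper defined above. The prompt must
# put the instruction on its first line and the conversation on the following
# lines, otherwise `_call` raises a ValueError. The key is a placeholder and a
# reachable Nebula service is assumed.
if __name__ == "__main__":
    llm = Nebula(nebula_api_key="<NEBULA_API_KEY>")
    prompt = (
        "Identify the action items from this conversation.\n"
        "Alice: Can you send the quarterly report by Friday?\n"
        "Bob: Sure, and I will schedule the review meeting as well."
    )
    print(llm(prompt))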
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~embeddings~fastembed.py | from typing import Any, Dict, List, Literal, Optional
import numpy as np
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
class FastEmbedEmbeddings(BaseModel, Embeddings):
"""Qdrant FastEmbedding models.
FastEmbed is a lightweight, fast, Python library built for embedding generation.
See more documentation at:
* https://github.com/qdrant/fastembed/
* https://qdrant.github.io/fastembed/
To use this class, you must install the `fastembed` Python package.
`pip install fastembed`
Example:
from langchain.embeddings import FastEmbedEmbeddings
fastembed = FastEmbedEmbeddings()
"""
model_name: str = "BAAI/bge-small-en-v1.5"
"""Name of the FastEmbedding model to use
Defaults to "BAAI/bge-small-en-v1.5"
Find the list of supported models at
https://qdrant.github.io/fastembed/examples/Supported_Models/
"""
max_length: int = 512
"""The maximum number of tokens. Defaults to 512.
Unknown behavior for values > 512.
"""
cache_dir: Optional[str]
"""The path to the cache directory.
Defaults to `local_cache` in the parent directory
"""
threads: Optional[int]
"""The number of threads single onnxruntime session can use.
Defaults to None
"""
doc_embed_type: Literal["default", "passage"] = "default"
"""Type of embedding to use for documents
"default": Uses FastEmbed's default embedding method
"passage": Prefixes the text with "passage" before embedding.
"""
_model: Any # : :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that FastEmbed has been installed."""
try:
from fastembed.embedding import FlagEmbedding
model_name = values.get("model_name")
max_length = values.get("max_length")
cache_dir = values.get("cache_dir")
threads = values.get("threads")
values["_model"] = FlagEmbedding(
model_name=model_name,
max_length=max_length,
cache_dir=cache_dir,
threads=threads,
)
except ImportError as ie:
raise ImportError(
"Could not import 'fastembed' Python package. "
"Please install it with `pip install fastembed`."
) from ie
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Generate embeddings for documents using FastEmbed.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings: List[np.ndarray]
if self.doc_embed_type == "passage":
embeddings = self._model.passage_embed(texts)
else:
embeddings = self._model.embed(texts)
return [e.tolist() for e in embeddings]
def embed_query(self, text: str) -> List[float]:
"""Generate query embeddings using FastEmbed.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
query_embeddings: np.ndarray = next(self._model.query_embed(text))
return query_embeddings.tolist()
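# A small usage sketch for FastEmbedEmbeddings; it assumes the `fastembed`
# package is installed and will download the default BAAI/bge-small-en-v1.5
# model on first use.
if __name__ == "__main__":
    embedder = FastEmbedEmbeddings()
    doc_vectors = embedder.embed_documents(["hello world", "goodbye world"])
    query_vector = embedder.embed_query("hello world")
    print(len(doc_vectors), len(doc_vectors[0]), len(query_vector))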
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~vectorstores~baiducloud_vector_search.py | import logging
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
)
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
logger = logging.getLogger(__name__)
class BESVectorStore(VectorStore):
"""`Baidu Elasticsearch` vector store.
Example:
.. code-block:: python
from langchain.vectorstores import BESVectorStore
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = BESVectorStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
bes_url="http://localhost:9200"
)
Args:
index_name: Name of the Elasticsearch index to create.
bes_url: URL of the Baidu Elasticsearch instance to connect to.
user: Username to use when connecting to Elasticsearch.
password: Password to use when connecting to Elasticsearch.
More information can be obtained from:
https://cloud.baidu.com/doc/BES/s/8llyn0hh4
"""
def __init__(
self,
index_name: str,
bes_url: str,
user: Optional[str] = None,
password: Optional[str] = None,
embedding: Optional[Embeddings] = None,
**kwargs: Optional[dict],
) -> None:
self.embedding = embedding
self.index_name = index_name
self.query_field = kwargs.get("query_field", "text")
self.vector_query_field = kwargs.get("vector_query_field", "vector")
self.space_type = kwargs.get("space_type", "cosine")
self.index_type = kwargs.get("index_type", "linear")
self.index_params = kwargs.get("index_params") or {}
if bes_url is not None:
self.client = BESVectorStore.bes_client(
bes_url=bes_url, username=user, password=password
)
else:
raise ValueError("""Please specified a bes connection url.""")
@property
def embeddings(self) -> Optional[Embeddings]:
return self.embedding
@staticmethod
def bes_client(
*,
bes_url: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> "Elasticsearch":
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
connection_params: Dict[str, Any] = {}
connection_params["hosts"] = [bes_url]
if username and password:
connection_params["basic_auth"] = (username, password)
es_client = elasticsearch.Elasticsearch(**connection_params)
try:
es_client.info()
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise e
return es_client
def _create_index_if_not_exists(self, dims_length: Optional[int] = None) -> None:
"""Create the index if it doesn't already exist.
Args:
dims_length: Length of the embedding vectors.
"""
if self.client.indices.exists(index=self.index_name):
logger.info(f"Index {self.index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
+ "when the index doesn't already exist. "
)
indexMapping = self._index_mapping(dims_length=dims_length)
logger.debug(
f"Creating index {self.index_name} with mappings {indexMapping}"
)
self.client.indices.create(
index=self.index_name,
body={
"settings": {"index": {"knn": True}},
"mappings": {"properties": indexMapping},
},
)
def _index_mapping(self, dims_length: Union[int, None]) -> Dict:
"""
Executes when the index is created.
Args:
dims_length: Numeric length of the embedding vectors,
or None if not using vector-based query.
            index_params: The extra parameters for creating the index.
Returns:
Dict: The Elasticsearch settings and mappings for the strategy.
"""
if "linear" == self.index_type:
return {
self.vector_query_field: {
"type": "bpack_vector",
"dims": dims_length,
"build_index": self.index_params.get("build_index", False),
}
}
elif "hnsw" == self.index_type:
return {
self.vector_query_field: {
"type": "bpack_vector",
"dims": dims_length,
"index_type": "hnsw",
"space_type": self.space_type,
"parameters": {
"ef_construction": self.index_params.get(
"hnsw_ef_construction", 200
),
"m": self.index_params.get("hnsw_m", 4),
},
}
}
else:
return {
self.vector_query_field: {
"type": "bpack_vector",
"model_id": self.index_params.get("model_id", ""),
}
}
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[bool]:
"""Delete documents from the index.
Args:
ids: List of ids of documents to delete
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
body = []
if ids is None:
raise ValueError("ids must be provided.")
for _id in ids:
body.append({"_op_type": "delete", "_index": self.index_name, "_id": _id})
if len(body) > 0:
try:
bulk(
self.client,
body,
refresh=kwargs.get("refresh_indices", True),
ignore_status=404,
)
logger.debug(f"Deleted {len(body)} texts from index")
return True
except BulkIndexError as e:
logger.error(f"Error deleting texts: {e}")
raise e
else:
logger.info("No documents to delete")
return False
def _query_body(
self,
query_vector: Union[List[float], None],
filter: Optional[dict] = None,
search_params: Dict = {},
) -> Dict:
query_vector_body = {"vector": query_vector, "k": search_params.get("k", 2)}
if filter is not None and len(filter) != 0:
query_vector_body["filter"] = filter
if "linear" == self.index_type:
query_vector_body["linear"] = True
else:
query_vector_body["ef"] = search_params.get("ef", 10)
return {
"size": search_params.get("size", 4),
"query": {"knn": {self.vector_query_field: query_vector_body}},
}
def _search(
self,
query: Optional[str] = None,
query_vector: Union[List[float], None] = None,
filter: Optional[dict] = None,
custom_query: Optional[Callable[[Dict, Union[str, None]], Dict]] = None,
search_params: Dict = {},
) -> List[Tuple[Document, float]]:
"""Return searched documents result from BES
Args:
query: Text to look up documents similar to.
query_vector: Embedding to look up documents similar to.
filter: Array of Baidu ElasticSearch filter clauses to apply to the query.
custom_query: Function to modify the query body before it is sent to BES.
Returns:
List of Documents most similar to the query and score for each
"""
if self.embedding and query is not None:
query_vector = self.embedding.embed_query(query)
query_body = self._query_body(
query_vector=query_vector, filter=filter, search_params=search_params
)
if custom_query is not None:
query_body = custom_query(query_body, query)
logger.debug(f"Calling custom_query, Query body now: {query_body}")
logger.debug(f"Query body: {query_body}")
# Perform the kNN search on the BES index and return the results.
response = self.client.search(index=self.index_name, body=query_body)
logger.debug(f"response={response}")
hits = [hit for hit in response["hits"]["hits"]]
docs_and_scores = [
(
Document(
page_content=hit["_source"][self.query_field],
metadata=hit["_source"]["metadata"],
),
hit["_score"],
)
for hit in hits
]
return docs_and_scores
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query,
in descending order of similarity.
"""
results = self.similarity_search_with_score(
query=query, k=k, filter=filter, **kwargs
)
return [doc for doc, _ in results]
def similarity_search_with_score(
self, query: str, k: int, filter: Optional[dict] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
size: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query and score for each
"""
search_params = kwargs.get("search_params") or {}
if len(search_params) == 0 or search_params.get("size") is None:
search_params["size"] = k
return self._search(query=query, filter=filter, **kwargs)
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Optional[Embeddings] = None,
**kwargs: Any,
) -> "BESVectorStore":
"""Construct BESVectorStore wrapper from documents.
Args:
documents: List of documents to add to the Elasticsearch index.
embedding: Embedding function to use to embed the texts.
Do not provide if using a strategy
that doesn't require inference.
kwargs: create index key words arguments
"""
vectorStore = BESVectorStore._bes_vector_store(embedding=embedding, **kwargs)
# Encode the provided texts and add them to the newly created index.
vectorStore.add_documents(documents)
return vectorStore
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[Dict[str, Any]]] = None,
**kwargs: Any,
) -> "BESVectorStore":
"""Construct BESVectorStore wrapper from raw documents.
Args:
texts: List of texts to add to the Elasticsearch index.
embedding: Embedding function to use to embed the texts.
metadatas: Optional list of metadatas associated with the texts.
index_name: Name of the Elasticsearch index to create.
kwargs: create index key words arguments
"""
vectorStore = BESVectorStore._bes_vector_store(embedding=embedding, **kwargs)
# Encode the provided texts and add them to the newly created index.
vectorStore.add_texts(texts, metadatas=metadatas, **kwargs)
return vectorStore
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[Any, Any]]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
embeddings = []
create_index_if_not_exists = kwargs.get("create_index_if_not_exists", True)
ids = kwargs.get("ids", [str(uuid.uuid4()) for _ in texts])
refresh_indices = kwargs.get("refresh_indices", True)
requests = []
if self.embedding is not None:
embeddings = self.embedding.embed_documents(list(texts))
dims_length = len(embeddings[0])
if create_index_if_not_exists:
self._create_index_if_not_exists(dims_length=dims_length)
for i, (text, vector) in enumerate(zip(texts, embeddings)):
metadata = metadatas[i] if metadatas else {}
requests.append(
{
"_op_type": "index",
"_index": self.index_name,
self.query_field: text,
self.vector_query_field: vector,
"metadata": metadata,
"_id": ids[i],
}
)
else:
if create_index_if_not_exists:
self._create_index_if_not_exists()
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
requests.append(
{
"_op_type": "index",
"_index": self.index_name,
self.query_field: text,
"metadata": metadata,
"_id": ids[i],
}
)
if len(requests) > 0:
try:
success, failed = bulk(
self.client, requests, stats_only=True, refresh=refresh_indices
)
logger.debug(
f"Added {success} and failed to add {failed} texts to index"
)
logger.debug(f"added texts {ids} to index")
return ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise e
else:
logger.debug("No texts to add to index")
return []
@staticmethod
def _bes_vector_store(
embedding: Optional[Embeddings] = None, **kwargs: Any
) -> "BESVectorStore":
index_name = kwargs.get("index_name")
if index_name is None:
raise ValueError("Please provide an index_name.")
bes_url = kwargs.get("bes_url")
if bes_url is None:
raise ValueError("Please provided a valid bes connection url")
return BESVectorStore(embedding=embedding, **kwargs)
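# A usage sketch for BESVectorStore, assuming a reachable Baidu Elasticsearch
# endpoint; the URL and index name are placeholders, and FakeEmbeddings is used
# only to keep the example self-contained.
if __name__ == "__main__":
    from langchain.embeddings import FakeEmbeddings

    store = BESVectorStore.from_texts(
        ["hello world", "goodbye world"],
        embedding=FakeEmbeddings(size=4),
        index_name="langchain-demo",
        bes_url="http://localhost:9200",
    )
    for doc in store.similarity_search("hello", k=1):
        print(doc.page_content)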
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~document_loaders~docusaurus.py | """Load Documents from Docusaurus Documentation"""
from typing import Any, List, Optional
from langchain.document_loaders.sitemap import SitemapLoader
class DocusaurusLoader(SitemapLoader):
"""
Loader that leverages the SitemapLoader to loop through the generated pages of a
Docusaurus Documentation website and extracts the content by looking for specific
HTML tags. By default, the parser searches for the main content of the Docusaurus
page, which is normally the <article>. You also have the option to define your own
custom HTML tags by providing them as a list, for example: ["div", ".main", "a"].
"""
def __init__(
self,
url: str,
custom_html_tags: Optional[List[str]] = None,
**kwargs: Any,
):
"""
Initialize DocusaurusLoader
Args:
url: The base URL of the Docusaurus website.
custom_html_tags: Optional custom html tags to extract content from pages.
kwargs: Additional args to extend the underlying SitemapLoader, for example:
filter_urls, blocksize, meta_function, is_local, continue_on_failure
"""
if not kwargs.get("is_local"):
url = f"{url}/sitemap.xml"
self.custom_html_tags = custom_html_tags or ["main article"]
super().__init__(
url,
parsing_function=kwargs.get("parsing_function") or self._parsing_function,
**kwargs,
)
    def _parsing_function(self, content: Any) -> str:
        """Parses specific elements from a Docusaurus page."""
        relevant_elements = content.select(",".join(self.custom_html_tags))
        # Join only the text of the selected elements so that
        # `custom_html_tags` actually restricts the extracted content.
        return "\n".join(element.get_text() for element in relevant_elements)
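# A usage sketch for DocusaurusLoader; the site below is only an example, and
# network access plus the sitemap dependencies (beautifulsoup4, lxml) are
# assumed.
if __name__ == "__main__":
    loader = DocusaurusLoader(
        "https://python.langchain.com",
        filter_urls=["https://python.langchain.com/docs/get_started"],
    )
    docs = loader.load()
    print(len(docs))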
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~vectorstores~opensearch_vector_search.py | from __future__ import annotations
import uuid
import warnings
from typing import Any, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.schema import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.utils import maximal_marginal_relevance
IMPORT_OPENSEARCH_PY_ERROR = (
"Could not import OpenSearch. Please install it with `pip install opensearch-py`."
)
SCRIPT_SCORING_SEARCH = "script_scoring"
PAINLESS_SCRIPTING_SEARCH = "painless_scripting"
MATCH_ALL_QUERY = {"match_all": {}} # type: Dict
def _import_opensearch() -> Any:
"""Import OpenSearch if available, otherwise raise error."""
try:
from opensearchpy import OpenSearch
except ImportError:
raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
return OpenSearch
def _import_bulk() -> Any:
"""Import bulk if available, otherwise raise error."""
try:
from opensearchpy.helpers import bulk
except ImportError:
raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
return bulk
def _import_not_found_error() -> Any:
"""Import not found error if available, otherwise raise error."""
try:
from opensearchpy.exceptions import NotFoundError
except ImportError:
raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
return NotFoundError
def _get_opensearch_client(opensearch_url: str, **kwargs: Any) -> Any:
"""Get OpenSearch client from the opensearch_url, otherwise raise error."""
try:
opensearch = _import_opensearch()
client = opensearch(opensearch_url, **kwargs)
except ValueError as e:
raise ImportError(
f"OpenSearch client string provided is not in proper format. "
f"Got error: {e} "
)
return client
def _validate_embeddings_and_bulk_size(embeddings_length: int, bulk_size: int) -> None:
"""Validate Embeddings Length and Bulk Size."""
if embeddings_length == 0:
raise RuntimeError("Embeddings size is zero")
if bulk_size < embeddings_length:
raise RuntimeError(
f"The embeddings count, {embeddings_length} is more than the "
f"[bulk_size], {bulk_size}. Increase the value of [bulk_size]."
)
def _validate_aoss_with_engines(is_aoss: bool, engine: str) -> None:
"""Validate AOSS with the engine."""
if is_aoss and engine != "nmslib" and engine != "faiss":
raise ValueError(
"Amazon OpenSearch Service Serverless only "
"supports `nmslib` or `faiss` engines"
)
def _is_aoss_enabled(http_auth: Any) -> bool:
"""Check if the service is http_auth is set as `aoss`."""
if (
http_auth is not None
and hasattr(http_auth, "service")
and http_auth.service == "aoss"
):
return True
return False
def _bulk_ingest_embeddings(
client: Any,
index_name: str,
embeddings: List[List[float]],
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
vector_field: str = "vector_field",
text_field: str = "text",
mapping: Optional[Dict] = None,
max_chunk_bytes: Optional[int] = 1 * 1024 * 1024,
is_aoss: bool = False,
) -> List[str]:
"""Bulk Ingest Embeddings into given index."""
if not mapping:
mapping = dict()
bulk = _import_bulk()
not_found_error = _import_not_found_error()
requests = []
return_ids = []
mapping = mapping
try:
client.indices.get(index=index_name)
except not_found_error:
client.indices.create(index=index_name, body=mapping)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": index_name,
vector_field: embeddings[i],
text_field: text,
"metadata": metadata,
}
if is_aoss:
request["id"] = _id
else:
request["_id"] = _id
requests.append(request)
return_ids.append(_id)
bulk(client, requests, max_chunk_bytes=max_chunk_bytes)
if not is_aoss:
client.indices.refresh(index=index_name)
return return_ids
def _default_scripting_text_mapping(
dim: int,
vector_field: str = "vector_field",
) -> Dict:
"""For Painless Scripting or Script Scoring,the default mapping to create index."""
return {
"mappings": {
"properties": {
vector_field: {"type": "knn_vector", "dimension": dim},
}
}
}
def _default_text_mapping(
dim: int,
engine: str = "nmslib",
space_type: str = "l2",
ef_search: int = 512,
ef_construction: int = 512,
m: int = 16,
vector_field: str = "vector_field",
) -> Dict:
"""For Approximate k-NN Search, this is the default mapping to create index."""
return {
"settings": {"index": {"knn": True, "knn.algo_param.ef_search": ef_search}},
"mappings": {
"properties": {
vector_field: {
"type": "knn_vector",
"dimension": dim,
"method": {
"name": "hnsw",
"space_type": space_type,
"engine": engine,
"parameters": {"ef_construction": ef_construction, "m": m},
},
}
}
},
}
def _default_approximate_search_query(
query_vector: List[float],
k: int = 4,
vector_field: str = "vector_field",
) -> Dict:
"""For Approximate k-NN Search, this is the default query."""
return {
"size": k,
"query": {"knn": {vector_field: {"vector": query_vector, "k": k}}},
}
def _approximate_search_query_with_boolean_filter(
query_vector: List[float],
boolean_filter: Dict,
k: int = 4,
vector_field: str = "vector_field",
subquery_clause: str = "must",
) -> Dict:
"""For Approximate k-NN Search, with Boolean Filter."""
return {
"size": k,
"query": {
"bool": {
"filter": boolean_filter,
subquery_clause: [
{"knn": {vector_field: {"vector": query_vector, "k": k}}}
],
}
},
}
def _approximate_search_query_with_efficient_filter(
query_vector: List[float],
efficient_filter: Dict,
k: int = 4,
vector_field: str = "vector_field",
) -> Dict:
"""For Approximate k-NN Search, with Efficient Filter for Lucene and
Faiss Engines."""
search_query = _default_approximate_search_query(
query_vector, k=k, vector_field=vector_field
)
search_query["query"]["knn"][vector_field]["filter"] = efficient_filter
return search_query
def _default_script_query(
query_vector: List[float],
k: int = 4,
space_type: str = "l2",
pre_filter: Optional[Dict] = None,
vector_field: str = "vector_field",
) -> Dict:
"""For Script Scoring Search, this is the default query."""
if not pre_filter:
pre_filter = MATCH_ALL_QUERY
return {
"size": k,
"query": {
"script_score": {
"query": pre_filter,
"script": {
"source": "knn_score",
"lang": "knn",
"params": {
"field": vector_field,
"query_value": query_vector,
"space_type": space_type,
},
},
}
},
}
def __get_painless_scripting_source(
space_type: str, vector_field: str = "vector_field"
) -> str:
"""For Painless Scripting, it returns the script source based on space type."""
source_value = (
"(1.0 + " + space_type + "(params.query_value, doc['" + vector_field + "']))"
)
if space_type == "cosineSimilarity":
return source_value
else:
return "1/" + source_value
def _default_painless_scripting_query(
query_vector: List[float],
k: int = 4,
space_type: str = "l2Squared",
pre_filter: Optional[Dict] = None,
vector_field: str = "vector_field",
) -> Dict:
"""For Painless Scripting Search, this is the default query."""
if not pre_filter:
pre_filter = MATCH_ALL_QUERY
source = __get_painless_scripting_source(space_type, vector_field=vector_field)
return {
"size": k,
"query": {
"script_score": {
"query": pre_filter,
"script": {
"source": source,
"params": {
"field": vector_field,
"query_value": query_vector,
},
},
}
},
}
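# A quick look at the request bodies produced by the query builders above,
# using a toy 3-dimensional embedding; handy when debugging raw queries.
if __name__ == "__main__":
    print(_default_approximate_search_query([0.1, 0.2, 0.3], k=2))
    print(_default_script_query([0.1, 0.2, 0.3], k=2, space_type="cosinesimil"))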
class OpenSearchVectorSearch(VectorStore):
"""`Amazon OpenSearch Vector Engine` vector store.
Example:
.. code-block:: python
from langchain.vectorstores import OpenSearchVectorSearch
opensearch_vector_search = OpenSearchVectorSearch(
"http://localhost:9200",
"embeddings",
embedding_function
)
"""
def __init__(
self,
opensearch_url: str,
index_name: str,
embedding_function: Embeddings,
**kwargs: Any,
):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index_name = index_name
http_auth = kwargs.get("http_auth")
self.is_aoss = _is_aoss_enabled(http_auth=http_auth)
self.client = _get_opensearch_client(opensearch_url, **kwargs)
self.engine = kwargs.get("engine")
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def __add(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
bulk_size: int = 500,
**kwargs: Any,
) -> List[str]:
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
index_name = kwargs.get("index_name", self.index_name)
text_field = kwargs.get("text_field", "text")
dim = len(embeddings[0])
engine = kwargs.get("engine", "nmslib")
space_type = kwargs.get("space_type", "l2")
ef_search = kwargs.get("ef_search", 512)
ef_construction = kwargs.get("ef_construction", 512)
m = kwargs.get("m", 16)
vector_field = kwargs.get("vector_field", "vector_field")
max_chunk_bytes = kwargs.get("max_chunk_bytes", 1 * 1024 * 1024)
_validate_aoss_with_engines(self.is_aoss, engine)
mapping = _default_text_mapping(
dim, engine, space_type, ef_search, ef_construction, m, vector_field
)
return _bulk_ingest_embeddings(
self.client,
index_name,
embeddings,
texts,
metadatas=metadatas,
ids=ids,
vector_field=vector_field,
text_field=text_field,
mapping=mapping,
max_chunk_bytes=max_chunk_bytes,
is_aoss=self.is_aoss,
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
bulk_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
bulk_size: Bulk API request count; Default: 500
Returns:
List of ids from adding the texts into the vectorstore.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
"""
embeddings = self.embedding_function.embed_documents(list(texts))
return self.__add(
texts,
embeddings,
metadatas=metadatas,
ids=ids,
bulk_size=bulk_size,
            **kwargs,  # forward caller options such as vector_field / text_field
)
def add_embeddings(
self,
text_embeddings: Iterable[Tuple[str, List[float]]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
bulk_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Add the given texts and embeddings to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
bulk_size: Bulk API request count; Default: 500
Returns:
List of ids from adding the texts into the vectorstore.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
"""
texts, embeddings = zip(*text_embeddings)
return self.__add(
list(texts),
list(embeddings),
metadatas=metadatas,
ids=ids,
bulk_size=bulk_size,
            **kwargs,  # forward caller options such as vector_field / text_field
)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
metadata_field: Document field that metadata is stored in. Defaults to
"metadata".
Can be set to a special value "*" to include the entire document.
Optional Args for Approximate Search:
search_type: "approximate_search"; default: "approximate_search"
boolean_filter: A Boolean filter is a post filter consists of a Boolean
query that contains a k-NN query and a filter.
subquery_clause: Query clause on the knn vector field; default: "must"
lucene_filter: the Lucene algorithm decides whether to perform an exact
k-NN search with pre-filtering or an approximate search with modified
post-filtering. (deprecated, use `efficient_filter`)
efficient_filter: the Lucene Engine or Faiss Engine decides whether to
perform an exact k-NN search with pre-filtering or an approximate search
with modified post-filtering.
Optional Args for Script Scoring Search:
search_type: "script_scoring"; default: "approximate_search"
space_type: "l2", "l1", "linf", "cosinesimil", "innerproduct",
"hammingbit"; default: "l2"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
Optional Args for Painless Scripting Search:
search_type: "painless_scripting"; default: "approximate_search"
space_type: "l2Squared", "l1Norm", "cosineSimilarity"; default: "l2Squared"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
"""
docs_with_scores = self.similarity_search_with_score(query, k, **kwargs)
return [doc[0] for doc in docs_with_scores]
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return docs and it's scores most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
            List of Documents along with their scores, most similar to the query.
Optional Args:
same as `similarity_search`
"""
text_field = kwargs.get("text_field", "text")
metadata_field = kwargs.get("metadata_field", "metadata")
hits = self._raw_similarity_search_with_score(query=query, k=k, **kwargs)
documents_with_scores = [
(
Document(
page_content=hit["_source"][text_field],
metadata=hit["_source"]
if metadata_field == "*" or metadata_field not in hit["_source"]
else hit["_source"][metadata_field],
),
hit["_score"],
)
for hit in hits
]
return documents_with_scores
def _raw_similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[dict]:
"""Return raw opensearch documents (dict) including vectors,
scores most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
            List of dicts with their scores, most similar to the query.
Optional Args:
same as `similarity_search`
"""
embedding = self.embedding_function.embed_query(query)
search_type = kwargs.get("search_type", "approximate_search")
vector_field = kwargs.get("vector_field", "vector_field")
index_name = kwargs.get("index_name", self.index_name)
filter = kwargs.get("filter", {})
if (
self.is_aoss
and search_type != "approximate_search"
and search_type != SCRIPT_SCORING_SEARCH
):
raise ValueError(
"Amazon OpenSearch Service Serverless only "
"supports `approximate_search` and `script_scoring`"
)
if search_type == "approximate_search":
boolean_filter = kwargs.get("boolean_filter", {})
subquery_clause = kwargs.get("subquery_clause", "must")
efficient_filter = kwargs.get("efficient_filter", {})
# `lucene_filter` is deprecated, added for Backwards Compatibility
lucene_filter = kwargs.get("lucene_filter", {})
if boolean_filter != {} and efficient_filter != {}:
raise ValueError(
"Both `boolean_filter` and `efficient_filter` are provided which "
"is invalid"
)
if lucene_filter != {} and efficient_filter != {}:
raise ValueError(
"Both `lucene_filter` and `efficient_filter` are provided which "
"is invalid. `lucene_filter` is deprecated"
)
if lucene_filter != {} and boolean_filter != {}:
raise ValueError(
"Both `lucene_filter` and `boolean_filter` are provided which "
"is invalid. `lucene_filter` is deprecated"
)
if (
efficient_filter == {}
and boolean_filter == {}
and lucene_filter == {}
and filter != {}
):
if self.engine in ["faiss", "lucene"]:
efficient_filter = filter
else:
boolean_filter = filter
if boolean_filter != {}:
search_query = _approximate_search_query_with_boolean_filter(
embedding,
boolean_filter,
k=k,
vector_field=vector_field,
subquery_clause=subquery_clause,
)
elif efficient_filter != {}:
search_query = _approximate_search_query_with_efficient_filter(
embedding, efficient_filter, k=k, vector_field=vector_field
)
elif lucene_filter != {}:
warnings.warn(
"`lucene_filter` is deprecated. Please use the keyword argument"
" `efficient_filter`"
)
search_query = _approximate_search_query_with_efficient_filter(
embedding, lucene_filter, k=k, vector_field=vector_field
)
else:
search_query = _default_approximate_search_query(
embedding, k=k, vector_field=vector_field
)
elif search_type == SCRIPT_SCORING_SEARCH:
space_type = kwargs.get("space_type", "l2")
pre_filter = kwargs.get("pre_filter", MATCH_ALL_QUERY)
search_query = _default_script_query(
embedding, k, space_type, pre_filter, vector_field
)
elif search_type == PAINLESS_SCRIPTING_SEARCH:
space_type = kwargs.get("space_type", "l2Squared")
pre_filter = kwargs.get("pre_filter", MATCH_ALL_QUERY)
search_query = _default_painless_scripting_query(
embedding, k, space_type, pre_filter, vector_field
)
else:
raise ValueError("Invalid `search_type` provided as an argument")
response = self.client.search(index=index_name, body=search_query)
return [hit for hit in response["hits"]["hits"]]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> list[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
vector_field = kwargs.get("vector_field", "vector_field")
text_field = kwargs.get("text_field", "text")
metadata_field = kwargs.get("metadata_field", "metadata")
# Get embedding of the user query
embedding = self.embedding_function.embed_query(query)
# Do ANN/KNN search to get top fetch_k results where fetch_k >= k
results = self._raw_similarity_search_with_score(query, fetch_k, **kwargs)
embeddings = [result["_source"][vector_field] for result in results]
# Rerank top k results using MMR, (mmr_selected is a list of indices)
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
return [
Document(
page_content=results[i]["_source"][text_field],
metadata=results[i]["_source"][metadata_field],
)
for i in mmr_selected
]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
bulk_size: int = 500,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> OpenSearchVectorSearch:
"""Construct OpenSearchVectorSearch wrapper from raw texts.
Example:
.. code-block:: python
from langchain.vectorstores import OpenSearchVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
opensearch_vector_search = OpenSearchVectorSearch.from_texts(
texts,
embeddings,
opensearch_url="http://localhost:9200"
)
OpenSearch by default supports Approximate Search powered by nmslib, faiss
and lucene engines recommended for large datasets. Also supports brute force
search through Script Scoring and Painless Scripting.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
Optional Keyword Args for Approximate Search:
engine: "nmslib", "faiss", "lucene"; default: "nmslib"
space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2"
ef_search: Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 512
ef_construction: Size of the dynamic list used during k-NN graph creation.
Higher values lead to more accurate graph but slower indexing speed;
default: 512
m: Number of bidirectional links created for each new element. Large impact
on memory consumption. Between 2 and 100; default: 16
Keyword Args for Script Scoring or Painless Scripting:
is_appx_search: False
"""
embeddings = embedding.embed_documents(texts)
return cls.from_embeddings(
embeddings,
texts,
embedding,
metadatas=metadatas,
bulk_size=bulk_size,
ids=ids,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
embeddings: List[List[float]],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
bulk_size: int = 500,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> OpenSearchVectorSearch:
"""Construct OpenSearchVectorSearch wrapper from pre-vectorized embeddings.
Example:
.. code-block:: python
from langchain.vectorstores import OpenSearchVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embedder = OpenAIEmbeddings()
embeddings = embedder.embed_documents(["foo", "bar"])
opensearch_vector_search = OpenSearchVectorSearch.from_embeddings(
embeddings,
texts,
embedder,
opensearch_url="http://localhost:9200"
)
OpenSearch by default supports Approximate Search powered by nmslib, faiss
and lucene engines recommended for large datasets. Also supports brute force
search through Script Scoring and Painless Scripting.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
Optional Keyword Args for Approximate Search:
engine: "nmslib", "faiss", "lucene"; default: "nmslib"
space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2"
ef_search: Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 512
ef_construction: Size of the dynamic list used during k-NN graph creation.
Higher values lead to more accurate graph but slower indexing speed;
default: 512
m: Number of bidirectional links created for each new element. Large impact
on memory consumption. Between 2 and 100; default: 16
Keyword Args for Script Scoring or Painless Scripting:
is_appx_search: False
"""
opensearch_url = get_from_dict_or_env(
kwargs, "opensearch_url", "OPENSEARCH_URL"
)
# List of arguments that needs to be removed from kwargs
# before passing kwargs to get opensearch client
keys_list = [
"opensearch_url",
"index_name",
"is_appx_search",
"vector_field",
"text_field",
"engine",
"space_type",
"ef_search",
"ef_construction",
"m",
"max_chunk_bytes",
"is_aoss",
]
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
dim = len(embeddings[0])
# Get the index name from either from kwargs or ENV Variable
# before falling back to random generation
index_name = get_from_dict_or_env(
kwargs, "index_name", "OPENSEARCH_INDEX_NAME", default=uuid.uuid4().hex
)
is_appx_search = kwargs.get("is_appx_search", True)
vector_field = kwargs.get("vector_field", "vector_field")
text_field = kwargs.get("text_field", "text")
max_chunk_bytes = kwargs.get("max_chunk_bytes", 1 * 1024 * 1024)
http_auth = kwargs.get("http_auth")
is_aoss = _is_aoss_enabled(http_auth=http_auth)
engine = None
if is_aoss and not is_appx_search:
raise ValueError(
"Amazon OpenSearch Service Serverless only "
"supports `approximate_search`"
)
if is_appx_search:
engine = kwargs.get("engine", "nmslib")
space_type = kwargs.get("space_type", "l2")
ef_search = kwargs.get("ef_search", 512)
ef_construction = kwargs.get("ef_construction", 512)
m = kwargs.get("m", 16)
_validate_aoss_with_engines(is_aoss, engine)
mapping = _default_text_mapping(
dim, engine, space_type, ef_search, ef_construction, m, vector_field
)
else:
mapping = _default_scripting_text_mapping(dim)
[kwargs.pop(key, None) for key in keys_list]
client = _get_opensearch_client(opensearch_url, **kwargs)
_bulk_ingest_embeddings(
client,
index_name,
embeddings,
texts,
ids=ids,
metadatas=metadatas,
vector_field=vector_field,
text_field=text_field,
mapping=mapping,
max_chunk_bytes=max_chunk_bytes,
is_aoss=is_aoss,
)
kwargs["engine"] = engine
return cls(opensearch_url, index_name, embedding, **kwargs)
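# A minimal end-to-end sketch for OpenSearchVectorSearch, assuming an
# OpenSearch node is reachable at the placeholder URL; FakeEmbeddings keeps the
# example self-contained, but any Embeddings implementation can be used.
if __name__ == "__main__":
    from langchain.embeddings import FakeEmbeddings

    docsearch = OpenSearchVectorSearch.from_texts(
        ["foo", "bar", "baz"],
        FakeEmbeddings(size=8),
        opensearch_url="http://localhost:9200",
        index_name="langchain-demo",
        engine="nmslib",
    )
    # Default approximate k-NN search.
    print(docsearch.similarity_search("foo", k=2))
    # Brute-force script-scoring search with a pre-filter.
    print(
        docsearch.similarity_search(
            "foo", k=2, search_type="script_scoring", pre_filter={"match_all": {}}
        )
    )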
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~schema~messages.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Sequence, Union
from typing_extensions import Literal
from langchain.load.serializable import Serializable
from langchain.pydantic_v1 import Extra, Field
if TYPE_CHECKING:
from langchain.prompts.chat import ChatPromptTemplate
def get_buffer_string(
messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
) -> str:
"""Convert sequence of Messages to strings and concatenate them into one string.
Args:
messages: Messages to be converted to strings.
human_prefix: The prefix to prepend to contents of HumanMessages.
        ai_prefix: The prefix to prepend to contents of AIMessages.
Returns:
A single string concatenation of all input messages.
Example:
.. code-block:: python
from langchain.schema import AIMessage, HumanMessage
messages = [
HumanMessage(content="Hi, how are you?"),
AIMessage(content="Good, how are you?"),
]
get_buffer_string(messages)
# -> "Human: Hi, how are you?\nAI: Good, how are you?"
"""
string_messages = []
for m in messages:
if isinstance(m, HumanMessage):
role = human_prefix
elif isinstance(m, AIMessage):
role = ai_prefix
elif isinstance(m, SystemMessage):
role = "System"
elif isinstance(m, FunctionMessage):
role = "Function"
elif isinstance(m, ChatMessage):
role = m.role
else:
raise ValueError(f"Got unsupported message type: {m}")
message = f"{role}: {m.content}"
if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
message += f"{m.additional_kwargs['function_call']}"
string_messages.append(message)
return "\n".join(string_messages)
class BaseMessage(Serializable):
"""The base abstract Message class.
Messages are the inputs and outputs of ChatModels.
"""
content: Union[str, List[Union[str, Dict]]]
"""The string contents of the message."""
additional_kwargs: dict = Field(default_factory=dict)
"""Any additional information."""
type: str
class Config:
extra = Extra.allow
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
def __add__(self, other: Any) -> ChatPromptTemplate:
from langchain.prompts.chat import ChatPromptTemplate
prompt = ChatPromptTemplate(messages=[self])
return prompt + other
def merge_content(
first_content: Union[str, List[Union[str, Dict]]],
second_content: Union[str, List[Union[str, Dict]]],
) -> Union[str, List[Union[str, Dict]]]:
# If first chunk is a string
if isinstance(first_content, str):
# If the second chunk is also a string, then merge them naively
if isinstance(second_content, str):
return first_content + second_content
# If the second chunk is a list, add the first chunk to the start of the list
else:
return_list: List[Union[str, Dict]] = [first_content]
return return_list + second_content
# If both are lists, merge them naively
elif isinstance(second_content, List):
return first_content + second_content
# If the first content is a list, and the second content is a string
else:
# If the last element of the first content is a string
# Add the second content to the last element
if isinstance(first_content[-1], str):
return first_content[:-1] + [first_content[-1] + second_content]
else:
# Otherwise, add the second content as a new element of the list
return first_content + [second_content]
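# A quick illustration of merge_content, which the message chunk classes below
# rely on when concatenating streamed chunks.
if __name__ == "__main__":
    assert merge_content("Hello, ", "world") == "Hello, world"
    assert merge_content("intro: ", [{"type": "a"}]) == ["intro: ", {"type": "a"}]
    assert merge_content(["a"], "b") == ["ab"]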
class BaseMessageChunk(BaseMessage):
"""A Message chunk, which can be concatenated with other Message chunks."""
def _merge_kwargs_dict(
self, left: Dict[str, Any], right: Dict[str, Any]
) -> Dict[str, Any]:
"""Merge additional_kwargs from another BaseMessageChunk into this one."""
merged = left.copy()
for k, v in right.items():
if k not in merged:
merged[k] = v
elif type(merged[k]) != type(v):
raise ValueError(
f'additional_kwargs["{k}"] already exists in this message,'
" but with a different type."
)
elif isinstance(merged[k], str):
merged[k] += v
elif isinstance(merged[k], dict):
merged[k] = self._merge_kwargs_dict(merged[k], v)
else:
raise ValueError(
f"Additional kwargs key {k} already exists in this message."
)
return merged
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, BaseMessageChunk):
# If both are (subclasses of) BaseMessageChunk,
# concat into a single BaseMessageChunk
if isinstance(self, ChatMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
return self.__class__(
content=merge_content(self.content, other.content),
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
else:
raise TypeError(
'unsupported operand type(s) for +: "'
f"{self.__class__.__name__}"
f'" and "{other.__class__.__name__}"'
)
class HumanMessage(BaseMessage):
"""A Message from a human."""
example: bool = False
"""Whether this Message is being passed in to the model as part of an example
conversation.
"""
type: Literal["human"] = "human"
HumanMessage.update_forward_refs()
class HumanMessageChunk(HumanMessage, BaseMessageChunk):
"""A Human Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment] # noqa: E501
class AIMessage(BaseMessage):
"""A Message from an AI."""
example: bool = False
"""Whether this Message is being passed in to the model as part of an example
conversation.
"""
type: Literal["ai"] = "ai"
AIMessage.update_forward_refs()
class AIMessageChunk(AIMessage, BaseMessageChunk):
"""A Message chunk from an AI."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment] # noqa: E501
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, AIMessageChunk):
if self.example != other.example:
raise ValueError(
"Cannot concatenate AIMessageChunks with different example values."
)
return self.__class__(
example=self.example,
content=merge_content(self.content, other.content),
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
return super().__add__(other)
class SystemMessage(BaseMessage):
"""A Message for priming AI behavior, usually passed in as the first of a sequence
of input messages.
"""
type: Literal["system"] = "system"
SystemMessage.update_forward_refs()
class SystemMessageChunk(SystemMessage, BaseMessageChunk):
"""A System Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment] # noqa: E501
class FunctionMessage(BaseMessage):
"""A Message for passing the result of executing a function back to a model."""
name: str
"""The name of the function that was executed."""
type: Literal["function"] = "function"
FunctionMessage.update_forward_refs()
class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
"""A Function Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, FunctionMessageChunk):
if self.name != other.name:
raise ValueError(
"Cannot concatenate FunctionMessageChunks with different names."
)
return self.__class__(
name=self.name,
content=merge_content(self.content, other.content),
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
return super().__add__(other)
class ToolMessage(BaseMessage):
"""A Message for passing the result of executing a tool back to a model."""
tool_call_id: str
"""Tool call that this message is responding to."""
type: Literal["tool"] = "tool"
ToolMessage.update_forward_refs()
class ToolMessageChunk(ToolMessage, BaseMessageChunk):
"""A Tool Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ToolMessageChunk"] = "ToolMessageChunk" # type: ignore[assignment]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ToolMessageChunk):
if self.tool_call_id != other.tool_call_id:
raise ValueError(
"Cannot concatenate ToolMessageChunks with different names."
)
return self.__class__(
tool_call_id=self.tool_call_id,
content=merge_content(self.content, other.content),
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
return super().__add__(other)
class ChatMessage(BaseMessage):
"""A Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
ChatMessage.update_forward_refs()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""A Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
raise ValueError(
"Cannot concatenate ChatMessageChunks with different roles."
)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
return super().__add__(other)
AnyMessage = Union[
AIMessage, HumanMessage, ChatMessage, SystemMessage, FunctionMessage, ToolMessage
]
def _message_to_dict(message: BaseMessage) -> dict:
return {"type": message.type, "data": message.dict()}
def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
"""Convert a sequence of Messages to a list of dictionaries.
Args:
messages: Sequence of messages (as BaseMessages) to convert.
Returns:
List of messages as dicts.
"""
return [_message_to_dict(m) for m in messages]
def _message_from_dict(message: dict) -> BaseMessage:
_type = message["type"]
if _type == "human":
return HumanMessage(**message["data"])
elif _type == "ai":
return AIMessage(**message["data"])
elif _type == "system":
return SystemMessage(**message["data"])
elif _type == "chat":
return ChatMessage(**message["data"])
elif _type == "function":
return FunctionMessage(**message["data"])
elif _type == "tool":
return ToolMessage(**message["data"])
else:
raise ValueError(f"Got unexpected message type: {_type}")
def messages_from_dict(messages: List[dict]) -> List[BaseMessage]:
"""Convert a sequence of messages from dicts to Message objects.
Args:
messages: Sequence of messages (as dicts) to convert.
Returns:
List of messages (BaseMessages).
"""
return [_message_from_dict(m) for m in messages]
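# Hedged round-trip sketch (not part of the upstream module): shows that
# ``messages_to_dict`` and ``messages_from_dict`` are inverses for the message
# classes defined above. ``_example_message_round_trip`` is a hypothetical
# helper added only for illustration.
def _example_message_round_trip() -> None:
    original = [
        HumanMessage(content="What is LangChain?"),
        AIMessage(content="A framework for building LLM applications."),
    ]
    restored = messages_from_dict(messages_to_dict(original))
    assert [m.content for m in restored] == [m.content for m in original]
    assert [m.type for m in restored] == ["human", "ai"]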
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~vectorstores~weaviate.py | from __future__ import annotations
import datetime
import os
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
)
from uuid import uuid4
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import weaviate
def _default_schema(index_name: str) -> Dict:
return {
"class": index_name,
"properties": [
{
"name": "text",
"dataType": ["text"],
}
],
}
def _create_weaviate_client(
url: Optional[str] = None,
api_key: Optional[str] = None,
**kwargs: Any,
) -> weaviate.Client:
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`"
)
url = url or os.environ.get("WEAVIATE_URL")
api_key = api_key or os.environ.get("WEAVIATE_API_KEY")
auth = weaviate.auth.AuthApiKey(api_key=api_key) if api_key else None
return weaviate.Client(url=url, auth_client_secret=auth, **kwargs)
def _default_score_normalizer(val: float) -> float:
return 1 - 1 / (1 + np.exp(val))
def _json_serializable(value: Any) -> Any:
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
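# Illustrative sketch (assumption: the private helpers above keep their
# current behavior): ``_default_score_normalizer`` squashes a raw score
# through a sigmoid into the (0, 1) range, and ``_json_serializable`` turns
# datetimes into ISO-8601 strings so they can be stored as Weaviate payload.
# ``_example_helper_behavior`` is a hypothetical helper added for illustration.
def _example_helper_behavior() -> None:
    assert _default_score_normalizer(0.0) == 0.5
    assert 0.0 < _default_score_normalizer(-2.0) < 1.0
    assert _default_score_normalizer(-2.0) < _default_score_normalizer(2.0)
    assert _json_serializable(datetime.datetime(2024, 1, 10)) == "2024-01-10T00:00:00"
    assert _json_serializable("plain text") == "plain text"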
class Weaviate(VectorStore):
"""`Weaviate` vector store.
To use, you should have the ``weaviate-client`` python package installed.
Example:
.. code-block:: python
import weaviate
from langchain.vectorstores import Weaviate
client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...)
weaviate = Weaviate(client, index_name, text_key)
"""
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_score_normalizer,
by_text: bool = True,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._embedding = embedding
self._text_key = text_key
self._query_attrs = [self._text_key]
self.relevance_score_fn = relevance_score_fn
self._by_text = by_text
if attributes is not None:
self._query_attrs.extend(attributes)
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding
def _select_relevance_score_fn(self) -> Callable[[float], float]:
return (
self.relevance_score_fn
if self.relevance_score_fn
else _default_score_normalizer
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
ids = []
embeddings: Optional[List[List[float]]] = None
if self._embedding:
if not isinstance(texts, list):
texts = list(texts)
embeddings = self._embedding.embed_documents(texts)
with self._client.batch as batch:
for i, text in enumerate(texts):
data_properties = {self._text_key: text}
if metadatas is not None:
for key, val in metadatas[i].items():
data_properties[key] = _json_serializable(val)
# Allow for ids (consistent w/ other methods)
                # or uuids (backwards compatible w/ existing arg)
# If the UUID of one of the objects already exists
# then the existing object will be replaced by the new object.
_id = get_valid_uuid(uuid4())
if "uuids" in kwargs:
_id = kwargs["uuids"][i]
elif "ids" in kwargs:
_id = kwargs["ids"][i]
batch.add_data_object(
data_object=data_properties,
class_name=self._index_name,
uuid=_id,
vector=embeddings[i] if embeddings else None,
)
ids.append(_id)
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
if self._by_text:
return self.similarity_search_by_text(query, k, **kwargs)
else:
if self._embedding is None:
raise ValueError(
"_embedding cannot be None for similarity_search when "
"_by_text=False"
)
embedding = self._embedding.embed_query(query)
return self.similarity_search_by_vector(embedding, k, **kwargs)
def similarity_search_by_text(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
content: Dict[str, Any] = {"concepts": [query]}
if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance")
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
if kwargs.get("additional"):
query_obj = query_obj.with_additional(kwargs.get("additional"))
result = query_obj.with_near_text(content).with_limit(k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Look up similar documents by embedding vector in Weaviate."""
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
if kwargs.get("additional"):
query_obj = query_obj.with_additional(kwargs.get("additional"))
result = query_obj.with_near_vector(vector).with_limit(k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding is not None:
embedding = self._embedding.embed_query(query)
else:
raise ValueError(
"max_marginal_relevance_search requires a suitable Embeddings object"
)
return self.max_marginal_relevance_search_by_vector(
embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
results = (
query_obj.with_additional("vector")
.with_near_vector(vector)
.with_limit(fetch_k)
.do()
)
payload = results["data"]["Get"][self._index_name]
embeddings = [result["_additional"]["vector"] for result in payload]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
docs = []
for idx in mmr_selected:
text = payload[idx].pop(self._text_key)
payload[idx].pop("_additional")
meta = payload[idx]
docs.append(Document(page_content=text, metadata=meta))
return docs
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""
        Return a list of documents most similar to the query text, together with a
        similarity score (the dot product of the query and document vectors) for each.
        A higher score represents greater similarity.
"""
if self._embedding is None:
raise ValueError(
"_embedding cannot be None for similarity_search_with_score"
)
content: Dict[str, Any] = {"concepts": [query]}
if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance")
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
embedded_query = self._embedding.embed_query(query)
if not self._by_text:
vector = {"vector": embedded_query}
result = (
query_obj.with_near_vector(vector)
.with_limit(k)
.with_additional("vector")
.do()
)
else:
result = (
query_obj.with_near_text(content)
.with_limit(k)
.with_additional("vector")
.do()
)
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs_and_scores = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
score = np.dot(res["_additional"]["vector"], embedded_query)
docs_and_scores.append((Document(page_content=text, metadata=res), score))
return docs_and_scores
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
*,
client: Optional[weaviate.Client] = None,
weaviate_url: Optional[str] = None,
weaviate_api_key: Optional[str] = None,
batch_size: Optional[int] = None,
index_name: Optional[str] = None,
text_key: str = "text",
by_text: bool = False,
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_score_normalizer,
**kwargs: Any,
) -> Weaviate:
"""Construct Weaviate wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Weaviate instance.
3. Adds the documents to the newly created Weaviate index.
This is intended to be a quick way to get started.
Args:
texts: Texts to add to vector store.
embedding: Text embedding model to use.
metadatas: Metadata associated with each text.
client: weaviate.Client to use.
weaviate_url: The Weaviate URL. If using Weaviate Cloud Services get it
from the ``Details`` tab. Can be passed in as a named param or by
setting the environment variable ``WEAVIATE_URL``. Should not be
specified if client is provided.
weaviate_api_key: The Weaviate API key. If enabled and using Weaviate Cloud
Services, get it from ``Details`` tab. Can be passed in as a named param
or by setting the environment variable ``WEAVIATE_API_KEY``. Should
not be specified if client is provided.
batch_size: Size of batch operations.
index_name: Index name.
text_key: Key to use for uploading/retrieving text to/from vectorstore.
by_text: Whether to search by text or by embedding.
relevance_score_fn: Function for converting whatever distance function the
vector store uses to a relevance score, which is a normalized similarity
score (0 means dissimilar, 1 means similar).
**kwargs: Additional named parameters to pass to ``Weaviate.__init__()``.
Example:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate
embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
texts,
embeddings,
weaviate_url="http://localhost:8080"
)
"""
try:
from weaviate.util import get_valid_uuid
except ImportError as e:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`"
) from e
client = client or _create_weaviate_client(
url=weaviate_url,
api_key=weaviate_api_key,
)
if batch_size:
client.batch.configure(batch_size=batch_size)
index_name = index_name or f"LangChain_{uuid4().hex}"
schema = _default_schema(index_name)
# check whether the index already exists
if not client.schema.exists(index_name):
client.schema.create_class(schema)
embeddings = embedding.embed_documents(texts) if embedding else None
attributes = list(metadatas[0].keys()) if metadatas else None
# If the UUID of one of the objects already exists
# then the existing object will be replaced by the new object.
if "uuids" in kwargs:
uuids = kwargs.pop("uuids")
else:
uuids = [get_valid_uuid(uuid4()) for _ in range(len(texts))]
with client.batch as batch:
for i, text in enumerate(texts):
data_properties = {
text_key: text,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = uuids[i]
# if an embedding strategy is not provided, we let
# weaviate create the embedding. Note that this will only
# work if weaviate has been installed with a vectorizer module
# like text2vec-contextionary for example
params = {
"uuid": _id,
"data_object": data_properties,
"class_name": index_name,
}
if embeddings is not None:
params["vector"] = embeddings[i]
batch.add_data_object(**params)
batch.flush()
return cls(
client,
index_name,
text_key,
embedding=embedding,
attributes=attributes,
relevance_score_fn=relevance_score_fn,
by_text=by_text,
**kwargs,
)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
# TODO: Check if this can be done in bulk
for id in ids:
self._client.data_object.delete(uuid=id)
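# Hedged end-to-end sketch (not part of the upstream module). It assumes a
# Weaviate instance reachable at http://localhost:8080 and a caller that
# supplies an ``Embeddings`` implementation (e.g. OpenAIEmbeddings); the texts
# and index name below are made up for illustration.
def _example_weaviate_usage(embedding: Embeddings) -> None:
    store = Weaviate.from_texts(
        [
            "Weaviate is a vector database.",
            "LangChain wraps Weaviate as a VectorStore.",
        ],
        embedding,
        weaviate_url="http://localhost:8080",
        index_name="LangChainExample",
        by_text=False,
    )
    docs = store.similarity_search("What is Weaviate?", k=1)
    print(docs[0].page_content)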
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~runnables~hub.py | from typing import Any, Optional
from langchain.schema.runnable.base import Input, Output, RunnableBindingBase
class HubRunnable(RunnableBindingBase[Input, Output]):
"""
An instance of a runnable stored in the LangChain Hub.
"""
owner_repo_commit: str
def __init__(
self,
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
from langchain.hub import pull
pulled = pull(owner_repo_commit, api_url=api_url, api_key=api_key)
super_kwargs = {
"kwargs": {},
"config": {},
**kwargs,
"bound": pulled,
"owner_repo_commit": owner_repo_commit,
}
super().__init__(**super_kwargs)
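# Hedged usage sketch (not part of the upstream module). It assumes a runnable
# published on the LangChain Hub under the hypothetical handle
# "my-handle/my-prompt" and a valid Hub API key available to ``pull``; the
# input dict keys depend entirely on the pulled runnable.
def _example_hub_usage() -> None:
    prompt = HubRunnable("my-handle/my-prompt")
    result = prompt.invoke({"question": "What is LangChain?"})
    print(result)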
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~chat_models~pai_eas_endpoint.py | import asyncio
import json
import logging
from functools import partial
from typing import Any, AsyncIterator, Dict, List, Optional, cast
import requests
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import root_validator
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class PaiEasChatEndpoint(BaseChatModel):
"""Eas LLM Service chat model API.
To use, must have a deployed eas chat llm service on AliCloud. One can set the
environment variable ``eas_service_url`` and ``eas_service_token`` set with your eas
service url and service token.
Example:
.. code-block:: python
from langchain.chat_models import PaiEasChatEndpoint
eas_chat_endpoint = PaiEasChatEndpoint(
eas_service_url="your_service_url",
eas_service_token="your_service_token"
)
"""
"""PAI-EAS Service URL"""
eas_service_url: str
"""PAI-EAS Service TOKEN"""
eas_service_token: str
"""PAI-EAS Service Infer Params"""
max_new_tokens: Optional[int] = 512
temperature: Optional[float] = 0.8
top_p: Optional[float] = 0.1
top_k: Optional[int] = 10
do_sample: Optional[bool] = False
use_cache: Optional[bool] = True
stop_sequences: Optional[List[str]] = None
"""Enable stream chat mode."""
streaming: bool = False
"""Key/value arguments to pass to the model. Reserved for future use"""
model_kwargs: Optional[dict] = None
version: Optional[str] = "2.0"
timeout: Optional[int] = 5000
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["eas_service_url"] = get_from_dict_or_env(
values, "eas_service_url", "EAS_SERVICE_URL"
)
values["eas_service_token"] = get_from_dict_or_env(
values, "eas_service_token", "EAS_SERVICE_TOKEN"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
"eas_service_url": self.eas_service_url,
"eas_service_token": self.eas_service_token,
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "pai_eas_chat_endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"stop_sequences": [],
"do_sample": self.do_sample,
"use_cache": self.use_cache,
}
def _invocation_params(
self, stop_sequences: Optional[List[str]], **kwargs: Any
) -> dict:
params = self._default_params
if self.model_kwargs:
params.update(self.model_kwargs)
if self.stop_sequences is not None and stop_sequences is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop_sequences is not None:
params["stop"] = self.stop_sequences
else:
params["stop"] = stop_sequences
return {**params, **kwargs}
def format_request_payload(
self, messages: List[BaseMessage], **model_kwargs: Any
) -> dict:
prompt: Dict[str, Any] = {}
user_content: List[str] = []
assistant_content: List[str] = []
for message in messages:
"""Converts message to a dict according to role"""
content = cast(str, message.content)
if isinstance(message, HumanMessage):
user_content = user_content + [content]
elif isinstance(message, AIMessage):
assistant_content = assistant_content + [content]
elif isinstance(message, SystemMessage):
prompt["system_prompt"] = content
elif isinstance(message, ChatMessage) and message.role in [
"user",
"assistant",
"system",
]:
if message.role == "system":
prompt["system_prompt"] = content
elif message.role == "user":
user_content = user_content + [content]
elif message.role == "assistant":
assistant_content = assistant_content + [content]
else:
supported = ",".join([role for role in ["user", "assistant", "system"]])
raise ValueError(
f"""Received unsupported role.
Supported roles for the LLaMa Foundation Model: {supported}"""
)
prompt["prompt"] = user_content[len(user_content) - 1]
history = [
history_item
for _, history_item in enumerate(zip(user_content[:-1], assistant_content))
]
prompt["history"] = history
return {**prompt, **model_kwargs}
def _format_response_payload(
self, output: bytes, stop_sequences: Optional[List[str]]
) -> str:
"""Formats response"""
try:
text = json.loads(output)["response"]
if stop_sequences:
text = enforce_stop_tokens(text, stop_sequences)
return text
except Exception as e:
if isinstance(e, json.decoder.JSONDecodeError):
return output.decode("utf-8")
raise e
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
params = self._invocation_params(stop, **kwargs)
request_payload = self.format_request_payload(messages, **params)
response_payload = self._call_eas(request_payload)
generated_text = self._format_response_payload(response_payload, params["stop"])
if run_manager:
run_manager.on_llm_new_token(generated_text)
return generated_text
def _call_eas(self, query_body: dict) -> Any:
"""Generate text from the eas service."""
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"{self.eas_service_token}",
}
# make request
response = requests.post(
self.eas_service_url, headers=headers, json=query_body, timeout=self.timeout
)
if response.status_code != 200:
raise Exception(
f"Request failed with status code {response.status_code}"
f" and message {response.text}"
)
return response.text
def _call_eas_stream(self, query_body: dict) -> Any:
"""Generate text from the eas service."""
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"{self.eas_service_token}",
}
# make request
response = requests.post(
self.eas_service_url, headers=headers, json=query_body, timeout=self.timeout
)
if response.status_code != 200:
raise Exception(
f"Request failed with status code {response.status_code}"
f" and message {response.text}"
)
return response
def _convert_chunk_to_message_message(
self,
chunk: str,
) -> AIMessageChunk:
data = json.loads(chunk.encode("utf-8"))
return AIMessageChunk(content=data.get("response", ""))
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
params = self._invocation_params(stop, **kwargs)
request_payload = self.format_request_payload(messages, **params)
request_payload["use_stream_chat"] = True
response = self._call_eas_stream(request_payload)
for chunk in response.iter_lines(
chunk_size=8192, decode_unicode=False, delimiter=b"\0"
):
if chunk:
content = self._convert_chunk_to_message_message(chunk)
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in params["stop"]:
if stop_seq in content.content:
stop_seq_found = stop_seq
                # truncate the chunk at the stop sequence, if one was found
                if stop_seq_found:
                    content.content = content.content[
                        : content.content.index(stop_seq_found)
                    ]
                # yield the (possibly truncated) chunk, if any content remains
                if content.content:
                    if run_manager:
                        await run_manager.on_llm_new_token(cast(str, content.content))
                    yield ChatGenerationChunk(message=content)
# break if stop sequence found
if stop_seq_found:
break
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
if stream if stream is not None else self.streaming:
generation: Optional[ChatGenerationChunk] = None
async for chunk in self._astream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
):
generation = chunk
assert generation is not None
return ChatResult(generations=[generation])
func = partial(
self._generate, messages, stop=stop, run_manager=run_manager, **kwargs
)
return await asyncio.get_event_loop().run_in_executor(None, func)
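# Hedged sketch (not part of the upstream module): demonstrates how
# ``format_request_payload`` flattens a transcript -- the last human message
# becomes ``prompt``, earlier (human, ai) pairs become ``history`` and a
# system message becomes ``system_prompt``. The URL and token below are
# placeholders, and ``_example_request_payload`` is a hypothetical helper.
def _example_request_payload() -> None:
    endpoint = PaiEasChatEndpoint(
        eas_service_url="http://example.invalid/eas",
        eas_service_token="placeholder-token",
    )
    payload = endpoint.format_request_payload(
        [
            SystemMessage(content="You are a helpful assistant."),
            HumanMessage(content="Hi"),
            AIMessage(content="Hello! How can I help?"),
            HumanMessage(content="Tell me about PAI-EAS."),
        ],
        temperature=0.8,
    )
    assert payload["prompt"] == "Tell me about PAI-EAS."
    assert payload["history"] == [("Hi", "Hello! How can I help?")]
    assert payload["system_prompt"] == "You are a helpful assistant."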
| [
"{}"
] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~vectorstores~qdrant.py | from __future__ import annotations
import asyncio
import functools
import uuid
import warnings
from itertools import islice
from operator import itemgetter
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from qdrant_client import grpc # noqa
from qdrant_client.conversions import common_types
from qdrant_client.http import models as rest
DictFilter = Dict[str, Union[str, int, bool, dict, list]]
MetadataFilter = Union[DictFilter, common_types.Filter]
class QdrantException(Exception):
"""`Qdrant` related exceptions."""
def sync_call_fallback(method: Callable) -> Callable:
"""
Decorator to call the synchronous method of the class if the async method is not
    implemented. This decorator should only be used on methods that are defined
    as async in the class.
"""
@functools.wraps(method)
async def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
try:
return await method(self, *args, **kwargs)
except NotImplementedError:
# If the async method is not implemented, call the synchronous method
# by removing the first letter from the method name. For example,
            # if the async method is called ``aadd_texts``, the synchronous method
            # will be called ``add_texts``.
sync_method = functools.partial(
getattr(self, method.__name__[1:]), *args, **kwargs
)
return await asyncio.get_event_loop().run_in_executor(None, sync_method)
return wrapper
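# Hedged toy example (not part of the upstream module): shows the fallback
# behavior of ``sync_call_fallback``. The ``_Toy`` class is hypothetical; when
# its async method raises NotImplementedError, the call is routed to the
# synchronous method whose name drops the leading "a".
def _example_sync_fallback() -> None:
    class _Toy:
        def search(self, query: str) -> str:
            return f"sync:{query}"
        @sync_call_fallback
        async def asearch(self, query: str) -> str:
            raise NotImplementedError
    assert asyncio.run(_Toy().asearch("hello")) == "sync:hello"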
class Qdrant(VectorStore):
"""`Qdrant` vector store.
To use you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from qdrant_client import QdrantClient
from langchain.vectorstores import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
VECTOR_NAME = None
def __init__(
self,
client: Any,
collection_name: str,
embeddings: Optional[Embeddings] = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
distance_strategy: str = "COSINE",
vector_name: Optional[str] = VECTOR_NAME,
embedding_function: Optional[Callable] = None, # deprecated
):
"""Initialize with necessary components."""
try:
import qdrant_client
except ImportError:
raise ImportError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
if not isinstance(client, qdrant_client.QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
if embeddings is None and embedding_function is None:
raise ValueError(
"`embeddings` value can't be None. Pass `Embeddings` instance."
)
if embeddings is not None and embedding_function is not None:
raise ValueError(
"Both `embeddings` and `embedding_function` are passed. "
"Use `embeddings` only."
)
self._embeddings = embeddings
self._embeddings_function = embedding_function
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
self.vector_name = vector_name or self.VECTOR_NAME
if embedding_function is not None:
warnings.warn(
"Using `embedding_function` is deprecated. "
"Pass `Embeddings` instance to `embeddings` instead."
)
if not isinstance(embeddings, Embeddings):
warnings.warn(
"`embeddings` should be an instance of `Embeddings`."
"Using `embeddings` as `embedding_function` which is deprecated"
)
self._embeddings_function = embeddings
self._embeddings = None
self.distance_strategy = distance_strategy.upper()
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embeddings
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
                How many vectors to upload per request.
Default: 64
Returns:
List of ids from adding the texts into the vectorstore.
"""
added_ids = []
for batch_ids, points in self._generate_rest_batches(
texts, metadatas, ids, batch_size
):
self.client.upsert(
collection_name=self.collection_name, points=points, **kwargs
)
added_ids.extend(batch_ids)
return added_ids
@sync_call_fallback
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
                How many vectors to upload per request.
Default: 64
Returns:
List of ids from adding the texts into the vectorstore.
"""
from qdrant_client import grpc # noqa
from qdrant_client.conversions.conversion import RestToGrpc
added_ids = []
async for batch_ids, points in self._agenerate_rest_batches(
texts, metadatas, ids, batch_size
):
await self.client.async_grpc_points.Upsert(
grpc.UpsertPoints(
collection_name=self.collection_name,
points=[RestToGrpc.convert_point_struct(point) for point in points],
)
)
added_ids.extend(batch_ids)
return added_ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(
query,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def asimilarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
results = await self.asimilarity_search_with_score(query, k, filter, **kwargs)
return list(map(itemgetter(0), results))
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
"""
return self.similarity_search_with_score_by_vector(
self._embed_query(query),
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
@sync_call_fallback
async def asimilarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
QdrantClient.async_grpc_points.Search().
Returns:
List of documents most similar to the query text and distance for each.
"""
return await self.asimilarity_search_with_score_by_vector(
self._embed_query(query),
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def asimilarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
QdrantClient.async_grpc_points.Search().
Returns:
List of Documents most similar to the query.
"""
results = await self.asimilarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
"""
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, embedding) # type: ignore[assignment]
results = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=qdrant_filter,
search_params=search_params,
limit=k,
offset=offset,
with_payload=True,
with_vectors=False, # Langchain does not expect vectors to be returned
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in results
]
async def _asearch_with_score_by_vector(
self,
embedding: List[float],
*,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
with_vectors: bool = False,
**kwargs: Any,
) -> Any:
"""Return results most similar to embedding vector."""
from qdrant_client import grpc # noqa
from qdrant_client.conversions.conversion import RestToGrpc
from qdrant_client.http import models as rest
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
if qdrant_filter is not None and isinstance(qdrant_filter, rest.Filter):
qdrant_filter = RestToGrpc.convert_filter(qdrant_filter)
response = await self.client.async_grpc_points.Search(
grpc.SearchPoints(
collection_name=self.collection_name,
vector_name=self.vector_name,
vector=embedding,
filter=qdrant_filter,
params=search_params,
limit=k,
offset=offset,
with_payload=grpc.WithPayloadSelector(enable=True),
with_vectors=grpc.WithVectorsSelector(enable=with_vectors),
score_threshold=score_threshold,
read_consistency=consistency,
**kwargs,
)
)
return response
@sync_call_fallback
async def asimilarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
QdrantClient.async_grpc_points.Search().
Returns:
List of documents most similar to the query text and distance for each.
"""
response = await self._asearch_with_score_by_vector(
embedding,
k=k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point_grpc(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in response.result
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance.
"""
query_embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(
query_embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
@sync_call_fallback
async def amax_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
QdrantClient.async_grpc_points.Search().
Returns:
List of Documents selected by maximal marginal relevance.
"""
query_embedding = self._embed_query(query)
return await self.amax_marginal_relevance_search_by_vector(
query_embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self.max_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def amax_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
QdrantClient.async_grpc_points.Search().
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
"""
results = await self.amax_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
def max_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
"""
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, query_vector) # type: ignore[assignment]
results = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=search_params,
limit=fetch_k,
with_payload=True,
with_vectors=True,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
embeddings = [
result.vector.get(self.vector_name) # type: ignore[index, union-attr]
if self.vector_name is not None
else result.vector
for result in results
]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
return [
(
self._document_from_scored_point(
results[i], self.content_payload_key, self.metadata_payload_key
),
results[i].score,
)
for i in mmr_selected
]
@sync_call_fallback
async def amax_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter, search_params, score_threshold, consistency, **kwargs:
Same meaning as in the synchronous
`max_marginal_relevance_search_with_score_by_vector`.
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
"""
from qdrant_client.conversions.conversion import GrpcToRest
response = await self._asearch_with_score_by_vector(
embedding,
k=fetch_k,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
with_vectors=True,
**kwargs,
)
results = [
GrpcToRest.convert_vectors(result.vectors) for result in response.result
]
embeddings: List[List[float]] = [
result.get(self.vector_name) # type: ignore
if isinstance(result, dict)
else result
for result in results
]
mmr_selected: List[int] = maximal_marginal_relevance(
np.array(embedding),
embeddings,
k=k,
lambda_mult=lambda_mult,
)
return [
(
self._document_from_scored_point_grpc(
response.result[i],
self.content_payload_key,
self.metadata_payload_key,
),
response.result[i].score,
)
for i in mmr_selected
]
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
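Example (illustrative sketch; assumes `qdrant` is an existing instance and
`point_ids` holds ids previously returned by `add_texts`):
.. code-block:: python
deleted = qdrant.delete(ids=point_ids)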
"""
from qdrant_client.http import models as rest
result = self.client.delete(
collection_name=self.collection_name,
points_selector=ids,
)
return result.status == rest.UpdateStatus.COMPLETED
@classmethod
def from_texts(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
batch_size: int = 64,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
on_disk: Optional[bool] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
location:
If `:memory:` - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
If true - use gRPC interface whenever possible in custom methods.
Default: False
https: If true - use HTTPS(SSL) protocol. Default: None
api_key: API key for authentication in Qdrant Cloud. Default: None
prefix:
If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: None
path:
Path in which the vectors will be stored while using local mode.
Default: None
collection_name:
Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func:
Distance function. One of: "Cosine" / "Euclid" / "Dot".
Default: "Cosine"
content_payload_key:
A payload key used to store the content of the document.
Default: "page_content"
metadata_payload_key:
A payload key used to store the metadata of the document.
Default: "metadata"
vector_name:
Name of the vector to be used internally in Qdrant.
Default: None
batch_size:
How many vectors upload per-request.
Default: 64
shard_number: Number of shards in collection. Default is 1, minimum is 1.
replication_factor:
Replication factor for collection. Default is 1, minimum is 1.
Defines how many copies of each shard will be created.
Has an effect only in distributed mode.
write_consistency_factor:
Write consistency factor for collection. Default is 1, minimum is 1.
Defines how many replicas should apply the operation for us to consider
it successful. Increasing this number will make the collection more
resilient to inconsistencies, but will also make it fail if not enough
replicas are available.
Does not have any performance impact.
Has an effect only in distributed mode.
on_disk_payload:
If true - point's payload will not be stored in memory.
It will be read from the disk every time it is requested.
This setting saves RAM by (slightly) increasing the response time.
Note: those payload values that are involved in filtering and are
indexed - remain in RAM.
hnsw_config: Params for HNSW index
optimizers_config: Params for optimizer
wal_config: Params for Write-Ahead-Log
quantization_config:
Params for quantization, if None - quantization will be disabled
init_from:
Use data stored in another collection to initialize this collection
force_recreate:
Force recreating the collection
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(texts, embeddings, "localhost")
"""
qdrant = cls.construct_instance(
texts,
embedding,
location,
url,
port,
grpc_port,
prefer_grpc,
https,
api_key,
prefix,
timeout,
host,
path,
collection_name,
distance_func,
content_payload_key,
metadata_payload_key,
vector_name,
shard_number,
replication_factor,
write_consistency_factor,
on_disk_payload,
hnsw_config,
optimizers_config,
wal_config,
quantization_config,
init_from,
on_disk,
force_recreate,
**kwargs,
)
qdrant.add_texts(texts, metadatas, ids, batch_size)
return qdrant
@classmethod
@sync_call_fallback
async def afrom_texts(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
batch_size: int = 64,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
on_disk: Optional[bool] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
location:
If `:memory:` - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
If true - use gRPC interface whenever possible in custom methods.
Default: False
https: If true - use HTTPS(SSL) protocol. Default: None
api_key: API key for authentication in Qdrant Cloud. Default: None
prefix:
If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: None
path:
Path in which the vectors will be stored while using local mode.
Default: None
collection_name:
Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func:
Distance function. One of: "Cosine" / "Euclid" / "Dot".
Default: "Cosine"
content_payload_key:
A payload key used to store the content of the document.
Default: "page_content"
metadata_payload_key:
A payload key used to store the metadata of the document.
Default: "metadata"
vector_name:
Name of the vector to be used internally in Qdrant.
Default: None
batch_size:
How many vectors upload per-request.
Default: 64
shard_number: Number of shards in collection. Default is 1, minimum is 1.
replication_factor:
Replication factor for collection. Default is 1, minimum is 1.
Defines how many copies of each shard will be created.
Has an effect only in distributed mode.
write_consistency_factor:
Write consistency factor for collection. Default is 1, minimum is 1.
Defines how many replicas should apply the operation for us to consider
it successful. Increasing this number will make the collection more
resilient to inconsistencies, but will also make it fail if not enough
replicas are available.
Does not have any performance impact.
Has an effect only in distributed mode.
on_disk_payload:
If true - point's payload will not be stored in memory.
It will be read from the disk every time it is requested.
This setting saves RAM by (slightly) increasing the response time.
Note: those payload values that are involved in filtering and are
indexed - remain in RAM.
hnsw_config: Params for HNSW index
optimizers_config: Params for optimizer
wal_config: Params for Write-Ahead-Log
quantization_config:
Params for quantization, if None - quantization will be disabled
init_from:
Use data stored in another collection to initialize this collection
force_recreate:
Force recreating the collection
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = await Qdrant.afrom_texts(texts, embeddings, "localhost")
"""
qdrant = await cls.aconstruct_instance(
texts,
embedding,
location,
url,
port,
grpc_port,
prefer_grpc,
https,
api_key,
prefix,
timeout,
host,
path,
collection_name,
distance_func,
content_payload_key,
metadata_payload_key,
vector_name,
shard_number,
replication_factor,
write_consistency_factor,
on_disk_payload,
hnsw_config,
optimizers_config,
wal_config,
quantization_config,
init_from,
on_disk,
force_recreate,
**kwargs,
)
await qdrant.aadd_texts(texts, metadatas, ids, batch_size)
return qdrant
@classmethod
def construct_instance(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
on_disk: Optional[bool] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from grpc import RpcError
from qdrant_client.http import models as rest
from qdrant_client.http.exceptions import UnexpectedResponse
# Just do a single quick embedding to get vector size
partial_embeddings = embedding.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
location=location,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
path=path,
**kwargs,
)
try:
# Skip any validation in case of forced collection recreate.
if force_recreate:
raise ValueError
# Get the vector configuration of the existing collection and vector, if it
# was specified. If the old configuration does not match the current one,
# an exception is being thrown.
collection_info = client.get_collection(collection_name=collection_name)
current_vector_config = collection_info.config.params.vectors
if isinstance(current_vector_config, dict) and vector_name is not None:
if vector_name not in current_vector_config:
raise QdrantException(
f"Existing Qdrant collection {collection_name} does not "
f"contain vector named {vector_name}. Did you mean one of the "
f"existing vectors: {', '.join(current_vector_config.keys())}? "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_vector_config = current_vector_config.get(vector_name) # type: ignore[assignment]
elif isinstance(current_vector_config, dict) and vector_name is None:
raise QdrantException(
f"Existing Qdrant collection {collection_name} uses named vectors. "
f"If you want to reuse it, please set `vector_name` to any of the "
f"existing named vectors: "
f"{', '.join(current_vector_config.keys())}." # noqa
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
elif (
not isinstance(current_vector_config, dict) and vector_name is not None
):
raise QdrantException(
f"Existing Qdrant collection {collection_name} doesn't use named "
f"vectors. If you want to reuse it, please set `vector_name` to "
f"`None`. If you want to recreate the collection, set "
f"`force_recreate` parameter to `True`."
)
# Check if the vector configuration has the same dimensionality.
if current_vector_config.size != vector_size: # type: ignore[union-attr]
raise QdrantException(
f"Existing Qdrant collection is configured for vectors with "
f"{current_vector_config.size} " # type: ignore[union-attr]
f"dimensions. Selected embeddings are {vector_size}-dimensional. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_distance_func = (
current_vector_config.distance.name.upper() # type: ignore[union-attr]
)
if current_distance_func != distance_func:
raise QdrantException(
f"Existing Qdrant collection is configured for "
f"{current_distance_func} similarity, but requested "
f"{distance_func}. Please set `distance_func` parameter to "
f"`{current_distance_func}` if you want to reuse it. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
except (UnexpectedResponse, RpcError, ValueError):
vectors_config = rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
on_disk=on_disk,
)
# If vector name was provided, we're going to use the named vectors feature
# with just a single vector.
if vector_name is not None:
vectors_config = { # type: ignore[assignment]
vector_name: vectors_config,
}
client.recreate_collection(
collection_name=collection_name,
vectors_config=vectors_config,
shard_number=shard_number,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
on_disk_payload=on_disk_payload,
hnsw_config=hnsw_config,
optimizers_config=optimizers_config,
wal_config=wal_config,
quantization_config=quantization_config,
init_from=init_from,
timeout=timeout, # type: ignore[arg-type]
)
qdrant = cls(
client=client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
distance_strategy=distance_func,
vector_name=vector_name,
)
return qdrant
@classmethod
async def aconstruct_instance(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
on_disk: Optional[bool] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from grpc import RpcError
from qdrant_client.http import models as rest
from qdrant_client.http.exceptions import UnexpectedResponse
# Just do a single quick embedding to get vector size
partial_embeddings = await embedding.aembed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
location=location,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
path=path,
**kwargs,
)
try:
# Skip any validation in case of forced collection recreate.
if force_recreate:
raise ValueError
# Get the vector configuration of the existing collection and vector, if it
# was specified. If the old configuration does not match the current one,
# an exception is being thrown.
collection_info = client.get_collection(collection_name=collection_name)
current_vector_config = collection_info.config.params.vectors
if isinstance(current_vector_config, dict) and vector_name is not None:
if vector_name not in current_vector_config:
raise QdrantException(
f"Existing Qdrant collection {collection_name} does not "
f"contain vector named {vector_name}. Did you mean one of the "
f"existing vectors: {', '.join(current_vector_config.keys())}? "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_vector_config = current_vector_config.get(vector_name) # type: ignore[assignment]
elif isinstance(current_vector_config, dict) and vector_name is None:
raise QdrantException(
f"Existing Qdrant collection {collection_name} uses named vectors. "
f"If you want to reuse it, please set `vector_name` to any of the "
f"existing named vectors: "
f"{', '.join(current_vector_config.keys())}." # noqa
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
elif (
not isinstance(current_vector_config, dict) and vector_name is not None
):
raise QdrantException(
f"Existing Qdrant collection {collection_name} doesn't use named "
f"vectors. If you want to reuse it, please set `vector_name` to "
f"`None`. If you want to recreate the collection, set "
f"`force_recreate` parameter to `True`."
)
# Check if the vector configuration has the same dimensionality.
if current_vector_config.size != vector_size: # type: ignore[union-attr]
raise QdrantException(
f"Existing Qdrant collection is configured for vectors with "
f"{current_vector_config.size} " # type: ignore[union-attr]
f"dimensions. Selected embeddings are {vector_size}-dimensional. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_distance_func = (
current_vector_config.distance.name.upper() # type: ignore[union-attr]
)
if current_distance_func != distance_func:
raise QdrantException(
f"Existing Qdrant collection is configured for "
f"{current_vector_config.distance} " # type: ignore[union-attr]
f"similarity. Please set `distance_func` parameter to "
f"`{distance_func}` if you want to reuse it. If you want to "
f"recreate the collection, set `force_recreate` parameter to "
f"`True`."
)
except (UnexpectedResponse, RpcError, ValueError):
vectors_config = rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
on_disk=on_disk,
)
# If vector name was provided, we're going to use the named vectors feature
# with just a single vector.
if vector_name is not None:
vectors_config = { # type: ignore[assignment]
vector_name: vectors_config,
}
client.recreate_collection(
collection_name=collection_name,
vectors_config=vectors_config,
shard_number=shard_number,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
on_disk_payload=on_disk_payload,
hnsw_config=hnsw_config,
optimizers_config=optimizers_config,
wal_config=wal_config,
quantization_config=quantization_config,
init_from=init_from,
timeout=timeout, # type: ignore[arg-type]
)
qdrant = cls(
client=client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
distance_strategy=distance_func,
vector_name=vector_name,
)
return qdrant
@staticmethod
def _cosine_relevance_score_fn(distance: float) -> float:
"""Normalize the distance to a score on a scale [0, 1]."""
return (distance + 1.0) / 2.0
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.distance_strategy == "COSINE":
return self._cosine_relevance_score_fn
elif self.distance_strategy == "DOT":
return self._max_inner_product_relevance_score_fn
elif self.distance_strategy == "EUCLID":
return self._euclidean_relevance_score_fn
else:
raise ValueError(
"Unknown distance strategy, must be cosine, "
"max_inner_product, or euclidean"
)
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
"""
return self.similarity_search_with_score(query, k, **kwargs)
@classmethod
def _build_payloads(
cls,
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
"At least one of the texts is None. Please remove it before "
"calling .from_texts or .add_texts on Qdrant instance."
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append(
{
content_payload_key: text,
metadata_payload_key: metadata,
}
)
return payloads
@classmethod
def _document_from_scored_point(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
@classmethod
def _document_from_scored_point_grpc(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
from qdrant_client.conversions.conversion import grpc_to_payload
payload = grpc_to_payload(scored_point.payload)
return Document(
page_content=payload[content_payload_key],
metadata=payload.get(metadata_payload_key) or {},
)
def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]:
from qdrant_client.http import models as rest
out = []
if isinstance(value, dict):
for _key, value in value.items():
out.extend(self._build_condition(f"{key}.{_key}", value))
elif isinstance(value, list):
for _value in value:
if isinstance(_value, dict):
out.extend(self._build_condition(f"{key}[]", _value))
else:
out.extend(self._build_condition(f"{key}", _value))
else:
out.append(
rest.FieldCondition(
key=f"{self.metadata_payload_key}.{key}",
match=rest.MatchValue(value=value),
)
)
return out
def _qdrant_filter_from_dict(
self, filter: Optional[DictFilter]
) -> Optional[rest.Filter]:
from qdrant_client.http import models as rest
if not filter:
return None
return rest.Filter(
must=[
condition
for key, value in filter.items()
for condition in self._build_condition(key, value)
]
)
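# Filter translation sketch (illustrative): with the default "metadata" payload
# key, a filter such as {"author": "alice", "info": {"lang": "en"}} becomes a
# rest.Filter whose `must` clause matches the payload keys "metadata.author"
# and "metadata.info.lang"; list values produce one match condition per element.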
def _embed_query(self, query: str) -> List[float]:
"""Embed query text.
Used to provide backward compatibility with `embedding_function` argument.
Args:
query: Query text.
Returns:
List of floats representing the query embedding.
"""
if self.embeddings is not None:
embedding = self.embeddings.embed_query(query)
else:
if self._embeddings_function is not None:
embedding = self._embeddings_function(query)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embedding.tolist() if hasattr(embedding, "tolist") else embedding
def _embed_texts(self, texts: Iterable[str]) -> List[List[float]]:
"""Embed search texts.
Used to provide backward compatibility with `embedding_function` argument.
Args:
texts: Iterable of texts to embed.
Returns:
List of floats representing the texts embedding.
"""
if self.embeddings is not None:
embeddings = self.embeddings.embed_documents(list(texts))
if hasattr(embeddings, "tolist"):
embeddings = embeddings.tolist()
elif self._embeddings_function is not None:
embeddings = []
for text in texts:
embedding = self._embeddings_function(text)
if hasattr(embeddings, "tolist"):
embedding = embedding.tolist()
embeddings.append(embedding)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embeddings
async def _aembed_texts(self, texts: Iterable[str]) -> List[List[float]]:
"""Embed search texts.
Used to provide backward compatibility with `embedding_function` argument.
Args:
texts: Iterable of texts to embed.
Returns:
List of floats representing the texts embedding.
"""
if self.embeddings is not None:
embeddings = await self.embeddings.aembed_documents(list(texts))
if hasattr(embeddings, "tolist"):
embeddings = embeddings.tolist()
elif self._embeddings_function is not None:
embeddings = []
for text in texts:
embedding = self._embeddings_function(text)
if hasattr(embeddings, "tolist"):
embedding = embedding.tolist()
embeddings.append(embedding)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embeddings
def _generate_rest_batches(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
) -> Generator[Tuple[List[str], List[rest.PointStruct]], None, None]:
from qdrant_client.http import models as rest
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata and id for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
# Generate the embeddings for all the texts in a batch
batch_embeddings = self._embed_texts(batch_texts)
points = [
rest.PointStruct(
id=point_id,
vector=vector
if self.vector_name is None
else {self.vector_name: vector},
payload=payload,
)
for point_id, vector, payload in zip(
batch_ids,
batch_embeddings,
self._build_payloads(
batch_texts,
batch_metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
)
]
yield batch_ids, points
async def _agenerate_rest_batches(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
) -> AsyncGenerator[Tuple[List[str], List[rest.PointStruct]], None]:
from qdrant_client.http import models as rest
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata and id for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
# Generate the embeddings for all the texts in a batch
batch_embeddings = await self._aembed_texts(batch_texts)
points = [
rest.PointStruct(
id=point_id,
vector=vector
if self.vector_name is None
else {self.vector_name: vector},
payload=payload,
)
for point_id, vector, payload in zip(
batch_ids,
batch_embeddings,
self._build_payloads(
batch_texts,
batch_metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
)
]
yield batch_ids, points
| [] |
2024-01-10 | bentoml/langchain | libs~experimental~langchain_experimental~open_clip~open_clip.py | from typing import Any, Dict, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema.embeddings import Embeddings
class OpenCLIPEmbeddings(BaseModel, Embeddings):
model: Any
preprocess: Any
tokenizer: Any
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that open_clip and torch libraries are installed."""
try:
import open_clip
### Smaller, less performant
# model_name = "ViT-B-32"
# checkpoint = "laion2b_s34b_b79k"
### Larger, more performant
model_name = "ViT-g-14"
checkpoint = "laion2b_s34b_b88k"
model, _, preprocess = open_clip.create_model_and_transforms(
model_name=model_name, pretrained=checkpoint
)
tokenizer = open_clip.get_tokenizer(model_name)
values["model"] = model
values["preprocess"] = preprocess
values["tokenizer"] = tokenizer
except ImportError:
raise ImportError(
"Please ensure both open_clip and torch libraries are installed. "
"pip install open_clip_torch torch"
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
text_features = []
for text in texts:
# Tokenize the text
tokenized_text = self.tokenizer(text)
# Encode the text to get the embeddings
embeddings_tensor = self.model.encode_text(tokenized_text)
# Normalize the embeddings
norm = embeddings_tensor.norm(p=2, dim=1, keepdim=True)
normalized_embeddings_tensor = embeddings_tensor.div(norm)
# Convert normalized tensor to list and add to the text_features list
embeddings_list = normalized_embeddings_tensor.squeeze(0).tolist()
text_features.append(embeddings_list)
return text_features
def embed_query(self, text: str) -> List[float]:
return self.embed_documents([text])[0]
def embed_image(self, uris: List[str]) -> List[List[float]]:
try:
from PIL import Image as _PILImage
except ImportError:
raise ImportError("Please install the PIL library: pip install pillow")
# Open images directly as PIL images
pil_images = [_PILImage.open(uri) for uri in uris]
image_features = []
for pil_image in pil_images:
# Preprocess the image for the model
preprocessed_image = self.preprocess(pil_image).unsqueeze(0)
# Encode the image to get the embeddings
embeddings_tensor = self.model.encode_image(preprocessed_image)
# Normalize the embeddings tensor
norm = embeddings_tensor.norm(p=2, dim=1, keepdim=True)
normalized_embeddings_tensor = embeddings_tensor.div(norm)
# Convert tensor to list and add to the image_features list
embeddings_list = normalized_embeddings_tensor.squeeze(0).tolist()
image_features.append(embeddings_list)
return image_features
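# Minimal usage sketch (illustrative; assumes open_clip_torch, torch and pillow
# are installed and that the referenced image path exists):
# clip_embd = OpenCLIPEmbeddings()
# text_vectors = clip_embd.embed_documents(["a photo of a cat"])
# image_vectors = clip_embd.embed_image(["path/to/image.jpg"])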
| [] |
2024-01-10 | bentoml/langchain | libs~cli~langchain_cli~namespaces~template.py | """
Develop installable templates.
"""
import re
import shutil
import subprocess
from pathlib import Path
from typing import Optional
import typer
from typing_extensions import Annotated
from langchain_cli.utils.packages import get_langserve_export, get_package_root
package_cli = typer.Typer(no_args_is_help=True, add_completion=False)
@package_cli.command()
def new(
name: Annotated[str, typer.Argument(help="The name of the folder to create")],
with_poetry: Annotated[
bool,
typer.Option("--with-poetry/--no-poetry", help="Don't run poetry install"),
] = False,
):
"""
Creates a new template package.
"""
computed_name = name if name != "." else Path.cwd().name
destination_dir = Path.cwd() / name if name != "." else Path.cwd()
# copy over template from ../package_template
project_template_dir = Path(__file__).parents[1] / "package_template"
shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=name == ".")
package_name_split = computed_name.split("/")
package_name = (
package_name_split[-2]
if len(package_name_split) > 1 and package_name_split[-1] == ""
else package_name_split[-1]
)
module_name = re.sub(
r"[^a-zA-Z0-9_]",
"_",
package_name,
)
# generate app route code
chain_name = f"{module_name}_chain"
app_route_code = (
f"from {module_name} import chain as {chain_name}\n\n"
f'add_routes(app, {chain_name}, path="/{package_name}")'
)
# replace template strings
pyproject = destination_dir / "pyproject.toml"
pyproject_contents = pyproject.read_text()
pyproject.write_text(
pyproject_contents.replace("__package_name__", package_name).replace(
"__module_name__", module_name
)
)
# move module folder
package_dir = destination_dir / module_name
shutil.move(destination_dir / "package_template", package_dir)
# update init
init = package_dir / "__init__.py"
init_contents = init.read_text()
init.write_text(init_contents.replace("__module_name__", module_name))
# replace readme
readme = destination_dir / "README.md"
readme_contents = readme.read_text()
readme.write_text(
readme_contents.replace("__package_name__", package_name).replace(
"__app_route_code__", app_route_code
)
)
# poetry install
if with_poetry:
subprocess.run(["poetry", "install"], cwd=destination_dir)
@package_cli.command()
def serve(
*,
port: Annotated[
Optional[int], typer.Option(help="The port to run the server on")
] = None,
host: Annotated[
Optional[str], typer.Option(help="The host to run the server on")
] = None,
configurable: Annotated[
bool,
typer.Option(
"--configurable/--no-configurable",
help="Whether to include a configurable route",
),
] = True,
) -> None:
"""
Starts a demo app for this template.
"""
# load pyproject.toml
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
# get langserve export - throws KeyError if invalid
get_langserve_export(pyproject)
host_str = host if host is not None else "127.0.0.1"
script = (
"langchain_cli.dev_scripts:create_demo_server"
if not configurable
else "langchain_cli.dev_scripts:create_demo_server_configurable"
)
import uvicorn
uvicorn.run(
script,
factory=True,
reload=True,
port=port if port is not None else 8000,
host=host_str,
)
| [
"package_template"
] |
2024-01-10 | bentoml/langchain | libs~langchain~tests~integration_tests~memory~test_neo4j.py | import json
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import Neo4jChatMessageHistory
from langchain.schema.messages import _message_to_dict
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
# set up Neo4j as a message store
message_history = Neo4jChatMessageHistory(session_id="test-session")
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([_message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
# remove the record from Neo4j, so the next test run won't pick it up
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
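# Note: this integration test assumes a reachable Neo4j instance; connection
# details are resolved by Neo4jChatMessageHistory's defaults or environment
# configuration rather than being passed explicitly here.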
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~vectorstores~pgvector.py | from __future__ import annotations
import asyncio
import contextlib
import enum
import logging
import uuid
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
Type,
)
import numpy as np
import sqlalchemy
from sqlalchemy import delete
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import Session
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from langchain.vectorstores._pgvector_data_models import CollectionStore
class DistanceStrategy(str, enum.Enum):
"""Enumerator of the Distance strategies."""
EUCLIDEAN = "l2"
COSINE = "cosine"
MAX_INNER_PRODUCT = "inner"
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE
Base = declarative_base() # type: Any
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
class BaseModel(Base):
"""Base model for the SQL stores."""
__abstract__ = True
uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
def _results_to_docs(docs_and_scores: Any) -> List[Document]:
"""Return docs from docs and scores."""
return [doc for doc, _ in docs_and_scores]
class PGVector(VectorStore):
"""`Postgres`/`PGVector` vector store.
To use, you should have the ``pgvector`` python package installed.
Args:
connection_string: Postgres connection string.
embedding_function: Any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
collection_name: The name of the collection to use. (default: langchain)
NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if not exists)
So, make sure the user has the right permissions to create tables.
distance_strategy: The distance strategy to use. (default: COSINE)
pre_delete_collection: If True, will delete the collection if it exists.
(default: False). Useful for testing.
engine_args: SQLAlchemy's create engine arguments.
Example:
.. code-block:: python
from langchain.vectorstores import PGVector
from langchain.embeddings.openai import OpenAIEmbeddings
CONNECTION_STRING = "postgresql+psycopg2://hwc@localhost:5432/test3"
COLLECTION_NAME = "state_of_the_union_test"
embeddings = OpenAIEmbeddings()
vectorstore = PGVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
connection_string=CONNECTION_STRING,
)
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
collection_metadata: Optional[dict] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
*,
connection: Optional[sqlalchemy.engine.Connection] = None,
engine_args: Optional[dict[str, Any]] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.collection_name = collection_name
self.collection_metadata = collection_metadata
self._distance_strategy = distance_strategy
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.override_relevance_score_fn = relevance_score_fn
self.engine_args = engine_args or {}
# Create a connection if not provided, otherwise use the provided connection
self._conn = connection if connection else self.connect()
self.__post_init__()
def __post_init__(
self,
) -> None:
"""Initialize the store."""
self.create_vector_extension()
from langchain.vectorstores._pgvector_data_models import (
CollectionStore,
EmbeddingStore,
)
self.CollectionStore = CollectionStore
self.EmbeddingStore = EmbeddingStore
self.create_tables_if_not_exists()
self.create_collection()
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def connect(self) -> sqlalchemy.engine.Connection:
engine = sqlalchemy.create_engine(self.connection_string, **self.engine_args)
conn = engine.connect()
return conn
def create_vector_extension(self) -> None:
try:
with Session(self._conn) as session:
# The advisor lock fixes issue arising from concurrent
# creation of the vector extension.
# https://github.com/langchain-ai/langchain/issues/12933
# For more information see:
# https://www.postgresql.org/docs/16/explicit-locking.html#ADVISORY-LOCKS
statement = sqlalchemy.text(
"BEGIN;"
"SELECT pg_advisory_xact_lock(1573678846307946496);"
"CREATE EXTENSION IF NOT EXISTS vector;"
"COMMIT;"
)
session.execute(statement)
session.commit()
except Exception as e:
raise Exception(f"Failed to create vector extension: {e}") from e
def create_tables_if_not_exists(self) -> None:
with self._conn.begin():
Base.metadata.create_all(self._conn)
def drop_tables(self) -> None:
with self._conn.begin():
Base.metadata.drop_all(self._conn)
def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
with Session(self._conn) as session:
self.CollectionStore.get_or_create(
session, self.collection_name, cmetadata=self.collection_metadata
)
def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
self.logger.warning("Collection not found")
return
session.delete(collection)
session.commit()
@contextlib.contextmanager
def _make_session(self) -> Generator[Session, None, None]:
"""Create a context manager for the session, bind to _conn string."""
yield Session(self._conn)
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
"""Delete vectors by ids or uuids.
Args:
ids: List of ids to delete.
"""
with Session(self._conn) as session:
if ids is not None:
self.logger.debug(
"Trying to delete vectors by ids (represented by the model "
"using the custom ids field)"
)
stmt = delete(self.EmbeddingStore).where(
self.EmbeddingStore.custom_id.in_(ids)
)
session.execute(stmt)
session.commit()
def get_collection(self, session: Session) -> Optional["CollectionStore"]:
return self.CollectionStore.get_by_name(session, self.collection_name)
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
connection_string: Optional[str] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
if connection_string is None:
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
ids: Optional list of ids for the texts. If not provided, new
uuids will be generated.
kwargs: vectorstore specific parameters
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = self.EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
collection_id=collection.uuid,
)
session.add(embedding_store)
session.commit()
return ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids for the texts. If not provided, new
uuids will be generated.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding_function.embed_documents(list(texts))
return self.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with PGVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each.
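Example (illustrative sketch; assumes an initialized `vectorstore`):
.. code-block:: python
docs_and_scores = vectorstore.similarity_search_with_score(
"How does PGVector store embeddings?", k=4, filter={"source": "docs"}
)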
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
@property
def distance_strategy(self) -> Any:
if self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self.EmbeddingStore.embedding.l2_distance
elif self._distance_strategy == DistanceStrategy.COSINE:
return self.EmbeddingStore.embedding.cosine_distance
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self.EmbeddingStore.embedding.max_inner_product
else:
raise ValueError(
f"Got unexpected value for distance: {self._distance_strategy}. "
f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}."
)
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
results = self.__query_collection(embedding=embedding, k=k, filter=filter)
return self._results_to_docs_and_scores(results)
def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]:
"""Return docs and scores from results."""
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return docs
def __query_collection(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Any]:
"""Query the collection."""
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
filter_by = self.EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
for key, value in filter.items():
IN = "in"
if isinstance(value, dict) and IN in map(str.lower, value):
value_case_insensitive = {
k.lower(): v for k, v in value.items()
}
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext.in_(value_case_insensitive[IN])
filter_clauses.append(filter_by_metadata)
else:
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
_type = self.EmbeddingStore
results: List[Any] = (
session.query(
self.EmbeddingStore,
self.distance_strategy(embedding).label("distance"), # type: ignore
)
.filter(filter_by)
.order_by(sqlalchemy.asc("distance"))
.join(
self.CollectionStore,
self.EmbeddingStore.collection_id == self.CollectionStore.uuid,
)
.limit(k)
.all()
)
return results
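# Filter semantics sketch (illustrative): {"topic": "news"} is translated into
# an equality check on cmetadata['topic'] (compared as text), while
# {"topic": {"in": ["news", "sports"]}} is translated into an IN clause over
# the same JSONB field.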
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return _results_to_docs(docs_and_scores)
@classmethod
def from_texts(
cls: Type[PGVector],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Return VectorStore initialized from texts and embeddings.
A Postgres connection string is required. Either pass it as the
`connection_string` keyword argument or set the
PGVECTOR_CONNECTION_STRING environment variable.
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""Construct PGVector wrapper from raw documents and pre-
generated embeddings.
Return VectorStore initialized from documents and embeddings.
A Postgres connection string is required. Either pass it as the
`connection_string` keyword argument or set the
PGVECTOR_CONNECTION_STRING environment variable.
Example:
.. code-block:: python
from langchain.vectorstores import PGVector
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
vectorstore = PGVector.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[PGVector],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Get an instance of an existing PGVector store. This method will
return the instance of the store without inserting any new
embeddings.
"""
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="PGVECTOR_CONNECTION_STRING",
)
if not connection_string:
raise ValueError(
"Postgres connection string is required"
"Either pass it as a parameter"
"or set the PGVECTOR_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls: Type[PGVector],
documents: List[Document],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Return VectorStore initialized from documents and embeddings.
A Postgres connection string is required. Either pass it as the
`connection_string` keyword argument or set the
PGVECTOR_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
distance_strategy=distance_strategy,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
**kwargs,
)
@classmethod
def connection_string_from_db_params(
cls,
driver: str,
host: str,
port: int,
database: str,
user: str,
password: str,
) -> str:
"""Return connection string from database parameters."""
return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
# Default strategy is to rely on distance strategy provided
# in vectorstore constructor
if self._distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self._euclidean_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function"
f" for distance_strategy of {self._distance_strategy}."
"Consider providing relevance_score_fn to PGVector constructor."
)
def max_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
results = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter)
embedding_list = [result.EmbeddingStore.embedding for result in results]
mmr_selected = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
embedding_list,
k=k,
lambda_mult=lambda_mult,
)
candidates = self._results_to_docs_and_scores(results)
return [r for i, r in enumerate(candidates) if i in mmr_selected]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
def max_marginal_relevance_search_with_score(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_with_score_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return docs
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding (List[float]): Embedding vector to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return _results_to_docs(docs_and_scores)
async def amax_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance."""
# This is a temporary workaround to make the similarity search
# asynchronous. The proper solution is to make the similarity search
# asynchronous in the vector store implementations.
func = partial(
self.max_marginal_relevance_search_by_vector,
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return await asyncio.get_event_loop().run_in_executor(None, func)
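    # MMR usage sketch (assumes ``store`` was created as in the earlier examples):
    # ``fetch_k`` candidates are retrieved first, then ``k`` diverse documents
    # are kept according to ``lambda_mult``.
    #
    #     diverse_docs = store.max_marginal_relevance_search(
    #         "What did the president say about the economy?",
    #         k=4,
    #         fetch_k=20,
    #         lambda_mult=0.5,
    #     )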
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~callbacks~tracers~root_listeners.py | from typing import Callable, Optional, Union
from uuid import UUID
from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.schemas import Run
from langchain.schema.runnable.config import (
RunnableConfig,
call_func_with_variable_args,
)
Listener = Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
class RootListenersTracer(BaseTracer):
def __init__(
self,
*,
config: RunnableConfig,
on_start: Optional[Listener],
on_end: Optional[Listener],
on_error: Optional[Listener],
) -> None:
super().__init__()
self.config = config
self._arg_on_start = on_start
self._arg_on_end = on_end
self._arg_on_error = on_error
self.root_id: Optional[UUID] = None
def _persist_run(self, run: Run) -> None:
# This is a legacy method only called once for an entire run tree
# therefore not useful here
pass
def _on_run_create(self, run: Run) -> None:
if self.root_id is not None:
return
self.root_id = run.id
if self._arg_on_start is not None:
call_func_with_variable_args(self._arg_on_start, run, self.config)
def _on_run_update(self, run: Run) -> None:
if run.id != self.root_id:
return
if run.error is None:
if self._arg_on_end is not None:
call_func_with_variable_args(self._arg_on_end, run, self.config)
else:
if self._arg_on_error is not None:
call_func_with_variable_args(self._arg_on_error, run, self.config)
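# Usage sketch: this tracer backs ``Runnable.with_listeners`` (assuming that
# helper exists in your LangChain version); listeners receive the root ``Run``
# when the invocation starts, ends, or errors.
#
#     from langchain.schema.runnable import RunnableLambda
#
#     def log_start(run):
#         print("started:", run.id)
#
#     def log_end(run):
#         print("finished:", run.id, run.outputs)
#
#     chain = RunnableLambda(lambda x: x + 1).with_listeners(
#         on_start=log_start, on_end=log_end
#     )
#     chain.invoke(1)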
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~smith~evaluation~runner_utils.py | """Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import functools
import inspect
import logging
import uuid
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from langsmith.client import Client
from langsmith.evaluation import RunEvaluator
from langsmith.run_helpers import as_runnable, is_traceable_function
from langsmith.schemas import Dataset, DataType, Example
from langsmith.utils import LangSmithError
from requests import HTTPError
from langchain._api import warn_deprecated
from langchain.callbacks.manager import Callbacks
from langchain.callbacks.tracers.evaluation import (
EvaluatorCallbackHandler,
wait_for_all_evaluators,
)
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.chains.base import Chain
from langchain.evaluation.loading import load_evaluator
from langchain.evaluation.schema import (
EvaluatorType,
PairwiseStringEvaluator,
StringEvaluator,
)
from langchain.schema import ChatResult, LLMResult
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import BaseMessage, messages_from_dict
from langchain.schema.runnable import Runnable, RunnableConfig, RunnableLambda
from langchain.schema.runnable import config as runnable_config
from langchain.schema.runnable import utils as runnable_utils
from langchain.smith import evaluation as smith_eval
from langchain.smith.evaluation import config as smith_eval_config
from langchain.smith.evaluation import name_generation, progress
if TYPE_CHECKING:
import pandas as pd
logger = logging.getLogger(__name__)
MODEL_OR_CHAIN_FACTORY = Union[
Callable[[], Union[Chain, Runnable]],
BaseLanguageModel,
Callable[[dict], Any],
Runnable,
Chain,
]
MCF = Union[Callable[[], Union[Chain, Runnable]], BaseLanguageModel]
class InputFormatError(Exception):
"""Raised when the input format is invalid."""
## Shared Utilities
class TestResult(dict):
"""A dictionary of the results of a single test run."""
def get_aggregate_feedback(
self, quantiles: Optional[Sequence[float]] = None
) -> pd.DataFrame:
"""Return quantiles for the feedback scores.
This method calculates and prints the quantiles for the feedback scores
across all feedback keys.
Returns:
A DataFrame containing the quantiles for each feedback key.
"""
df = self.to_dataframe()
feedback_cols = [
col for col in df.columns if col not in ["input", "output", "reference"]
]
_quantiles = df[feedback_cols].quantile(
quantiles or [0.25, 0.5, 0.75], numeric_only=True
)
_quantiles.loc["mean"] = df[feedback_cols].mean()
_quantiles.loc["mode"] = df[feedback_cols].mode().iloc[0]
return _quantiles.transpose()
def to_dataframe(self) -> pd.DataFrame:
"""Convert the results to a dataframe."""
try:
import pandas as pd
except ImportError as e:
raise ImportError(
"Pandas is required to convert the results to a dataframe."
" to install pandas, run `pip install pandas`."
) from e
indices = []
records = []
for example_id, result in self["results"].items():
feedback = result["feedback"]
r = {
**{f.key: f.score for f in feedback},
"input": result["input"],
"output": result["output"],
}
if "reference" in result:
r["reference"] = result["reference"]
records.append(r)
indices.append(example_id)
return pd.DataFrame(records, index=indices)
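# Usage sketch (hypothetical names): ``run_on_dataset`` returns a ``TestResult``;
# assuming pandas is installed, per-example rows and aggregate feedback can be
# inspected as DataFrames.
#
#     results = run_on_dataset(client, "my-dataset", construct_chain, evaluation=eval_config)
#     df = results.to_dataframe()                  # one row per dataset example
#     summary = results.get_aggregate_feedback()   # quantiles/mean/mode per feedback key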
def _wrap_in_chain_factory(
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
dataset_name: str = "<my_dataset>",
) -> MCF:
"""Forgive the user if they pass in a chain without memory instead of a chain
factory. It's a common mistake. Raise a more helpful error message as well."""
if isinstance(llm_or_chain_factory, Chain):
chain = llm_or_chain_factory
chain_class = chain.__class__.__name__
if llm_or_chain_factory.memory is not None:
memory_class = chain.memory.__class__.__name__
raise ValueError(
"Cannot directly evaluate a chain with stateful memory."
" To evaluate this chain, pass in a chain constructor"
" that initializes fresh memory each time it is called."
" This will safegaurd against information"
" leakage between dataset examples."
"\nFor example:\n\n"
"def chain_constructor():\n"
f" new_memory = {memory_class}(...)\n"
f" return {chain_class}"
"(memory=new_memory, ...)\n\n"
f'run_on_dataset("{dataset_name}", chain_constructor, ...)'
)
return lambda: chain
elif isinstance(llm_or_chain_factory, BaseLanguageModel):
return llm_or_chain_factory
elif isinstance(llm_or_chain_factory, Runnable):
# Memory may exist here, but it's not elegant to check all those cases.
lcf = llm_or_chain_factory
return lambda: lcf
elif callable(llm_or_chain_factory):
if is_traceable_function(llm_or_chain_factory):
runnable_ = as_runnable(cast(Callable, llm_or_chain_factory))
return lambda: runnable_
try:
_model = llm_or_chain_factory() # type: ignore[call-arg]
except TypeError:
# It's an arbitrary function, wrap it in a RunnableLambda
user_func = cast(Callable, llm_or_chain_factory)
sig = inspect.signature(user_func)
logger.info(f"Wrapping function {sig} as RunnableLambda.")
wrapped = RunnableLambda(user_func)
return lambda: wrapped
constructor = cast(Callable, llm_or_chain_factory)
if isinstance(_model, BaseLanguageModel):
# It's not uncommon to do an LLM constructor instead of raw LLM,
# so we'll unpack it for the user.
return _model
elif is_traceable_function(cast(Callable, _model)):
runnable_ = as_runnable(cast(Callable, _model))
return lambda: runnable_
elif not isinstance(_model, Runnable):
# This is unlikely to happen - a constructor for a model function
return lambda: RunnableLambda(constructor)
else:
# Typical correct case
return constructor # noqa
return llm_or_chain_factory
def _get_prompt(inputs: Dict[str, Any]) -> str:
"""Get prompt from inputs.
Args:
inputs: The input dictionary.
Returns:
A string prompt.
Raises:
InputFormatError: If the input format is invalid.
"""
if not inputs:
raise InputFormatError("Inputs should not be empty.")
prompts = []
if "prompt" in inputs:
if not isinstance(inputs["prompt"], str):
raise InputFormatError(
"Expected string for 'prompt', got"
f" {type(inputs['prompt']).__name__}"
)
prompts = [inputs["prompt"]]
elif "prompts" in inputs:
if not isinstance(inputs["prompts"], list) or not all(
isinstance(i, str) for i in inputs["prompts"]
):
raise InputFormatError(
"Expected list of strings for 'prompts',"
f" got {type(inputs['prompts']).__name__}"
)
prompts = inputs["prompts"]
elif len(inputs) == 1:
prompt_ = next(iter(inputs.values()))
if isinstance(prompt_, str):
prompts = [prompt_]
elif isinstance(prompt_, list) and all(isinstance(i, str) for i in prompt_):
prompts = prompt_
else:
raise InputFormatError(f"LLM Run expects string prompt input. Got {inputs}")
else:
raise InputFormatError(
f"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}"
)
if len(prompts) == 1:
return prompts[0]
else:
raise InputFormatError(
f"LLM Run expects single prompt input. Got {len(prompts)} prompts."
)
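# Input shapes accepted by ``_get_prompt`` (illustrative):
#
#     _get_prompt({"prompt": "Hello"})       # -> "Hello"
#     _get_prompt({"prompts": ["Hello"]})    # -> "Hello"
#     _get_prompt({"question": "Hello"})     # single arbitrary key -> "Hello"
#     _get_prompt({"prompts": ["a", "b"]})   # raises InputFormatError (single prompt only)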
def _get_messages(inputs: Dict[str, Any]) -> List[BaseMessage]:
"""Get Chat Messages from inputs.
Args:
inputs: The input dictionary.
Returns:
A list of chat messages.
Raises:
InputFormatError: If the input format is invalid.
"""
if not inputs:
raise InputFormatError("Inputs should not be empty.")
if "messages" in inputs:
single_input = inputs["messages"]
elif len(inputs) == 1:
single_input = next(iter(inputs.values()))
else:
raise InputFormatError(
f"Chat Run expects 'messages' in inputs when example has multiple"
f" input keys. Got {inputs}"
)
if isinstance(single_input, list) and all(
isinstance(i, dict) for i in single_input
):
raw_messages = [single_input]
elif isinstance(single_input, list) and all(
isinstance(i, list) for i in single_input
):
raw_messages = single_input
else:
raise InputFormatError(
f"Chat Run expects List[dict] or List[List[dict]] values for"
f" 'messages' key input. Got {inputs}"
)
if len(raw_messages) == 1:
return messages_from_dict(raw_messages[0])
else:
raise InputFormatError(
f"Chat Run expects single List[dict] or List[List[dict]] 'messages'"
f" input. Got {len(raw_messages)} messages from inputs {inputs}"
)
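# Input shapes accepted by ``_get_messages`` (illustrative): values are message
# dicts in the format produced by ``messages_to_dict``.
#
#     msgs = [{"type": "human", "data": {"content": "Hi"}}]
#     _get_messages({"messages": msgs})      # -> [HumanMessage(content="Hi")]
#     _get_messages({"messages": [msgs]})    # list-of-lists form is also accepted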
## Shared data validation utilities
def _validate_example_inputs_for_language_model(
first_example: Example,
input_mapper: Optional[Callable[[Dict], Any]],
) -> None:
if input_mapper:
prompt_input = input_mapper(first_example.inputs)
if not isinstance(prompt_input, str) and not (
isinstance(prompt_input, list)
and all(isinstance(msg, BaseMessage) for msg in prompt_input)
):
raise InputFormatError(
"When using an input_mapper to prepare dataset example inputs"
" for an LLM or chat model, the output must a single string or"
" a list of chat messages."
f"\nGot: {prompt_input} of type {type(prompt_input)}."
)
else:
try:
_get_prompt(first_example.inputs)
except InputFormatError:
try:
_get_messages(first_example.inputs)
except InputFormatError:
raise InputFormatError(
"Example inputs do not match language model input format. "
"Expected a dictionary with messages or a single prompt."
f" Got: {first_example.inputs}"
" Please update your dataset OR provide an input_mapper"
" to convert the example.inputs to a compatible format"
" for the llm or chat model you wish to evaluate."
)
def _validate_example_inputs_for_chain(
first_example: Example,
chain: Chain,
input_mapper: Optional[Callable[[Dict], Any]],
) -> None:
"""Validate that the example inputs match the chain input keys."""
if input_mapper:
first_inputs = input_mapper(first_example.inputs)
missing_keys = set(chain.input_keys).difference(first_inputs)
if not isinstance(first_inputs, dict):
raise InputFormatError(
"When using an input_mapper to prepare dataset example"
" inputs for a chain, the mapped value must be a dictionary."
f"\nGot: {first_inputs} of type {type(first_inputs)}."
)
if missing_keys:
raise InputFormatError(
"Missing keys after loading example using input_mapper."
f"\nExpected: {chain.input_keys}. Got: {first_inputs.keys()}"
)
else:
first_inputs = first_example.inputs
missing_keys = set(chain.input_keys).difference(first_inputs)
if len(first_inputs) == 1 and len(chain.input_keys) == 1:
# We can pass this through the run method.
# Refrain from calling to validate.
pass
elif missing_keys:
raise InputFormatError(
"Example inputs missing expected chain input keys."
" Please provide an input_mapper to convert the example.inputs"
" to a compatible format for the chain you wish to evaluate."
f"Expected: {chain.input_keys}. "
f"Got: {first_inputs.keys()}"
)
def _validate_example_inputs(
example: Example,
llm_or_chain_factory: MCF,
input_mapper: Optional[Callable[[Dict], Any]],
) -> None:
"""Validate that the example inputs are valid for the model."""
if isinstance(llm_or_chain_factory, BaseLanguageModel):
_validate_example_inputs_for_language_model(example, input_mapper)
else:
chain = llm_or_chain_factory()
if isinstance(chain, Chain):
# Otherwise it's a runnable
_validate_example_inputs_for_chain(example, chain, input_mapper)
elif isinstance(chain, Runnable):
logger.debug(f"Skipping input validation for {chain}")
## Shared Evaluator Setup Utilities
def _setup_evaluation(
llm_or_chain_factory: MCF,
examples: List[Example],
evaluation: Optional[smith_eval.RunEvalConfig],
data_type: DataType,
) -> Optional[List[RunEvaluator]]:
"""Configure the evaluators to run on the results of the chain."""
if evaluation:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
run_inputs, run_outputs = None, None
run_type = "llm"
else:
run_type = "chain"
if data_type in (DataType.chat, DataType.llm):
val = data_type.value if isinstance(data_type, Enum) else data_type
raise ValueError(
"Cannot evaluate a chain on dataset with "
f"data_type={val}. "
"Please specify a dataset with the default 'kv' data type."
)
chain = llm_or_chain_factory()
run_inputs = chain.input_keys if isinstance(chain, Chain) else None
run_outputs = chain.output_keys if isinstance(chain, Chain) else None
run_evaluators = _load_run_evaluators(
evaluation,
run_type,
data_type,
list(examples[0].outputs) if examples[0].outputs else None,
run_inputs,
run_outputs,
)
else:
# TODO: Create a default helpfulness evaluator
run_evaluators = None
return run_evaluators
def _determine_input_key(
config: smith_eval.RunEvalConfig,
run_inputs: Optional[List[str]],
) -> Optional[str]:
input_key = None
if config.input_key:
input_key = config.input_key
if run_inputs and input_key not in run_inputs:
raise ValueError(f"Input key {input_key} not in run inputs {run_inputs}")
elif run_inputs and len(run_inputs) == 1:
input_key = run_inputs[0]
elif run_inputs is not None and len(run_inputs) > 1:
raise ValueError(
f"Must specify input key for model with multiple inputs: {run_inputs}"
)
return input_key
def _determine_prediction_key(
config: smith_eval.RunEvalConfig,
run_outputs: Optional[List[str]],
) -> Optional[str]:
prediction_key = None
if config.prediction_key:
prediction_key = config.prediction_key
if run_outputs and prediction_key not in run_outputs:
raise ValueError(
f"Prediction key {prediction_key} not in run outputs {run_outputs}"
)
elif run_outputs and len(run_outputs) == 1:
prediction_key = run_outputs[0]
elif run_outputs is not None and len(run_outputs) > 1:
raise ValueError(
f"Must specify prediction key for model"
f" with multiple outputs: {run_outputs}"
)
return prediction_key
def _determine_reference_key(
config: smith_eval.RunEvalConfig,
example_outputs: Optional[List[str]],
) -> Optional[str]:
if config.reference_key:
reference_key = config.reference_key
if example_outputs and reference_key not in example_outputs:
raise ValueError(
f"Reference key {reference_key} not in Dataset"
f" example outputs: {example_outputs}"
)
elif example_outputs and len(example_outputs) == 1:
reference_key = list(example_outputs)[0]
else:
reference_key = None
return reference_key
def _construct_run_evaluator(
eval_config: Union[EvaluatorType, str, smith_eval_config.EvalConfig],
eval_llm: Optional[BaseLanguageModel],
run_type: str,
data_type: DataType,
example_outputs: Optional[List[str]],
reference_key: Optional[str],
input_key: Optional[str],
prediction_key: Optional[str],
) -> RunEvaluator:
if isinstance(eval_config, (EvaluatorType, str)):
if not isinstance(eval_config, EvaluatorType):
eval_config = EvaluatorType(eval_config)
evaluator_ = load_evaluator(eval_config, llm=eval_llm)
eval_type_tag = eval_config.value
else:
kwargs = {"llm": eval_llm, **eval_config.get_kwargs()}
evaluator_ = load_evaluator(eval_config.evaluator_type, **kwargs)
eval_type_tag = eval_config.evaluator_type.value
if isinstance(evaluator_, StringEvaluator):
if evaluator_.requires_reference and reference_key is None:
raise ValueError(
f"Must specify reference_key in smith_eval.RunEvalConfig to use"
f" evaluator of type {eval_type_tag} with"
f" dataset with multiple output keys: {example_outputs}."
)
run_evaluator = smith_eval.StringRunEvaluatorChain.from_run_and_data_type(
evaluator_,
run_type,
data_type,
input_key=input_key,
prediction_key=prediction_key,
reference_key=reference_key,
tags=[eval_type_tag],
)
elif isinstance(evaluator_, PairwiseStringEvaluator):
raise NotImplementedError(
f"Run evaluator for {eval_type_tag} is not implemented."
" PairwiseStringEvaluators compare the outputs of two different models"
" rather than the output of a single model."
" Did you mean to use a StringEvaluator instead?"
"\nSee: https://python.langchain.com/docs/guides/evaluation/string/"
)
else:
raise NotImplementedError(
f"Run evaluator for {eval_type_tag} is not implemented"
)
return run_evaluator
def _get_keys(
config: smith_eval.RunEvalConfig,
run_inputs: Optional[List[str]],
run_outputs: Optional[List[str]],
example_outputs: Optional[List[str]],
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
input_key = _determine_input_key(config, run_inputs)
prediction_key = _determine_prediction_key(config, run_outputs)
reference_key = _determine_reference_key(config, example_outputs)
return input_key, prediction_key, reference_key
def _load_run_evaluators(
config: smith_eval.RunEvalConfig,
run_type: str,
data_type: DataType,
example_outputs: Optional[List[str]],
run_inputs: Optional[List[str]],
run_outputs: Optional[List[str]],
) -> List[RunEvaluator]:
"""
Load run evaluators from a configuration.
Args:
config: Configuration for the run evaluators.
Returns:
A list of run evaluators.
"""
run_evaluators = []
input_key, prediction_key, reference_key = None, None, None
if (
config.evaluators
or any([isinstance(e, EvaluatorType) for e in config.evaluators])
or (
config.custom_evaluators
and any([isinstance(e, StringEvaluator) for e in config.custom_evaluators])
)
):
input_key, prediction_key, reference_key = _get_keys(
config, run_inputs, run_outputs, example_outputs
)
for eval_config in config.evaluators:
run_evaluator = _construct_run_evaluator(
eval_config,
config.eval_llm,
run_type,
data_type,
example_outputs,
reference_key,
input_key,
prediction_key,
)
run_evaluators.append(run_evaluator)
custom_evaluators = config.custom_evaluators or []
for custom_evaluator in custom_evaluators:
if isinstance(custom_evaluator, RunEvaluator):
run_evaluators.append(custom_evaluator)
elif isinstance(custom_evaluator, StringEvaluator):
run_evaluators.append(
smith_eval.StringRunEvaluatorChain.from_run_and_data_type(
custom_evaluator,
run_type,
data_type,
input_key=input_key,
prediction_key=prediction_key,
reference_key=reference_key,
)
)
else:
raise ValueError(
f"Unsupported custom evaluator: {custom_evaluator}."
f" Expected RunEvaluator or StringEvaluator."
)
return run_evaluators
### Async Helpers
async def _arun_llm(
llm: BaseLanguageModel,
inputs: Dict[str, Any],
*,
tags: Optional[List[str]] = None,
callbacks: Callbacks = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[str, BaseMessage]:
"""Asynchronously run the language model.
Args:
llm: The language model to run.
inputs: The input dictionary.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
input_mapper: Optional function to map inputs to the expected format.
Returns:
        The prediction as a string or a chat message.
Raises:
ValueError: If the LLM type is unsupported.
InputFormatError: If the input format is invalid.
"""
if input_mapper is not None:
prompt_or_messages = input_mapper(inputs)
if isinstance(prompt_or_messages, str):
return await llm.apredict(
prompt_or_messages, callbacks=callbacks, tags=tags
)
elif isinstance(prompt_or_messages, list) and all(
isinstance(msg, BaseMessage) for msg in prompt_or_messages
):
return await llm.apredict_messages(
prompt_or_messages, callbacks=callbacks, tags=tags
)
else:
raise InputFormatError(
"Input mapper returned invalid format"
f" {prompt_or_messages}"
"\nExpected a single string or list of chat messages."
)
else:
try:
prompt = _get_prompt(inputs)
llm_output: Union[str, BaseMessage] = await llm.apredict(
prompt, callbacks=callbacks, tags=tags
)
except InputFormatError:
messages = _get_messages(inputs)
llm_output = await llm.apredict_messages(
messages, callbacks=callbacks, tags=tags
)
return llm_output
async def _arun_chain(
chain: Union[Chain, Runnable],
inputs: Dict[str, Any],
callbacks: Callbacks,
*,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[dict, str]:
"""Run a chain asynchronously on inputs."""
inputs_ = inputs if input_mapper is None else input_mapper(inputs)
if (
isinstance(chain, Chain)
and isinstance(inputs_, dict)
and len(inputs_) == 1
and chain.input_keys
):
val = next(iter(inputs_.values()))
output = await chain.acall(val, callbacks=callbacks, tags=tags)
else:
runnable_config = RunnableConfig(tags=tags or [], callbacks=callbacks)
output = await chain.ainvoke(inputs_, config=runnable_config)
return output
async def _arun_llm_or_chain(
example: Example,
config: RunnableConfig,
*,
llm_or_chain_factory: MCF,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[dict, str, LLMResult, ChatResult]:
"""Asynchronously run the Chain or language model.
Args:
example: The example to run.
llm_or_chain_factory: The Chain or language model constructor to run.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
input_mapper: Optional function to map the input to the expected format.
Returns:
A list of outputs.
"""
chain_or_llm = (
"LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain"
)
result = None
try:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
output: Any = await _arun_llm(
llm_or_chain_factory,
example.inputs,
tags=config["tags"],
callbacks=config["callbacks"],
input_mapper=input_mapper,
)
else:
chain = llm_or_chain_factory()
output = await _arun_chain(
chain,
example.inputs,
tags=config["tags"],
callbacks=config["callbacks"],
input_mapper=input_mapper,
)
result = output
except Exception as e:
logger.warning(
f"{chain_or_llm} failed for example {example.id} "
f"with inputs {example.inputs}"
f"\n{repr(e)}"
)
result = {"Error": repr(e)}
return result
## Sync Utilities
def _run_llm(
llm: BaseLanguageModel,
inputs: Dict[str, Any],
callbacks: Callbacks,
*,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[str, BaseMessage]:
"""
Run the language model on the example.
Args:
llm: The language model to run.
inputs: The input dictionary.
callbacks: The callbacks to use during the run.
tags: Optional tags to add to the run.
        input_mapper: Optional function to map the Example inputs to the
            format expected by the model.
Returns:
        The prediction as a string or a chat message.
Raises:
ValueError: If the LLM type is unsupported.
InputFormatError: If the input format is invalid.
"""
if input_mapper is not None:
prompt_or_messages = input_mapper(inputs)
if isinstance(prompt_or_messages, str):
llm_output: Union[str, BaseMessage] = llm.predict(
prompt_or_messages, callbacks=callbacks, tags=tags
)
elif isinstance(prompt_or_messages, list) and all(
isinstance(msg, BaseMessage) for msg in prompt_or_messages
):
llm_output = llm.predict_messages(
prompt_or_messages, callbacks=callbacks, tags=tags
)
else:
raise InputFormatError(
"Input mapper returned invalid format: "
f" {prompt_or_messages}"
"\nExpected a single string or list of chat messages."
)
else:
try:
llm_prompts = _get_prompt(inputs)
llm_output = llm.predict(llm_prompts, callbacks=callbacks, tags=tags)
except InputFormatError:
llm_messages = _get_messages(inputs)
llm_output = llm.predict_messages(llm_messages, callbacks=callbacks)
return llm_output
def _run_chain(
chain: Union[Chain, Runnable],
inputs: Dict[str, Any],
callbacks: Callbacks,
*,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[Dict, str]:
"""Run a chain on inputs."""
inputs_ = inputs if input_mapper is None else input_mapper(inputs)
if (
isinstance(chain, Chain)
and isinstance(inputs_, dict)
and len(inputs_) == 1
and chain.input_keys
):
val = next(iter(inputs_.values()))
output = chain(val, callbacks=callbacks, tags=tags)
else:
runnable_config = RunnableConfig(tags=tags or [], callbacks=callbacks)
output = chain.invoke(inputs_, config=runnable_config)
return output
def _run_llm_or_chain(
example: Example,
config: RunnableConfig,
*,
llm_or_chain_factory: MCF,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[dict, str, LLMResult, ChatResult]:
"""
Run the Chain or language model synchronously.
Args:
example: The example to run.
llm_or_chain_factory: The Chain or language model constructor to run.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
Returns:
        Union[dict, str, LLMResult, ChatResult]:
            The output of the model or chain.
"""
chain_or_llm = (
"LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain"
)
result = None
try:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
output: Any = _run_llm(
llm_or_chain_factory,
example.inputs,
config["callbacks"],
tags=config["tags"],
input_mapper=input_mapper,
)
else:
chain = llm_or_chain_factory()
output = _run_chain(
chain,
example.inputs,
config["callbacks"],
tags=config["tags"],
input_mapper=input_mapper,
)
result = output
except Exception as e:
error_type = type(e).__name__
logger.warning(
f"{chain_or_llm} failed for example {example.id} "
f"with inputs {example.inputs}"
f"\nError Type: {error_type}, Message: {e}"
)
result = {"Error": repr(e)}
return result
## Public API
def _prepare_eval_run(
client: Client,
dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
project_name: str,
project_metadata: Optional[Dict[str, Any]] = None,
) -> Tuple[MCF, str, Dataset, List[Example]]:
wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory, dataset_name)
dataset = client.read_dataset(dataset_name=dataset_name)
try:
project = client.create_project(
project_name,
reference_dataset_id=dataset.id,
project_extra={"metadata": project_metadata} if project_metadata else {},
)
except (HTTPError, ValueError, LangSmithError) as e:
if "already exists " not in str(e):
raise e
uid = uuid.uuid4()
example_msg = f"""
run_on_dataset(
...
project_name="{project_name} - {uid}", # Update since {project_name} already exists
)
"""
raise ValueError(
f"Test project {project_name} already exists. Please use a different name:"
f"\n\n{example_msg}"
)
print(
f"View the evaluation results for project '{project_name}'"
f" at:\n{project.url}?eval=true\n\n"
f"View all tests for Dataset {dataset_name} at:\n{dataset.url}",
flush=True,
)
examples = list(client.list_examples(dataset_id=dataset.id))
if not examples:
raise ValueError(f"Dataset {dataset_name} has no example rows.")
return wrapped_model, project_name, dataset, examples
def _prepare_run_on_dataset(
client: Client,
dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
project_name: Optional[str],
evaluation: Optional[smith_eval.RunEvalConfig] = None,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
concurrency_level: int = 5,
project_metadata: Optional[Dict[str, Any]] = None,
) -> Tuple[MCF, str, List[Example], List[RunnableConfig]]:
project_name = project_name or name_generation.random_name()
wrapped_model, project_name, dataset, examples = _prepare_eval_run(
client,
dataset_name,
llm_or_chain_factory,
project_name,
project_metadata=project_metadata,
)
wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory)
run_evaluators = _setup_evaluation(
wrapped_model, examples, evaluation, dataset.data_type or DataType.kv
)
_validate_example_inputs(examples[0], wrapped_model, input_mapper)
progress_bar = progress.ProgressBarCallback(len(examples))
configs = [
RunnableConfig(
callbacks=[
LangChainTracer(
project_name=project_name,
client=client,
use_threading=False,
example_id=example.id,
),
EvaluatorCallbackHandler(
evaluators=run_evaluators or [],
client=client,
example_id=example.id,
),
progress_bar,
],
tags=tags or [],
max_concurrency=concurrency_level,
)
for example in examples
]
return wrapped_model, project_name, examples, configs
def _collect_test_results(
examples: List[Example],
batch_results: List[Union[dict, str, LLMResult, ChatResult]],
configs: List[RunnableConfig],
project_name: str,
) -> TestResult:
wait_for_all_evaluators()
all_eval_results = {}
for c in configs:
for callback in cast(list, c["callbacks"]):
if isinstance(callback, EvaluatorCallbackHandler):
eval_results = callback.logged_eval_results
all_eval_results.update(
{example_id: v for (_, example_id), v in eval_results.items()}
)
results = {}
for example, output in zip(examples, batch_results):
feedback = all_eval_results.get(str(example.id), [])
results[str(example.id)] = {
"output": output,
"input": example.inputs,
"feedback": feedback,
}
if example.outputs:
results[str(example.id)]["reference"] = example.outputs
return TestResult(
project_name=project_name,
results=results,
)
_INPUT_MAPPER_DEP_WARNING = (
"The input_mapper argument is deprecated and "
"will be removed in a future release. Please add a "
" RunnableLambda to your chain to map inputs to the expected format"
" instead. Example:\n"
"def construct_chain():\n"
" my_chain = ...\n"
" input_mapper = {'other_key': 'MyOtherInput', 'my_input_key': x}\n"
" return input_mapper | my_chain\n"
"run_on_dataset(..., llm_or_chain_factory=construct_chain)\n"
"(See https://api.python.langchain.com/en/latest/schema/"
"langchain.schema.runnable.base.RunnableLambda.html)"
)
async def arun_on_dataset(
client: Optional[Client],
dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
*,
evaluation: Optional[smith_eval.RunEvalConfig] = None,
concurrency_level: int = 5,
project_name: Optional[str] = None,
project_metadata: Optional[Dict[str, Any]] = None,
verbose: bool = False,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
input_mapper = kwargs.pop("input_mapper", None)
if input_mapper:
warn_deprecated("0.0.305", message=_INPUT_MAPPER_DEP_WARNING, pending=True)
if kwargs:
warn_deprecated(
"0.0.305",
message="The following arguments are deprecated and "
"will be removed in a future release: "
f"{kwargs.keys()}.",
removal="0.0.305",
)
client = client or Client()
wrapped_model, project_name, examples, configs = _prepare_run_on_dataset(
client,
dataset_name,
llm_or_chain_factory,
project_name,
evaluation,
tags,
input_mapper,
concurrency_level,
project_metadata=project_metadata,
)
batch_results = await runnable_utils.gather_with_concurrency(
configs[0].get("max_concurrency"),
*map(
functools.partial(
_arun_llm_or_chain,
llm_or_chain_factory=wrapped_model,
input_mapper=input_mapper,
),
examples,
configs,
),
)
results = _collect_test_results(examples, batch_results, configs, project_name)
if verbose:
try:
agg_feedback = results.get_aggregate_feedback()
print("\n Eval quantiles:")
print(agg_feedback)
except Exception as e:
logger.debug(f"Failed to print aggregate feedback: {repr(e)}")
return results
def run_on_dataset(
client: Optional[Client],
dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
*,
evaluation: Optional[smith_eval.RunEvalConfig] = None,
concurrency_level: int = 5,
project_name: Optional[str] = None,
project_metadata: Optional[Dict[str, Any]] = None,
verbose: bool = False,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
input_mapper = kwargs.pop("input_mapper", None)
if input_mapper:
warn_deprecated("0.0.305", message=_INPUT_MAPPER_DEP_WARNING, pending=True)
if kwargs:
warn_deprecated(
"0.0.305",
message="The following arguments are deprecated and "
"will be removed in a future release: "
f"{kwargs.keys()}.",
removal="0.0.305",
)
client = client or Client()
wrapped_model, project_name, examples, configs = _prepare_run_on_dataset(
client,
dataset_name,
llm_or_chain_factory,
project_name,
evaluation,
tags,
input_mapper,
concurrency_level,
project_metadata=project_metadata,
)
if concurrency_level == 0:
batch_results = [
_run_llm_or_chain(
example,
config,
llm_or_chain_factory=wrapped_model,
input_mapper=input_mapper,
)
for example, config in zip(examples, configs)
]
else:
with runnable_config.get_executor_for_config(configs[0]) as executor:
batch_results = list(
executor.map(
functools.partial(
_run_llm_or_chain,
llm_or_chain_factory=wrapped_model,
input_mapper=input_mapper,
),
examples,
configs,
)
)
results = _collect_test_results(examples, batch_results, configs, project_name)
if verbose:
try:
agg_feedback = results.get_aggregate_feedback()
print("\n Eval quantiles:")
print(agg_feedback)
except Exception as e:
logger.debug(f"Failed to print aggregate feedback: {repr(e)}")
return results
_RUN_ON_DATASET_DOCSTRING = """
Run the Chain or language model on a dataset and store traces
to the specified project name.
Args:
dataset_name: Name of the dataset to run the chain on.
llm_or_chain_factory: Language model or Chain constructor to run
over the dataset. The Chain constructor is used to permit
independent calls on each example without carrying over state.
evaluation: Configuration for evaluators to run on the
results of the chain
concurrency_level: The number of async tasks to run concurrently.
project_name: Name of the project to store the traces in.
Defaults to {dataset_name}-{chain class name}-{datetime}.
project_metadata: Optional metadata to add to the project.
        Useful for storing information about the test variant.
(prompt version, model version, etc.)
client: LangSmith client to use to access the dataset and to
log feedback and run traces.
verbose: Whether to print progress.
tags: Tags to add to each run in the project.
Returns:
A dictionary containing the run's project name and the resulting model outputs.
For the (usually faster) async version of this function, see :func:`arun_on_dataset`.
Examples
--------
.. code-block:: python
from langsmith import Client
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
        from langchain.smith import RunEvalConfig, run_on_dataset
# Chains may have memory. Passing in a constructor function lets the
# evaluation framework avoid cross-contamination between runs.
def construct_chain():
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(
llm,
"What's the answer to {your_input_key}"
)
return chain
# Load off-the-shelf evaluators via config or the EvaluatorType (string or enum)
        evaluation_config = RunEvalConfig(
evaluators=[
"qa", # "Correctness" against a reference answer
"embedding_distance",
smith_eval.RunEvalConfig.Criteria("helpfulness"),
smith_eval.RunEvalConfig.Criteria({
"fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?"
}),
]
)
client = Client()
run_on_dataset(
client,
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
You can also create custom evaluators by subclassing the
:class:`StringEvaluator <langchain.evaluation.schema.StringEvaluator>`
or LangSmith's `RunEvaluator` classes.
.. code-block:: python
from typing import Optional
from langchain.evaluation import StringEvaluator
class MyStringEvaluator(StringEvaluator):
@property
def requires_input(self) -> bool:
return False
@property
def requires_reference(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
return "exact_match"
def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict:
return {"score": prediction == reference}
        evaluation_config = RunEvalConfig(
custom_evaluators = [MyStringEvaluator()],
)
run_on_dataset(
client,
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
""" # noqa: E501
run_on_dataset.__doc__ = _RUN_ON_DATASET_DOCSTRING
arun_on_dataset.__doc__ = _RUN_ON_DATASET_DOCSTRING.replace(
"run_on_dataset(", "await arun_on_dataset("
)
| [
"['PLACEHOLDER']",
"[]"
] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~document_loaders~csv_loader.py | import csv
from io import TextIOWrapper
from typing import Any, Dict, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.helpers import detect_file_encodings
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class CSVLoader(BaseLoader):
"""Load a `CSV` file into a list of Documents.
Each document represents one row of the CSV file. Every row is converted into a
key/value pair and outputted to a new line in the document's page_content.
The source for each document loaded from csv is set to the value of the
`file_path` argument for all documents by default.
You can override this by setting the `source_column` argument to the
name of a column in the CSV file.
The source of each document will then be set to the value of the column
with the name specified in `source_column`.
Output Example:
.. code-block:: txt
column1: value1
column2: value2
column3: value3
"""
def __init__(
self,
file_path: str,
source_column: Optional[str] = None,
metadata_columns: Sequence[str] = (),
csv_args: Optional[Dict] = None,
encoding: Optional[str] = None,
autodetect_encoding: bool = False,
):
"""
Args:
file_path: The path to the CSV file.
source_column: The name of the column in the CSV file to use as the source.
Optional. Defaults to None.
metadata_columns: A sequence of column names to use as metadata. Optional.
csv_args: A dictionary of arguments to pass to the csv.DictReader.
Optional. Defaults to None.
encoding: The encoding of the CSV file. Optional. Defaults to None.
autodetect_encoding: Whether to try to autodetect the file encoding.
"""
self.file_path = file_path
self.source_column = source_column
self.metadata_columns = metadata_columns
self.encoding = encoding
self.csv_args = csv_args or {}
self.autodetect_encoding = autodetect_encoding
def load(self) -> List[Document]:
"""Load data into document objects."""
docs = []
try:
with open(self.file_path, newline="", encoding=self.encoding) as csvfile:
docs = self.__read_file(csvfile)
except UnicodeDecodeError as e:
if self.autodetect_encoding:
detected_encodings = detect_file_encodings(self.file_path)
for encoding in detected_encodings:
try:
with open(
self.file_path, newline="", encoding=encoding.encoding
) as csvfile:
docs = self.__read_file(csvfile)
break
except UnicodeDecodeError:
continue
else:
raise RuntimeError(f"Error loading {self.file_path}") from e
except Exception as e:
raise RuntimeError(f"Error loading {self.file_path}") from e
return docs
def __read_file(self, csvfile: TextIOWrapper) -> List[Document]:
docs = []
csv_reader = csv.DictReader(csvfile, **self.csv_args) # type: ignore
for i, row in enumerate(csv_reader):
try:
source = (
row[self.source_column]
if self.source_column is not None
else self.file_path
)
except KeyError:
raise ValueError(
f"Source column '{self.source_column}' not found in CSV file."
)
content = "\n".join(
f"{k.strip()}: {v.strip() if v is not None else v}"
for k, v in row.items()
if k not in self.metadata_columns
)
metadata = {"source": source, "row": i}
for col in self.metadata_columns:
try:
metadata[col] = row[col]
except KeyError:
raise ValueError(f"Metadata column '{col}' not found in CSV file.")
doc = Document(page_content=content, metadata=metadata)
docs.append(doc)
return docs
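# Usage sketch for ``CSVLoader`` (hypothetical file and column names):
#
#     loader = CSVLoader(
#         file_path="stanley-cups.csv",
#         source_column="Team",
#         metadata_columns=["Year"],
#         csv_args={"delimiter": ","},
#     )
#     docs = loader.load()
#     # docs[0].page_content -> "Team: ...\nWins: ..."  (metadata columns excluded)
#     # docs[0].metadata     -> {"source": "<Team value>", "row": 0, "Year": "..."}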
class UnstructuredCSVLoader(UnstructuredFileLoader):
"""Load `CSV` files using `Unstructured`.
Like other
Unstructured loaders, UnstructuredCSVLoader can be used in both
"single" and "elements" mode. If you use the loader in "elements"
mode, the CSV file will be a single Unstructured Table element.
If you use the loader in "elements" mode, an HTML representation
of the table will be available in the "text_as_html" key in the
document metadata.
Examples
--------
from langchain.document_loaders.csv_loader import UnstructuredCSVLoader
loader = UnstructuredCSVLoader("stanley-cups.csv", mode="elements")
docs = loader.load()
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
"""
Args:
file_path: The path to the CSV file.
mode: The mode to use when loading the CSV file.
Optional. Defaults to "single".
**unstructured_kwargs: Keyword arguments to pass to unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.6.8")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.csv import partition_csv
return partition_csv(filename=self.file_path, **self.unstructured_kwargs)
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~chat_models~baidu_qianfan_endpoint.py | from __future__ import annotations
import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, cast
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a message to a dictionary that can be passed to the API."""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
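# Conversion sketch: LangChain messages become the role/content dicts expected
# by the Qianfan API (illustrative):
#
#     convert_message_to_dict(HumanMessage(content="hi"))
#     # -> {"role": "user", "content": "hi"}
#     convert_message_to_dict(AIMessage(content="hello"))
#     # -> {"role": "assistant", "content": "hello"}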
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
content = _dict.get("result", "") or ""
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
if "thoughts" in additional_kwargs["function_call"]:
# align to api sample, which affects the llm function_call output
additional_kwargs["function_call"].pop("thoughts")
else:
additional_kwargs = {}
return AIMessage(
content=content,
additional_kwargs={**_dict.get("body", {}), **additional_kwargs},
)
class QianfanChatEndpoint(BaseChatModel):
"""Baidu Qianfan chat models.
To use, you should have the ``qianfan`` python package installed, and
the environment variable ``qianfan_ak`` and ``qianfan_sk`` set with your
API key and Secret Key.
ak, sk are required parameters
which you could get from https://cloud.baidu.com/product/wenxinworkshop
Example:
.. code-block:: python
from langchain.chat_models import QianfanChatEndpoint
qianfan_chat = QianfanChatEndpoint(model="ERNIE-Bot",
endpoint="your_endpoint", qianfan_ak="your_ak", qianfan_sk="your_sk")
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
client: Any
qianfan_ak: Optional[str] = None
qianfan_sk: Optional[str] = None
streaming: Optional[bool] = False
"""Whether to stream the results or not."""
request_timeout: Optional[int] = 60
"""request timeout for chat http requests"""
top_p: Optional[float] = 0.8
temperature: Optional[float] = 0.95
penalty_score: Optional[float] = 1
"""Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo.
    For other models, passing these params will not affect the result.
"""
model: str = "ERNIE-Bot-turbo"
"""Model name.
    You can get the list of models from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
    Preset models are mapped to an endpoint.
`model` will be ignored if `endpoint` is set.
Default is ERNIE-Bot-turbo.
"""
endpoint: Optional[str] = None
"""Endpoint of the Qianfan LLM, required if custom model used."""
@root_validator()
def validate_enviroment(cls, values: Dict) -> Dict:
values["qianfan_ak"] = get_from_dict_or_env(
values,
"qianfan_ak",
"QIANFAN_AK",
)
values["qianfan_sk"] = get_from_dict_or_env(
values,
"qianfan_sk",
"QIANFAN_SK",
)
params = {
"ak": values["qianfan_ak"],
"sk": values["qianfan_sk"],
"model": values["model"],
"stream": values["streaming"],
}
if values["endpoint"] is not None and values["endpoint"] != "":
params["endpoint"] = values["endpoint"]
try:
import qianfan
values["client"] = qianfan.ChatCompletion(**params)
except ImportError:
raise ValueError(
"qianfan package not found, please install it with "
"`pip install qianfan`"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**{"endpoint": self.endpoint, "model": self.model},
**super()._identifying_params,
}
@property
def _llm_type(self) -> str:
"""Return type of chat_model."""
return "baidu-qianfan-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Qianfan API."""
normal_params = {
"model": self.model,
"endpoint": self.endpoint,
"stream": self.streaming,
"request_timeout": self.request_timeout,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_score": self.penalty_score,
}
return {**normal_params, **self.model_kwargs}
def _convert_prompt_msg_params(
self,
messages: List[BaseMessage],
**kwargs: Any,
) -> Dict[str, Any]:
"""
Converts a list of messages into a dictionary containing the message content
and default parameters.
Args:
messages (List[BaseMessage]): The list of messages.
**kwargs (Any): Optional arguments to add additional parameters to the
resulting dictionary.
Returns:
Dict[str, Any]: A dictionary containing the message content and default
parameters.
"""
messages_dict: Dict[str, Any] = {
"messages": [
convert_message_to_dict(m)
for m in messages
if not isinstance(m, SystemMessage)
]
}
for i in [i for i, m in enumerate(messages) if isinstance(m, SystemMessage)]:
if "system" not in messages_dict:
messages_dict["system"] = ""
messages_dict["system"] += cast(str, messages[i].content) + "\n"
return {
**messages_dict,
**self._default_params,
**kwargs,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to an qianfan models endpoint for each generation with a prompt.
Args:
messages: The messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = qianfan_model("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={"token_usage": {}, "model_name": self.model},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
response_payload = self.client.do(**params)
lc_msg = _convert_dict_to_message(response_payload)
gen = ChatGeneration(
message=lc_msg,
generation_info={
"finish_reason": "stop",
**response_payload.get("body", {}),
},
)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=[gen], llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
completion = ""
token_usage = {}
async for chunk in self._astream(messages, stop, run_manager, **kwargs):
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={"token_usage": {}, "model_name": self.model},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
response_payload = await self.client.ado(**params)
lc_msg = _convert_dict_to_message(response_payload)
generations = []
gen = ChatGeneration(
message=lc_msg,
generation_info={
"finish_reason": "stop",
**response_payload.get("body", {}),
},
)
generations.append(gen)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
for res in self.client.do(**params):
if res:
msg = _convert_dict_to_message(res)
chunk = ChatGenerationChunk(
text=res["result"],
message=AIMessageChunk(
content=msg.content,
role="assistant",
additional_kwargs=msg.additional_kwargs,
),
)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
async for res in await self.client.ado(**params):
if res:
msg = _convert_dict_to_message(res)
chunk = ChatGenerationChunk(
text=res["result"],
message=AIMessageChunk(
content=msg.content,
role="assistant",
additional_kwargs=msg.additional_kwargs,
),
)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
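# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A minimal, hedged example of how the chat model defined in this module could be
# used. The class name `QianfanChatEndpoint` and the QIANFAN_AK / QIANFAN_SK
# environment variables are assumptions; the class definition itself sits earlier in
# this file and is not shown in this excerpt.
#
#     from langchain.chat_models import QianfanChatEndpoint
#     from langchain.schema.messages import HumanMessage
#
#     chat = QianfanChatEndpoint(streaming=True)
#     # Streaming call: chunks are yielded by the `_stream` method above.
#     for chunk in chat.stream([HumanMessage(content="Tell me a joke.")]):
#         print(chunk.content, end="", flush=True)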
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~utilities~dalle_image_generator.py | """Utility that calls OpenAI's Dall-E Image Generator."""
from typing import Any, Dict, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class DallEAPIWrapper(BaseModel):
"""Wrapper for OpenAI's DALL-E Image Generator.
https://platform.openai.com/docs/guides/images/generations?context=node
Usage instructions:
1. `pip install openai`
2. save your OPENAI_API_KEY in an environment variable
"""
client: Any #: :meta private:
openai_api_key: Optional[str] = None
n: int = 1
"""Number of images to generate"""
size: str = "1024x1024"
"""Size of image to generate"""
separator: str = "\n"
"""Separator to use when multiple URLs are returned."""
model: Optional[str] = None
"""Model to use for image generation."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
try:
import openai
openai.api_key = openai_api_key
values["client"] = openai.Image
except ImportError as e:
raise ImportError(
"Could not import openai python package. "
"Please it install it with `pip install openai`."
) from e
return values
def run(self, query: str) -> str:
"""Run query through OpenAI and parse result."""
response = self.client.create(
prompt=query, n=self.n, size=self.size, model=self.model
)
image_urls = self.separator.join([item["url"] for item in response["data"]])
return image_urls if image_urls else "No image was generated"
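# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal example of the wrapper defined above. It assumes OPENAI_API_KEY is set in
# the environment and that the `openai` package is installed; the prompt text is
# arbitrary.
if __name__ == "__main__":
    dalle = DallEAPIWrapper(n=1, size="512x512")
    # Returns one or more image URLs joined by `separator`.
    print(dalle.run("A watercolor painting of a lighthouse at dawn"))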
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~chat_models~human.py | """ChatModel wrapper which returns user input as the response.."""
import asyncio
from functools import partial
from io import StringIO
from typing import Any, Callable, Dict, List, Mapping, Optional
import yaml
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Field
from langchain.schema.messages import (
BaseMessage,
HumanMessage,
_message_from_dict,
messages_to_dict,
)
from langchain.schema.output import ChatGeneration, ChatResult
def _display_messages(messages: List[BaseMessage]) -> None:
dict_messages = messages_to_dict(messages)
for message in dict_messages:
yaml_string = yaml.dump(
message,
default_flow_style=False,
sort_keys=False,
allow_unicode=True,
width=10000,
line_break=None,
)
print("\n", "======= start of message =======", "\n\n")
print(yaml_string)
print("======= end of message =======", "\n\n")
def _collect_yaml_input(
messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> BaseMessage:
"""Collects and returns user input as a single string."""
lines = []
while True:
line = input()
if not line.strip():
break
if stop and any(seq in line for seq in stop):
break
lines.append(line)
yaml_string = "\n".join(lines)
# Try to parse the input string as YAML
try:
message = _message_from_dict(yaml.safe_load(StringIO(yaml_string)))
if message is None:
return HumanMessage(content="")
if stop:
if isinstance(message.content, str):
message.content = enforce_stop_tokens(message.content, stop)
else:
raise ValueError("Cannot use when output is not a string.")
return message
except yaml.YAMLError:
raise ValueError("Invalid YAML string entered.")
except ValueError:
raise ValueError("Invalid message entered.")
class HumanInputChatModel(BaseChatModel):
"""ChatModel which returns user input as the response."""
input_func: Callable = Field(default_factory=lambda: _collect_yaml_input)
message_func: Callable = Field(default_factory=lambda: _display_messages)
separator: str = "\n"
input_kwargs: Mapping[str, Any] = {}
message_kwargs: Mapping[str, Any] = {}
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
"input_func": self.input_func.__name__,
"message_func": self.message_func.__name__,
}
@property
def _llm_type(self) -> str:
"""Returns the type of LLM."""
return "human-input-chat-model"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""
Displays the messages to the user and returns their input as a response.
Args:
messages (List[BaseMessage]): The messages to be displayed to the user.
stop (Optional[List[str]]): A list of stop strings.
run_manager (Optional[CallbackManagerForLLMRun]): Currently not used.
Returns:
ChatResult: The user's input as a response.
"""
self.message_func(messages, **self.message_kwargs)
user_input = self.input_func(messages, stop=stop, **self.input_kwargs)
return ChatResult(generations=[ChatGeneration(message=user_input)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
func = partial(
self._generate, messages, stop=stop, run_manager=run_manager, **kwargs
)
return await asyncio.get_event_loop().run_in_executor(None, func)
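# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# The model below prints the conversation as YAML and then blocks until the user types
# a YAML-formatted message, finishing with an empty line. Handy for exercising chains
# interactively without calling a real LLM.
if __name__ == "__main__":
    chat = HumanInputChatModel()
    reply = chat.invoke([HumanMessage(content="What would a human answer here?")])
    print(reply)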
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~document_transformers~beautiful_soup_transformer.py | from typing import Any, Iterator, List, Sequence, cast
from langchain.schema import BaseDocumentTransformer, Document
class BeautifulSoupTransformer(BaseDocumentTransformer):
"""Transform HTML content by extracting specific tags and removing unwanted ones.
Example:
.. code-block:: python
from langchain.document_transformers import BeautifulSoupTransformer
bs4_transformer = BeautifulSoupTransformer()
docs_transformed = bs4_transformer.transform_documents(docs)
"""
def __init__(self) -> None:
"""
Initialize the transformer.
This checks if the BeautifulSoup4 package is installed.
If not, it raises an ImportError.
"""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"BeautifulSoup4 is required for BeautifulSoupTransformer. "
"Please install it with `pip install beautifulsoup4`."
)
def transform_documents(
self,
documents: Sequence[Document],
unwanted_tags: List[str] = ["script", "style"],
tags_to_extract: List[str] = ["p", "li", "div", "a"],
remove_lines: bool = True,
**kwargs: Any,
) -> Sequence[Document]:
"""
Transform a list of Document objects by cleaning their HTML content.
Args:
documents: A sequence of Document objects containing HTML content.
unwanted_tags: A list of tags to be removed from the HTML.
tags_to_extract: A list of tags whose content will be extracted.
remove_lines: If set to True, unnecessary lines will be
removed from the HTML content.
Returns:
A sequence of Document objects with transformed content.
"""
for doc in documents:
cleaned_content = doc.page_content
cleaned_content = self.remove_unwanted_tags(cleaned_content, unwanted_tags)
cleaned_content = self.extract_tags(cleaned_content, tags_to_extract)
if remove_lines:
cleaned_content = self.remove_unnecessary_lines(cleaned_content)
doc.page_content = cleaned_content
return documents
@staticmethod
def remove_unwanted_tags(html_content: str, unwanted_tags: List[str]) -> str:
"""
Remove unwanted tags from a given HTML content.
Args:
html_content: The original HTML content string.
unwanted_tags: A list of tags to be removed from the HTML.
Returns:
A cleaned HTML string with unwanted tags removed.
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_content, "html.parser")
for tag in unwanted_tags:
for element in soup.find_all(tag):
element.decompose()
return str(soup)
@staticmethod
def extract_tags(html_content: str, tags: List[str]) -> str:
"""
Extract specific tags from a given HTML content.
Args:
html_content: The original HTML content string.
tags: A list of tags to be extracted from the HTML.
Returns:
A string combining the content of the extracted tags.
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_content, "html.parser")
text_parts: List[str] = []
for element in soup.find_all():
if element.name in tags:
# Extract all navigable strings recursively from this element.
text_parts += get_navigable_strings(element)
# To avoid duplicate text, remove all descendants from the soup.
element.decompose()
return " ".join(text_parts)
@staticmethod
def remove_unnecessary_lines(content: str) -> str:
"""
Clean up the content by removing unnecessary lines.
Args:
content: A string, which may contain unnecessary lines or spaces.
Returns:
A cleaned string with unnecessary lines removed.
"""
lines = content.split("\n")
stripped_lines = [line.strip() for line in lines]
non_empty_lines = [line for line in stripped_lines if line]
cleaned_content = " ".join(non_empty_lines)
return cleaned_content
async def atransform_documents(
self,
documents: Sequence[Document],
**kwargs: Any,
) -> Sequence[Document]:
raise NotImplementedError
def get_navigable_strings(element: Any) -> Iterator[str]:
from bs4 import NavigableString, Tag
for child in cast(Tag, element).children:
if isinstance(child, Tag):
yield from get_navigable_strings(child)
elif isinstance(child, NavigableString):
if (element.name == "a") and (href := element.get("href")):
yield f"{child.strip()} ({href})"
else:
yield child.strip()
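# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A small self-contained example of the transformer defined above applied to an
# in-memory Document; it only needs `beautifulsoup4` to be installed. The HTML snippet
# is arbitrary.
if __name__ == "__main__":
    html = (
        "<html><body><script>var x = 1;</script>"
        "<p>Hello <a href='https://example.com'>world</a></p>"
        "<li>item one</li></body></html>"
    )
    docs = [Document(page_content=html)]
    transformed = BeautifulSoupTransformer().transform_documents(docs)
    # Roughly: "Hello world (https://example.com) item one"
    print(transformed[0].page_content)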
| [] |
2024-01-10 | bentoml/langchain | templates~rag-timescale-conversation~rag_timescale_conversation~load_sample_dataset.py | import os
import tempfile
from datetime import datetime, timedelta
import requests
from langchain.document_loaders import JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.timescalevector import TimescaleVector
from timescale_vector import client
def parse_date(date_string: str) -> datetime:
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
return datetime.strptime(date_string, time_format)
def extract_metadata(record: dict, metadata: dict) -> dict:
dt = parse_date(record["date"])
metadata["id"] = str(client.uuid_from_time(dt))
if dt is not None:
metadata["date"] = dt.isoformat()
else:
metadata["date"] = None
metadata["author"] = record["author"]
metadata["commit_hash"] = record["commit"]
return metadata
def load_ts_git_dataset(
service_url,
collection_name="timescale_commits",
num_records: int = 500,
partition_interval=timedelta(days=7),
):
json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json"
tmp_file = "ts_git_log.json"
temp_dir = tempfile.gettempdir()
json_file_path = os.path.join(temp_dir, tmp_file)
if not os.path.exists(json_file_path):
response = requests.get(json_url)
if response.status_code == 200:
with open(json_file_path, "w") as json_file:
json_file.write(response.text)
else:
print(f"Failed to download JSON file. Status code: {response.status_code}")
loader = JSONLoader(
file_path=json_file_path,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
# Remove documents with None dates
documents = [doc for doc in documents if doc.metadata["date"] is not None]
if num_records > 0:
documents = documents[:num_records]
# Split the documents into chunks for embedding
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
# Create a Timescale Vector instance from the collection of documents
TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=collection_name,
service_url=service_url,
time_partition_interval=partition_interval,
)
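# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Loading the sample dataset needs a Timescale service URL and an OpenAI API key (for
# OpenAIEmbeddings). The environment variable name below is a placeholder.
if __name__ == "__main__":
    import os

    service_url = os.environ["TIMESCALE_SERVICE_URL"]  # placeholder variable name
    load_ts_git_dataset(
        service_url,
        collection_name="timescale_commits",
        num_records=50,
    )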
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~vectorstores~myscale.py | from __future__ import annotations
import json
import logging
from hashlib import sha1
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple
from langchain.docstore.document import Document
from langchain.pydantic_v1 import BaseSettings
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
logger = logging.getLogger()
def has_mul_sub_str(s: str, *args: Any) -> bool:
"""
Check if a string contains multiple substrings.
Args:
s: string to check.
*args: substrings to check.
Returns:
True if all substrings are in the string, False otherwise.
"""
for a in args:
if a not in s:
return False
return True
class MyScaleSettings(BaseSettings):
"""MyScale client configuration.
Attribute:
        myscale_host (str) : A URL to connect to MyScale backend.
Defaults to 'localhost'.
myscale_port (int) : URL port to connect with HTTP. Defaults to 8443.
username (str) : Username to login. Defaults to None.
password (str) : Password to login. Defaults to None.
index_type (str): index type string.
index_param (dict): index build parameter.
database (str) : Database name to find the table. Defaults to 'default'.
table (str) : Table name to operate on.
Defaults to 'vector_table'.
metric (str) : Metric to compute distance,
supported are ('L2', 'Cosine', 'IP'). Defaults to 'Cosine'.
column_map (Dict) : Column type map to project column name onto langchain
semantics. Must have keys: `text`, `id`, `vector`,
must be same size to number of columns. For example:
.. code-block:: python
{
'id': 'text_id',
'vector': 'text_embedding',
'text': 'text_plain',
'metadata': 'metadata_dictionary_in_json',
}
Defaults to identity map.
"""
host: str = "localhost"
port: int = 8443
username: Optional[str] = None
password: Optional[str] = None
index_type: str = "MSTG"
index_param: Optional[Dict[str, str]] = None
column_map: Dict[str, str] = {
"id": "id",
"text": "text",
"vector": "vector",
"metadata": "metadata",
}
database: str = "default"
table: str = "langchain"
metric: str = "Cosine"
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
class Config:
env_file = ".env"
env_prefix = "myscale_"
env_file_encoding = "utf-8"
class MyScale(VectorStore):
"""`MyScale` vector store.
You need a `clickhouse-connect` python package, and a valid account
to connect to MyScale.
    MyScale not only supports search with simple vector indexes;
    it also supports complex queries with multiple conditions,
    constraints and even sub-queries.
For more information, please visit
[myscale official site](https://docs.myscale.com/en/overview/)
"""
def __init__(
self,
embedding: Embeddings,
config: Optional[MyScaleSettings] = None,
**kwargs: Any,
) -> None:
"""MyScale Wrapper to LangChain
embedding (Embeddings):
config (MyScaleSettings): Configuration to MyScale Client
Other keyword arguments will pass into
[clickhouse-connect](https://docs.myscale.com/)
"""
try:
from clickhouse_connect import get_client
except ImportError:
raise ImportError(
"Could not import clickhouse connect python package. "
"Please install it with `pip install clickhouse-connect`."
)
try:
from tqdm import tqdm
self.pgbar = tqdm
except ImportError:
# Just in case if tqdm is not installed
            self.pgbar = lambda x, **kwargs: x  # accept tqdm-style kwargs when tqdm is absent
super().__init__()
if config is not None:
self.config = config
else:
self.config = MyScaleSettings()
assert self.config
assert self.config.host and self.config.port
assert (
self.config.column_map
and self.config.database
and self.config.table
and self.config.metric
)
for k in ["id", "vector", "text", "metadata"]:
assert k in self.config.column_map
assert self.config.metric.upper() in ["IP", "COSINE", "L2"]
if self.config.metric in ["ip", "cosine", "l2"]:
logger.warning(
"Lower case metric types will be deprecated "
"the future. Please use one of ('IP', 'Cosine', 'L2')"
)
# initialize the schema
dim = len(embedding.embed_query("try this out"))
index_params = (
", " + ",".join([f"'{k}={v}'" for k, v in self.config.index_param.items()])
if self.config.index_param
else ""
)
schema_ = f"""
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} String,
{self.config.column_map['text']} String,
{self.config.column_map['vector']} Array(Float32),
{self.config.column_map['metadata']} JSON,
CONSTRAINT cons_vec_len CHECK length(\
{self.config.column_map['vector']}) = {dim},
VECTOR INDEX vidx {self.config.column_map['vector']} \
TYPE {self.config.index_type}(\
'metric_type={self.config.metric}'{index_params})
) ENGINE = MergeTree ORDER BY {self.config.column_map['id']}
"""
self.dim = dim
self.BS = "\\"
self.must_escape = ("\\", "'")
self._embeddings = embedding
self.dist_order = (
"ASC" if self.config.metric.upper() in ["COSINE", "L2"] else "DESC"
)
# Create a connection to myscale
self.client = get_client(
host=self.config.host,
port=self.config.port,
username=self.config.username,
password=self.config.password,
**kwargs,
)
self.client.command("SET allow_experimental_object_type=1")
self.client.command(schema_)
@property
def embeddings(self) -> Embeddings:
return self._embeddings
def escape_str(self, value: str) -> str:
return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value)
def _build_istr(self, transac: Iterable, column_names: Iterable[str]) -> str:
ks = ",".join(column_names)
_data = []
for n in transac:
n = ",".join([f"'{self.escape_str(str(_n))}'" for _n in n])
_data.append(f"({n})")
i_str = f"""
INSERT INTO TABLE
{self.config.database}.{self.config.table}({ks})
VALUES
{','.join(_data)}
"""
return i_str
def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None:
_i_str = self._build_istr(transac, column_names)
self.client.command(_i_str)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
batch_size: int = 32,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
ids: Optional list of ids to associate with the texts.
batch_size: Batch size of insertion
            metadatas: Optional metadata dicts to attach to the inserted texts
Returns:
List of ids from adding the texts into the vectorstore.
"""
# Embed and create the documents
ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts]
colmap_ = self.config.column_map
transac = []
column_names = {
colmap_["id"]: ids,
colmap_["text"]: texts,
colmap_["vector"]: map(self._embeddings.embed_query, texts),
}
metadatas = metadatas or [{} for _ in texts]
column_names[colmap_["metadata"]] = map(json.dumps, metadatas)
assert len(set(colmap_) - set(column_names)) >= 0
keys, values = zip(*column_names.items())
try:
t = None
for v in self.pgbar(
zip(*values), desc="Inserting data...", total=len(metadatas)
):
assert len(v[keys.index(self.config.column_map["vector"])]) == self.dim
transac.append(v)
if len(transac) == batch_size:
if t:
t.join()
t = Thread(target=self._insert, args=[transac, keys])
t.start()
transac = []
if len(transac) > 0:
if t:
t.join()
self._insert(transac, keys)
return [i for i in ids]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
@classmethod
def from_texts(
cls,
texts: Iterable[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
config: Optional[MyScaleSettings] = None,
text_ids: Optional[Iterable[str]] = None,
batch_size: int = 32,
**kwargs: Any,
) -> MyScale:
"""Create Myscale wrapper with existing texts
Args:
texts (Iterable[str]): List or tuple of strings to be added
embedding (Embeddings): Function to extract text embedding
config (MyScaleSettings, Optional): Myscale configuration
text_ids (Optional[Iterable], optional): IDs for the texts.
Defaults to None.
            batch_size (int, optional): Batch size when transmitting data to MyScale.
Defaults to 32.
            metadatas (List[dict], optional): Metadata dicts for the texts. Defaults to None.
Other keyword arguments will pass into
[clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api)
Returns:
MyScale Index
"""
ctx = cls(embedding, config, **kwargs)
ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas)
return ctx
def __repr__(self) -> str:
"""Text representation for myscale, prints backends, username and schemas.
Easy to use with `str(Myscale())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ "
_repr += f"{self.config.host}:{self.config.port}\033[0m\n\n"
_repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n"
_repr += "-" * 51 + "\n"
for r in self.client.query(
f"DESC {self.config.database}.{self.config.table}"
).named_results():
_repr += (
f"|\033[94m{r['name']:24s}\033[0m|\033[96m{r['type']:24s}\033[0m|\n"
)
_repr += "-" * 51 + "\n"
return _repr
def _build_qstr(
self, q_emb: List[float], topk: int, where_str: Optional[str] = None
) -> str:
q_emb_str = ",".join(map(str, q_emb))
if where_str:
where_str = f"PREWHERE {where_str}"
else:
where_str = ""
q_str = f"""
SELECT {self.config.column_map['text']},
{self.config.column_map['metadata']}, dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk}
"""
return q_str
def similarity_search(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Document]:
"""Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
            NOTE: Please do not let end users fill this in, and always be aware
                of SQL injection. When dealing with metadata, remember to
                use `{self.metadata_column}.attribute` instead of `attribute`
                alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(
self._embeddings.embed_query(query), k, where_str, **kwargs
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search with MyScale by vectors
Args:
            embedding (List[float]): query embedding vector
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
            NOTE: Please do not let end users fill this in, and always be aware
                of SQL injection. When dealing with metadata, remember to
                use `{self.metadata_column}.attribute` instead of `attribute`
                alone. The default name for it is `metadata`.
Returns:
            List[Document]: List of Documents most similar to the query vector
"""
q_str = self._build_qstr(embedding, k, where_str)
try:
return [
Document(
page_content=r[self.config.column_map["text"]],
metadata=r[self.config.column_map["metadata"]],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
            NOTE: Please do not let end users fill this in, and always be aware
                of SQL injection. When dealing with metadata, remember to
                use `{self.metadata_column}.attribute` instead of `attribute`
                alone. The default name for it is `metadata`.
Returns:
List[Document]: List of documents most similar to the query text
and cosine distance in float for each.
Lower score represents more similarity.
"""
q_str = self._build_qstr(self._embeddings.embed_query(query), k, where_str)
try:
return [
(
Document(
page_content=r[self.config.column_map["text"]],
metadata=r[self.config.column_map["metadata"]],
),
r["dist"],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def drop(self) -> None:
"""
Helper function: Drop data
"""
self.client.command(
f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}"
)
def delete(
self,
ids: Optional[List[str]] = None,
where_str: Optional[str] = None,
**kwargs: Any,
) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
assert not (
ids is None and where_str is None
), "You need to specify where to be deleted! Either with `ids` or `where_str`"
conds = []
if ids:
conds.extend([f"{self.config.column_map['id']} = '{id}'" for id in ids])
if where_str:
conds.append(where_str)
assert len(conds) > 0
where_str_final = " AND ".join(conds)
qstr = (
f"DELETE FROM {self.config.database}.{self.config.table} "
f"WHERE {where_str_final}"
)
try:
self.client.command(qstr)
return True
except Exception as e:
logger.error(str(e))
return False
@property
def metadata_column(self) -> str:
return self.config.column_map["metadata"]
class MyScaleWithoutJSON(MyScale):
"""MyScale vector store without metadata column
    This is super handy if you are working with a SQL-native table.
"""
def __init__(
self,
embedding: Embeddings,
config: Optional[MyScaleSettings] = None,
must_have_cols: List[str] = [],
**kwargs: Any,
) -> None:
"""Building a myscale vector store without metadata column
embedding (Embeddings): embedding model
config (MyScaleSettings): Configuration to MyScale Client
must_have_cols (List[str]): column names to be included in query
Other keyword arguments will pass into
[clickhouse-connect](https://docs.myscale.com/)
"""
super().__init__(embedding, config, **kwargs)
self.must_have_cols: List[str] = must_have_cols
def _build_qstr(
self, q_emb: List[float], topk: int, where_str: Optional[str] = None
) -> str:
q_emb_str = ",".join(map(str, q_emb))
if where_str:
where_str = f"PREWHERE {where_str}"
else:
where_str = ""
q_str = f"""
SELECT {self.config.column_map['text']}, dist,
{','.join(self.must_have_cols)}
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk}
"""
return q_str
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search with MyScale by vectors
Args:
            embedding (List[float]): query embedding vector
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
            NOTE: Please do not let end users fill this in, and always be aware
                of SQL injection. When dealing with metadata, remember to
                use `{self.metadata_column}.attribute` instead of `attribute`
                alone. The default name for it is `metadata`.
Returns:
            List[Document]: List of Documents most similar to the query vector
"""
q_str = self._build_qstr(embedding, k, where_str)
try:
return [
Document(
page_content=r[self.config.column_map["text"]],
metadata={k: r[k] for k in self.must_have_cols},
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
            NOTE: Please do not let end users fill this in, and always be aware
                of SQL injection. When dealing with metadata, remember to
                use `{self.metadata_column}.attribute` instead of `attribute`
                alone. The default name for it is `metadata`.
Returns:
List[Document]: List of documents most similar to the query text
and cosine distance in float for each.
Lower score represents more similarity.
"""
q_str = self._build_qstr(self._embeddings.embed_query(query), k, where_str)
try:
return [
(
Document(
page_content=r[self.config.column_map["text"]],
metadata={k: r[k] for k in self.must_have_cols},
),
r["dist"],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
@property
def metadata_column(self) -> str:
return ""
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~document_loaders~baiducloud_bos_directory.py | from typing import Any, Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class BaiduBOSDirectoryLoader(BaseLoader):
"""Load from `Baidu BOS directory`."""
def __init__(self, conf: Any, bucket: str, prefix: str = ""):
"""Initialize with BOS config, bucket and prefix.
:param conf(BosConfig): BOS config.
:param bucket(str): BOS bucket.
:param prefix(str): prefix.
"""
self.conf = conf
self.bucket = bucket
self.prefix = prefix
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
"""Load documents."""
try:
from baidubce.services.bos.bos_client import BosClient
except ImportError:
raise ImportError(
"Please install bce-python-sdk with `pip install bce-python-sdk`."
)
client = BosClient(self.conf)
contents = []
marker = ""
while True:
response = client.list_objects(
bucket_name=self.bucket,
prefix=self.prefix,
marker=marker,
max_keys=1000,
)
contents_len = len(response.contents)
contents.extend(response.contents)
if response.is_truncated or contents_len < int(str(response.max_keys)):
break
marker = response.next_marker
from langchain.document_loaders.baiducloud_bos_file import BaiduBOSFileLoader
for content in contents:
if str(content.key).endswith("/"):
continue
loader = BaiduBOSFileLoader(self.conf, self.bucket, str(content.key))
yield loader.load()[0]
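# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Example of loading every object under a prefix of a BOS bucket. Access key, secret
# key, endpoint, bucket and prefix are placeholders; `bce-python-sdk` must be
# installed.
if __name__ == "__main__":
    from baidubce.auth.bce_credentials import BceCredentials
    from baidubce.bce_client_configuration import BceClientConfiguration

    conf = BceClientConfiguration(
        credentials=BceCredentials("your-ak", "your-sk"),
        endpoint="https://bj.bcebos.com",
    )
    loader = BaiduBOSDirectoryLoader(conf, bucket="my-bucket", prefix="docs/")
    for doc in loader.lazy_load():
        print(doc.metadata)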
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~memory~chat_message_histories~neo4j.py | from typing import List, Optional, Union
from langchain.schema import BaseChatMessageHistory
from langchain.schema.messages import BaseMessage, messages_from_dict
from langchain.utils import get_from_env
class Neo4jChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Neo4j database."""
def __init__(
self,
session_id: Union[str, int],
url: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
database: str = "neo4j",
node_label: str = "Session",
window: int = 3,
):
try:
import neo4j
except ImportError:
raise ValueError(
"Could not import neo4j python package. "
"Please install it with `pip install neo4j`."
)
# Make sure session id is not null
if not session_id:
raise ValueError("Please ensure that the session_id parameter is provided")
url = get_from_env("url", "NEO4J_URI", url)
username = get_from_env("username", "NEO4J_USERNAME", username)
password = get_from_env("password", "NEO4J_PASSWORD", password)
database = get_from_env("database", "NEO4J_DATABASE", database)
self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password))
self._database = database
self._session_id = session_id
self._node_label = node_label
self._window = window
# Verify connection
try:
self._driver.verify_connectivity()
except neo4j.exceptions.ServiceUnavailable:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the url is correct"
)
except neo4j.exceptions.AuthError:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the username and password are correct"
)
# Create session node
self._driver.execute_query(
f"MERGE (s:`{self._node_label}` {{id:$session_id}})",
{"session_id": self._session_id},
).summary
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from Neo4j"""
query = (
f"MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) "
"WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT*0.."
f"{self._window*2}]-() WITH p, length(p) AS length "
"ORDER BY length DESC LIMIT 1 UNWIND reverse(nodes(p)) AS node "
"RETURN {data:{content: node.content}, type:node.type} AS result"
)
records, _, _ = self._driver.execute_query(
query, {"session_id": self._session_id}
)
messages = messages_from_dict([el["result"] for el in records])
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in Neo4j"""
query = (
f"MATCH (s:`{self._node_label}`) WHERE s.id = $session_id "
"OPTIONAL MATCH (s)-[lm:LAST_MESSAGE]->(last_message) "
"CREATE (s)-[:LAST_MESSAGE]->(new:Message) "
"SET new += {type:$type, content:$content} "
"WITH new, lm, last_message WHERE last_message IS NOT NULL "
"CREATE (last_message)-[:NEXT]->(new) "
"DELETE lm"
)
self._driver.execute_query(
query,
{
"type": message.type,
"content": message.content,
"session_id": self._session_id,
},
).summary
def clear(self) -> None:
"""Clear session memory from Neo4j"""
query = (
f"MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) "
"WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT]-() "
"WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 "
"UNWIND nodes(p) as node DETACH DELETE node;"
)
self._driver.execute_query(query, {"session_id": self._session_id}).summary
def __del__(self) -> None:
if self._driver:
self._driver.close()
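# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Minimal example of persisting a conversation in Neo4j. The connection values are
# placeholders and can also be supplied via NEO4J_URI / NEO4J_USERNAME / NEO4J_PASSWORD.
if __name__ == "__main__":
    from langchain.schema.messages import AIMessage, HumanMessage

    history = Neo4jChatMessageHistory(
        session_id="session-1",
        url="bolt://localhost:7687",
        username="neo4j",
        password="password",
    )
    history.add_message(HumanMessage(content="Hi there!"))
    history.add_message(AIMessage(content="Hello! How can I help?"))
    print(history.messages)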
| [] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~vectorstores~astradb.py | from __future__ import annotations
import uuid
import warnings
from concurrent.futures import ThreadPoolExecutor
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils.iter import batch_iterate
from langchain.vectorstores.utils import maximal_marginal_relevance
ADBVST = TypeVar("ADBVST", bound="AstraDB")
T = TypeVar("T")
U = TypeVar("U")
DocDict = Dict[str, Any] # dicts expressing entries to insert
# Batch/concurrency default values (if parameters not provided):
# Size of batches for bulk insertions:
# (20 is the max batch size for the HTTP API at the time of writing)
DEFAULT_BATCH_SIZE = 20
# Number of threads to insert batches concurrently:
DEFAULT_BULK_INSERT_BATCH_CONCURRENCY = 5
# Number of threads in a batch to insert pre-existing entries:
DEFAULT_BULK_INSERT_OVERWRITE_CONCURRENCY = 10
# Number of threads (for deleting multiple rows concurrently):
DEFAULT_BULK_DELETE_CONCURRENCY = 20
def _unique_list(lst: List[T], key: Callable[[T], U]) -> List[T]:
visited_keys: Set[U] = set()
new_lst = []
for item in lst:
item_key = key(item)
if item_key not in visited_keys:
visited_keys.add(item_key)
new_lst.append(item)
return new_lst
class AstraDB(VectorStore):
"""Wrapper around DataStax Astra DB for vector-store workloads.
To use it, you need a recent installation of the `astrapy` library
and an Astra DB cloud database.
For quickstart and details, visit:
docs.datastax.com/en/astra/home/astra.html
Example:
.. code-block:: python
from langchain.vectorstores import AstraDB
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = AstraDB(
embedding=embeddings,
collection_name="my_store",
token="AstraCS:...",
api_endpoint="https://<DB-ID>-us-east1.apps.astra.datastax.com"
)
vectorstore.add_texts(["Giraffes", "All good here"])
results = vectorstore.similarity_search("Everything's ok", k=1)
"""
@staticmethod
def _filter_to_metadata(filter_dict: Optional[Dict[str, str]]) -> Dict[str, Any]:
if filter_dict is None:
return {}
else:
return {f"metadata.{mdk}": mdv for mdk, mdv in filter_dict.items()}
def __init__(
self,
*,
embedding: Embeddings,
collection_name: str,
token: Optional[str] = None,
api_endpoint: Optional[str] = None,
astra_db_client: Optional[Any] = None, # 'astrapy.db.AstraDB' if passed
namespace: Optional[str] = None,
metric: Optional[str] = None,
batch_size: Optional[int] = None,
bulk_insert_batch_concurrency: Optional[int] = None,
bulk_insert_overwrite_concurrency: Optional[int] = None,
bulk_delete_concurrency: Optional[int] = None,
) -> None:
try:
from astrapy.db import (
AstraDB as LibAstraDB,
)
from astrapy.db import (
AstraDBCollection as LibAstraDBCollection,
)
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import a recent astrapy python package. "
"Please install it with `pip install --upgrade astrapy`."
)
"""
Create an AstraDB vector store object.
Args (only keyword-arguments accepted):
embedding (Embeddings): embedding function to use.
collection_name (str): name of the Astra DB collection to create/use.
token (Optional[str]): API token for Astra DB usage.
api_endpoint (Optional[str]): full URL to the API endpoint,
such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
namespace (Optional[str]): namespace (aka keyspace) where the
collection is created. Defaults to the database's "default namespace".
metric (Optional[str]): similarity function to use out of those
available in Astra DB. If left out, it will use Astra DB API's
defaults (i.e. "cosine" - but, for performance reasons,
"dot_product" is suggested if embeddings are normalized to one).
Advanced arguments (coming with sensible defaults):
batch_size (Optional[int]): Size of batches for bulk insertions.
bulk_insert_batch_concurrency (Optional[int]): Number of threads
to insert batches concurrently.
bulk_insert_overwrite_concurrency (Optional[int]): Number of
threads in a batch to insert pre-existing entries.
bulk_delete_concurrency (Optional[int]): Number of threads
(for deleting multiple rows concurrently).
"""
# Conflicting-arg checks:
if astra_db_client is not None:
if token is not None or api_endpoint is not None:
raise ValueError(
"You cannot pass 'astra_db_client' to AstraDB if passing "
"'token' and 'api_endpoint'."
)
self.embedding = embedding
self.collection_name = collection_name
self.token = token
self.api_endpoint = api_endpoint
self.namespace = namespace
# Concurrency settings
self.batch_size: int = batch_size or DEFAULT_BATCH_SIZE
self.bulk_insert_batch_concurrency: int = (
bulk_insert_batch_concurrency or DEFAULT_BULK_INSERT_BATCH_CONCURRENCY
)
self.bulk_insert_overwrite_concurrency: int = (
bulk_insert_overwrite_concurrency
or DEFAULT_BULK_INSERT_OVERWRITE_CONCURRENCY
)
self.bulk_delete_concurrency: int = (
bulk_delete_concurrency or DEFAULT_BULK_DELETE_CONCURRENCY
)
# "vector-related" settings
self._embedding_dimension: Optional[int] = None
self.metric = metric
if astra_db_client is not None:
self.astra_db = astra_db_client
else:
self.astra_db = LibAstraDB(
token=self.token,
api_endpoint=self.api_endpoint,
namespace=self.namespace,
)
self._provision_collection()
self.collection = LibAstraDBCollection(
collection_name=self.collection_name,
astra_db=self.astra_db,
)
def _get_embedding_dimension(self) -> int:
if self._embedding_dimension is None:
self._embedding_dimension = len(
self.embedding.embed_query("This is a sample sentence.")
)
return self._embedding_dimension
def _drop_collection(self) -> None:
"""
Drop the collection from storage.
This is meant as an internal-usage method, no members
are set other than actual deletion on the backend.
"""
_ = self.astra_db.delete_collection(
collection_name=self.collection_name,
)
return None
def _provision_collection(self) -> None:
"""
Run the API invocation to create the collection on the backend.
Internal-usage method, no object members are set,
other than working on the underlying actual storage.
"""
_ = self.astra_db.create_collection(
dimension=self._get_embedding_dimension(),
collection_name=self.collection_name,
metric=self.metric,
)
return None
@property
def embeddings(self) -> Embeddings:
return self.embedding
@staticmethod
def _dont_flip_the_cos_score(similarity0to1: float) -> float:
"""Keep similarity from client unchanged ad it's in [0:1] already."""
return similarity0to1
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The underlying API calls already returns a "score proper",
i.e. one in [0, 1] where higher means more *similar*,
so here the final score transformation is not reversing the interval:
"""
return self._dont_flip_the_cos_score
def clear(self) -> None:
"""Empty the collection of all its stored entries."""
self._drop_collection()
self._provision_collection()
return None
def delete_by_document_id(self, document_id: str) -> bool:
"""
Remove a single document from the store, given its document_id (str).
Return True if a document has indeed been deleted, False if ID not found.
"""
deletion_response = self.collection.delete(document_id)
return ((deletion_response or {}).get("status") or {}).get(
"deletedCount", 0
) == 1
def delete(
self,
ids: Optional[List[str]] = None,
concurrency: Optional[int] = None,
**kwargs: Any,
) -> Optional[bool]:
"""Delete by vector ids.
Args:
ids (Optional[List[str]]): List of ids to delete.
concurrency (Optional[int]): max number of threads issuing
single-doc delete requests. Defaults to instance-level setting.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
if kwargs:
warnings.warn(
"Method 'delete' of AstraDB vector store invoked with "
f"unsupported arguments ({', '.join(sorted(kwargs.keys()))}), "
"which will be ignored."
)
if ids is None:
raise ValueError("No ids provided to delete.")
_max_workers = concurrency or self.bulk_delete_concurrency
with ThreadPoolExecutor(max_workers=_max_workers) as tpe:
_ = list(
tpe.map(
self.delete_by_document_id,
ids,
)
)
return True
def delete_collection(self) -> None:
"""
Completely delete the collection from the database (as opposed
to 'clear()', which empties it only).
Stored data is lost and unrecoverable, resources are freed.
Use with caution.
"""
self._drop_collection()
return None
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
*,
batch_size: Optional[int] = None,
batch_concurrency: Optional[int] = None,
overwrite_concurrency: Optional[int] = None,
**kwargs: Any,
) -> List[str]:
"""Run texts through the embeddings and add them to the vectorstore.
If passing explicit ids, those entries whose id is in the store already
will be replaced.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of ids.
batch_size (Optional[int]): Number of documents in each API call.
Check the underlying Astra DB HTTP API specs for the max value
(20 at the time of writing this). If not provided, defaults
to the instance-level setting.
batch_concurrency (Optional[int]): number of threads to process
insertion batches concurrently. Defaults to instance-level
setting if not provided.
overwrite_concurrency (Optional[int]): number of threads to process
pre-existing documents in each batch (which require individual
API calls). Defaults to instance-level setting if not provided.
Returns:
List[str]: List of ids of the added texts.
"""
if kwargs:
warnings.warn(
"Method 'add_texts' of AstraDB vector store invoked with "
f"unsupported arguments ({', '.join(sorted(kwargs.keys()))}), "
"which will be ignored."
)
_texts = list(texts)
if ids is None:
ids = [uuid.uuid4().hex for _ in _texts]
if metadatas is None:
metadatas = [{} for _ in _texts]
#
embedding_vectors = self.embedding.embed_documents(_texts)
documents_to_insert = [
{
"content": b_txt,
"_id": b_id,
"$vector": b_emb,
"metadata": b_md,
}
for b_txt, b_emb, b_id, b_md in zip(
_texts,
embedding_vectors,
ids,
metadatas,
)
]
# make unique by id, keeping the last
uniqued_documents_to_insert = _unique_list(
documents_to_insert[::-1],
lambda document: document["_id"],
)[::-1]
all_ids = []
def _handle_batch(document_batch: List[DocDict]) -> List[str]:
im_result = self.collection.insert_many(
documents=document_batch,
options={"ordered": False},
partial_failures_allowed=True,
)
if "status" not in im_result:
raise ValueError(
f"API Exception while running bulk insertion: {str(im_result)}"
)
batch_inserted = im_result["status"]["insertedIds"]
# estimation of the preexisting documents that failed
missed_inserted_ids = {
document["_id"] for document in document_batch
} - set(batch_inserted)
errors = im_result.get("errors", [])
# careful for other sources of error other than "doc already exists"
num_errors = len(errors)
unexpected_errors = any(
error.get("errorCode") != "DOCUMENT_ALREADY_EXISTS" for error in errors
)
if num_errors != len(missed_inserted_ids) or unexpected_errors:
raise ValueError(
f"API Exception while running bulk insertion: {str(errors)}"
)
# deal with the missing insertions as upserts
missing_from_batch = [
document
for document in document_batch
if document["_id"] in missed_inserted_ids
]
def _handle_missing_document(missing_document: DocDict) -> str:
replacement_result = self.collection.find_one_and_replace(
filter={"_id": missing_document["_id"]},
replacement=missing_document,
)
return replacement_result["data"]["document"]["_id"]
_u_max_workers = (
overwrite_concurrency or self.bulk_insert_overwrite_concurrency
)
with ThreadPoolExecutor(max_workers=_u_max_workers) as tpe2:
batch_replaced = list(
tpe2.map(
_handle_missing_document,
missing_from_batch,
)
)
upsert_ids = batch_inserted + batch_replaced
return upsert_ids
_b_max_workers = batch_concurrency or self.bulk_insert_batch_concurrency
with ThreadPoolExecutor(max_workers=_b_max_workers) as tpe:
all_ids_nested = tpe.map(
_handle_batch,
batch_iterate(
batch_size or self.batch_size,
uniqued_documents_to_insert,
),
)
all_ids = [iid for id_list in all_ids_nested for iid in id_list]
return all_ids
def similarity_search_with_score_id_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float, str]]:
"""Return docs most similar to embedding vector.
Args:
embedding (str): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
Returns:
List of (Document, score, id), the most similar to the query vector.
"""
metadata_parameter = self._filter_to_metadata(filter)
#
hits = list(
self.collection.paginated_find(
filter=metadata_parameter,
sort={"$vector": embedding},
options={"limit": k},
projection={
"_id": 1,
"content": 1,
"metadata": 1,
"$similarity": 1,
},
)
)
#
return [
(
Document(
page_content=hit["content"],
metadata=hit["metadata"],
),
hit["$similarity"],
hit["_id"],
)
for hit in hits
]
def similarity_search_with_score_id(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float, str]]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_with_score_id_by_vector(
embedding=embedding_vector,
k=k,
filter=filter,
)
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding (str): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
Returns:
List of (Document, score), the most similar to the query vector.
"""
return [
(doc, score)
for (doc, score, doc_id) in self.similarity_search_with_score_id_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
]
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_by_vector(
embedding_vector,
k,
filter=filter,
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
return [
doc
for doc, _ in self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
)
]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float]]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_with_score_by_vector(
embedding_vector,
k,
filter=filter,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Returns:
List of Documents selected by maximal marginal relevance.
"""
metadata_parameter = self._filter_to_metadata(filter)
prefetch_hits = list(
self.collection.paginated_find(
filter=metadata_parameter,
sort={"$vector": embedding},
options={"limit": fetch_k},
projection={
"_id": 1,
"content": 1,
"metadata": 1,
"$similarity": 1,
"$vector": 1,
},
)
)
mmr_chosen_indices = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
[prefetch_hit["$vector"] for prefetch_hit in prefetch_hits],
k=k,
lambda_mult=lambda_mult,
)
mmr_hits = [
prefetch_hit
for prefetch_index, prefetch_hit in enumerate(prefetch_hits)
if prefetch_index in mmr_chosen_indices
]
return [
Document(
page_content=hit["content"],
metadata=hit["metadata"],
)
for hit in mmr_hits
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int = 4): Number of Documents to return.
fetch_k (int = 20): Number of Documents to fetch to pass to MMR algorithm.
lambda_mult (float = 0.5): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Optional.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding_vector = self.embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding_vector,
k,
fetch_k,
lambda_mult=lambda_mult,
filter=filter,
)
@classmethod
def from_texts(
cls: Type[ADBVST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> ADBVST:
"""Create an Astra DB vectorstore from raw texts.
Args:
texts (List[str]): the texts to insert.
embedding (Embeddings): the embedding function to use in the store.
metadatas (Optional[List[dict]]): metadata dicts for the texts.
ids (Optional[List[str]]): ids to associate to the texts.
*Additional arguments*: you can pass any argument that you would
to 'add_texts' and/or to the 'AstraDB' class constructor
(see these methods for details). These arguments will be
routed to the respective methods as they are.
Returns:
            an `AstraDB` vectorstore.
"""
known_kwargs = {
"collection_name",
"token",
"api_endpoint",
"astra_db_client",
"namespace",
"metric",
"batch_size",
"bulk_insert_batch_concurrency",
"bulk_insert_overwrite_concurrency",
"bulk_delete_concurrency",
"batch_concurrency",
"overwrite_concurrency",
}
if kwargs:
unknown_kwargs = set(kwargs.keys()) - known_kwargs
if unknown_kwargs:
warnings.warn(
"Method 'from_texts' of AstraDB vector store invoked with "
f"unsupported arguments ({', '.join(sorted(unknown_kwargs))}), "
"which will be ignored."
)
collection_name: str = kwargs["collection_name"]
token = kwargs.get("token")
api_endpoint = kwargs.get("api_endpoint")
astra_db_client = kwargs.get("astra_db_client")
namespace = kwargs.get("namespace")
metric = kwargs.get("metric")
astra_db_store = cls(
embedding=embedding,
collection_name=collection_name,
token=token,
api_endpoint=api_endpoint,
astra_db_client=astra_db_client,
namespace=namespace,
metric=metric,
batch_size=kwargs.get("batch_size"),
bulk_insert_batch_concurrency=kwargs.get("bulk_insert_batch_concurrency"),
bulk_insert_overwrite_concurrency=kwargs.get(
"bulk_insert_overwrite_concurrency"
),
bulk_delete_concurrency=kwargs.get("bulk_delete_concurrency"),
)
astra_db_store.add_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
batch_size=kwargs.get("batch_size"),
batch_concurrency=kwargs.get("batch_concurrency"),
overwrite_concurrency=kwargs.get("overwrite_concurrency"),
)
return astra_db_store
@classmethod
def from_documents(
cls: Type[ADBVST],
documents: List[Document],
embedding: Embeddings,
**kwargs: Any,
) -> ADBVST:
"""Create an Astra DB vectorstore from a document list.
Utility method that defers to 'from_texts' (see that one).
Args: see 'from_texts', except here you have to supply 'documents'
in place of 'texts' and 'metadatas'.
Returns:
an `AstraDB` vectorstore.
"""
return super().from_documents(documents, embedding, **kwargs)
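# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Example of building the store from texts with metadata, then running a filtered
# similarity search and an MMR search. The token and API endpoint are placeholders;
# any Embeddings implementation (OpenAIEmbeddings here) can be used.
if __name__ == "__main__":
    from langchain.embeddings.openai import OpenAIEmbeddings

    store = AstraDB.from_texts(
        texts=["Giraffes are tall", "Penguins swim well"],
        embedding=OpenAIEmbeddings(),
        metadatas=[{"topic": "giraffes"}, {"topic": "penguins"}],
        collection_name="demo_store",
        token="AstraCS:...",  # placeholder
        api_endpoint="https://<DB-ID>-us-east1.apps.astra.datastax.com",  # placeholder
    )
    print(store.similarity_search("tall animals", k=1, filter={"topic": "giraffes"}))
    print(store.max_marginal_relevance_search("animals", k=2, fetch_k=4))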
| [
"1"
] |
2024-01-10 | bentoml/langchain | libs~langchain~langchain~schema~runnable~fallbacks.py | import asyncio
from typing import (
TYPE_CHECKING,
Any,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from langchain.load.dump import dumpd
from langchain.pydantic_v1 import BaseModel
from langchain.schema.runnable.base import Runnable, RunnableSerializable
from langchain.schema.runnable.config import (
RunnableConfig,
ensure_config,
get_async_callback_manager_for_config,
get_callback_manager_for_config,
get_config_list,
patch_config,
)
from langchain.schema.runnable.utils import (
ConfigurableFieldSpec,
Input,
Output,
get_unique_config_specs,
)
if TYPE_CHECKING:
from langchain.callbacks.manager import AsyncCallbackManagerForChainRun
class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
"""A Runnable that can fallback to other Runnables if it fails.
External APIs (e.g., APIs for a language model) may at times experience
degraded performance or even downtime.
In these cases, it can be useful to have a fallback runnable that can be
used in place of the original runnable (e.g., fallback to another LLM provider).
Fallbacks can be defined at the level of a single runnable, or at the level
of a chain of runnables. Fallbacks are tried in order until one succeeds or
all fail.
While you can instantiate a ``RunnableWithFallbacks`` directly, it is usually
more convenient to use the ``with_fallbacks`` method on a runnable.
Example:
.. code-block:: python
from langchain.chat_models.openai import ChatOpenAI
from langchain.chat_models.anthropic import ChatAnthropic
model = ChatAnthropic().with_fallbacks([ChatOpenAI()])
# Will usually use ChatAnthropic, but fallback to ChatOpenAI
# if ChatAnthropic fails.
model.invoke('hello')
# And you can also use fallbacks at the level of a chain.
# Here if both LLM providers fail, we'll fallback to a good hardcoded
# response.
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda
def when_all_is_lost(inputs):
return ("Looks like our LLM providers are down. "
"Here's a nice 🦜️ emoji for you instead.")
chain_with_fallback = (
PromptTemplate.from_template('Tell me a joke about {topic}')
| model
| StrOutputParser()
).with_fallbacks([RunnableLambda(when_all_is_lost)])
"""
runnable: Runnable[Input, Output]
"""The runnable to run first."""
fallbacks: Sequence[Runnable[Input, Output]]
"""A sequence of fallbacks to try."""
exceptions_to_handle: Tuple[Type[BaseException], ...] = (Exception,)
"""The exceptions on which fallbacks should be tried.
Any exception that is not a subclass of these exceptions will be raised immediately.
"""
class Config:
arbitrary_types_allowed = True
@property
def InputType(self) -> Type[Input]:
return self.runnable.InputType
@property
def OutputType(self) -> Type[Output]:
return self.runnable.OutputType
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
return self.runnable.get_input_schema(config)
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
return self.runnable.get_output_schema(config)
@property
def config_specs(self) -> Sequence[ConfigurableFieldSpec]:
return get_unique_config_specs(
spec
for step in [self.runnable, *self.fallbacks]
for spec in step.config_specs
)
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
@property
def runnables(self) -> Iterator[Runnable[Input, Output]]:
yield self.runnable
yield from self.fallbacks
def invoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
# setup callbacks
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
# start the root run
run_manager = callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name")
)
first_error = None
for runnable in self.runnables:
try:
output = runnable.invoke(
input,
patch_config(config, callbacks=run_manager.get_child()),
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
run_manager.on_chain_error(e)
raise e
else:
run_manager.on_chain_end(output)
return output
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
run_manager.on_chain_error(first_error)
raise first_error
async def ainvoke(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Output:
# setup callbacks
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name")
)
first_error = None
for runnable in self.runnables:
try:
output = await runnable.ainvoke(
input,
patch_config(config, callbacks=run_manager.get_child()),
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
await run_manager.on_chain_error(e)
raise e
else:
await run_manager.on_chain_end(output)
return output
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
await run_manager.on_chain_error(first_error)
raise first_error
def batch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
from langchain.callbacks.manager import CallbackManager
if return_exceptions:
raise NotImplementedError()
if not inputs:
return []
# setup callbacks
configs = get_config_list(config, len(inputs))
callback_managers = [
CallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers = [
cm.on_chain_start(
dumpd(self),
input if isinstance(input, dict) else {"input": input},
name=config.get("run_name"),
)
for cm, input, config in zip(callback_managers, inputs, configs)
]
first_error = None
for runnable in self.runnables:
try:
outputs = runnable.batch(
inputs,
[
# each step a child run of the corresponding root run
patch_config(config, callbacks=rm.get_child())
for rm, config in zip(run_managers, configs)
],
return_exceptions=return_exceptions,
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
for rm in run_managers:
rm.on_chain_error(e)
raise e
else:
for rm, output in zip(run_managers, outputs):
rm.on_chain_end(output)
return outputs
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
for rm in run_managers:
rm.on_chain_error(first_error)
raise first_error
async def abatch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
from langchain.callbacks.manager import AsyncCallbackManager
if return_exceptions:
raise NotImplementedError()
if not inputs:
return []
# setup callbacks
configs = get_config_list(config, len(inputs))
callback_managers = [
AsyncCallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers: List[AsyncCallbackManagerForChainRun] = await asyncio.gather(
*(
cm.on_chain_start(
dumpd(self),
input,
name=config.get("run_name"),
)
for cm, input, config in zip(callback_managers, inputs, configs)
)
)
first_error = None
for runnable in self.runnables:
try:
outputs = await runnable.abatch(
inputs,
[
# each step a child run of the corresponding root run
patch_config(config, callbacks=rm.get_child())
for rm, config in zip(run_managers, configs)
],
return_exceptions=return_exceptions,
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
await asyncio.gather(*(rm.on_chain_error(e) for rm in run_managers))
else:
await asyncio.gather(
*(
rm.on_chain_end(output)
for rm, output in zip(run_managers, outputs)
)
)
return outputs
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
await asyncio.gather(*(rm.on_chain_error(first_error) for rm in run_managers))
raise first_error
| [] |
2024-01-10 | bentoml/langchain | templates~neo4j-vector-memory~neo4j_vector_memory~history.py | from typing import Any, Dict, List, Union
from langchain.graphs import Neo4jGraph
from langchain.memory import ChatMessageHistory
from langchain.schema import AIMessage, HumanMessage
graph = Neo4jGraph()
def convert_messages(input: List[Dict[str, Any]]) -> ChatMessageHistory:
history = ChatMessageHistory()
for item in input:
history.add_user_message(item["result"]["question"])
history.add_ai_message(item["result"]["answer"])
return history
def get_history(input: Dict[str, Any]) -> List[Union[HumanMessage, AIMessage]]:
# Lookback conversation window
window = 3
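    # The Cypher query below walks backwards from the session's LAST_MESSAGE node
    # over at most `window` NEXT relationships, keeps the longest such path, and
    # replays its question/answer pairs in chronological order as chat history.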
data = graph.query(
"""
MATCH (u:User {id:$user_id})-[:HAS_SESSION]->(s:Session {id:$session_id}),
(s)-[:LAST_MESSAGE]->(last_message)
MATCH p=(last_message)<-[:NEXT*0.."""
+ str(window)
+ """]-()
WITH p, length(p) AS length
ORDER BY length DESC LIMIT 1
UNWIND reverse(nodes(p)) AS node
MATCH (node)-[:HAS_ANSWER]->(answer)
RETURN {question:node.text, answer:answer.text} AS result
""",
params=input,
)
history = convert_messages(data)
return history.messages
def save_history(input: Dict[str, Any]) -> str:
input["context"] = [el.metadata["id"] for el in input["context"]]
has_history = bool(input.pop("chat_history"))
# store history to database
if has_history:
graph.query(
"""
MATCH (u:User {id: $user_id})-[:HAS_SESSION]->(s:Session{id: $session_id}),
(s)-[l:LAST_MESSAGE]->(last_message)
CREATE (last_message)-[:NEXT]->(q:Question
{text:$question, rephrased:$rephrased_question, date:datetime()}),
(q)-[:HAS_ANSWER]->(:Answer {text:$output}),
(s)-[:LAST_MESSAGE]->(q)
DELETE l
WITH q
UNWIND $context AS c
MATCH (n) WHERE elementId(n) = c
MERGE (q)-[:RETRIEVED]->(n)
""",
params=input,
)
else:
graph.query(
"""MERGE (u:User {id: $user_id})
CREATE (u)-[:HAS_SESSION]->(s1:Session {id:$session_id}),
(s1)-[:LAST_MESSAGE]->(q:Question
{text:$question, rephrased:$rephrased_question, date:datetime()}),
(q)-[:HAS_ANSWER]->(:Answer {text:$output})
WITH q
UNWIND $context AS c
MATCH (n) WHERE elementId(n) = c
MERGE (q)-[:RETRIEVED]->(n)
""",
params=input,
)
# Return LLM response to the chain
return input["output"]
| [] |
2024-01-10 | tomasz-pilawa/prestashop_api | src~ai_boosting.py | import json
import logging
import openai
from bs4 import BeautifulSoup
from src import editing
def classify_categories(prestashop, openai_conn, product_ids_list: list[int]):
openai.api_key = openai_conn
    with open('data/cats_dict.json', encoding='utf-8') as file:
        cats_data = json.load(file)
    cats_classify = cats_data.get('cats_classify')
    cats_id_dict = cats_data.get('cat_id')
for product_id in product_ids_list:
product = prestashop.get('products', product_id).get('product')
product_desc = product['description_short']['language']['value']
product_cats = []
with open('data/prompts/classify_product.txt', 'r', encoding='utf-8') as file:
prompt_template = file.read().strip()
prompt = prompt_template.format(product=product_desc, cats=cats_classify)
response = openai.Completion.create(engine='text-davinci-003', prompt=prompt, max_tokens=400, temperature=0.2)
generated_text = response.choices[0].text
for part in generated_text.split(","):
category_name = part.strip()
if category_name in list(cats_classify.values()):
product_cats.append(category_name)
product_cats_ids = ['2'] + [cats_id_dict[cat] for cat in product_cats]
product_cats_upload = [{'id': cat_id} for cat_id in product_cats_ids]
product['id_category_default'] = product_cats_ids[-1]
product['associations']['categories']['category'] = product_cats_upload
editing.edit_presta_product(prestashop, product=product)
logging.info('FINISHED product classification')
def write_descriptions(prestashop, openai_conn, product_ids_list: list[int]):
openai.api_key = openai_conn
for product_id in product_ids_list:
product = prestashop.get('products', product_id).get('product')
product_name = product['name']['language']['value']
product_desc = product['description']['language']['value']
product_summary, product_ingredients = editing.manipulate_desc(product_desc)
with open('data/prompts/write_desc_2.txt', 'r', encoding='utf-8') as file:
prompt_template = file.read().strip()
prompt = prompt_template.format(product_name=product_name, product_desc=product_summary)
response = openai.Completion.create(engine='text-davinci-003', prompt=prompt, max_tokens=1900, temperature=0.25)
desc_short, desc_long = editing.make_desc(response.choices[0].text.strip())
with open('data/prompts/write_active.txt', 'r', encoding='utf-8') as file:
prompt_template = file.read().strip()
prompt = prompt_template.format(product_desc=product_ingredients)
response = openai.Completion.create(engine='text-davinci-003', prompt=prompt, max_tokens=1500, temperature=0.25)
desc_active = editing.make_active(response.choices[0].text.strip())
product['description_short']['language']['value'] = desc_short
product['description']['language']['value'] = desc_long + desc_active
editing.edit_presta_product(prestashop, product=product)
logging.info('FINISHED writing product descriptions')
def write_meta(prestashop, openai_conn, product_ids_list: list[int]):
openai.api_key = openai_conn
for product_id in product_ids_list:
product = prestashop.get('products', product_id)['product']
product_name = product['name']['language']['value']
product_desc = product['description_short']['language']['value']
product_desc = BeautifulSoup(product_desc, 'html.parser').get_text()
with open('data/prompts/write_meta_2.txt', 'r', encoding='utf-8') as file:
prompt_template = file.read().strip()
prompt = prompt_template.format(product_name=product_name, product_desc=product_desc)
response = openai.Completion.create(engine='text-davinci-003', prompt=prompt, max_tokens=400, temperature=0.3)
text = response.choices[0].text.strip()
meta_title = text.split('META DESCRIPTION:')[0].split('META TITLE:')[1].strip()
meta_desc = editing.truncate_meta(text.split('META DESCRIPTION:')[1].strip())
product['meta_title']['language']['value'] = meta_title
product['meta_description']['language']['value'] = meta_desc
editing.edit_presta_product(prestashop, product=product)
logging.info('FINISHED writing meta descriptions')
def apply_ai_actions(prestashop, openai_conn, product_ids: list[int],
classify_ai: bool = 0, descriptions_ai: bool = 0, meta_ai: bool = 0, inci_unit: bool = 0):
if classify_ai:
classify_categories(prestashop, openai_conn, product_ids)
if descriptions_ai:
write_descriptions(prestashop, openai_conn, product_ids)
if meta_ai:
write_meta(prestashop, openai_conn, product_ids)
if inci_unit:
editing.fill_inci(prestashop, product_ids=product_ids, source='aleja')
editing.set_unit_price_api_sql(prestashop, product_ids=product_ids)
logging.info('Finished all AI actions.')
| [] |
2024-01-10 | prixingcha/localGPT_old | run_localGPT.py | import logging
import click
import torch
from auto_gptq import AutoGPTQForCausalLM
from huggingface_hub import hf_hub_download
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline, LlamaCpp
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
LlamaForCausalLM,
LlamaTokenizer,
pipeline,
)
from constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY
def load_model(device_type, model_id, model_basename=None):
"""
Select a model for text generation using the HuggingFace library.
If you are running this for the first time, it will download a model for you.
    Subsequent runs will use the model from the disk.
Args:
device_type (str): Type of device to use, e.g., "cuda" for GPU or "cpu" for CPU.
model_id (str): Identifier of the model to load from HuggingFace's model hub.
model_basename (str, optional): Basename of the model if using quantized models.
Defaults to None.
Returns:
HuggingFacePipeline: A pipeline object for text generation using the loaded model.
Raises:
ValueError: If an unsupported model or device type is provided.
"""
logging.info(f"Loading Model: {model_id}, on: {device_type}")
logging.info("This action can take a few minutes!")
if model_basename is not None:
if ".ggml" in model_basename:
logging.info("Using Llamacpp for GGML quantized models")
model_path = hf_hub_download(repo_id=model_id, filename=model_basename)
max_ctx_size = 2048
kwargs = {
"model_path": model_path,
"n_ctx": max_ctx_size,
"max_tokens": max_ctx_size,
}
if device_type.lower() == "mps":
kwargs["n_gpu_layers"] = 1000
if device_type.lower() == "cuda":
kwargs["n_gpu_layers"] = 1000
kwargs["n_batch"] = max_ctx_size
return LlamaCpp(**kwargs)
else:
# The code supports all huggingface models that ends with GPTQ and have some variation
# of .no-act.order or .safetensors in their HF repo.
logging.info("Using AutoGPTQForCausalLM for quantized models")
if ".safetensors" in model_basename:
# Remove the ".safetensors" ending if present
model_basename = model_basename.replace(".safetensors", "")
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
logging.info("Tokenizer loaded")
model = AutoGPTQForCausalLM.from_quantized(
model_id,
model_basename=model_basename,
use_safetensors=True,
trust_remote_code=True,
device="cuda:0",
use_triton=False,
quantize_config=None,
)
elif (
device_type.lower() == "cuda"
): # The code supports all huggingface models that ends with -HF or which have a .bin
# file in their HF repo.
logging.info("Using AutoModelForCausalLM for full models")
tokenizer = AutoTokenizer.from_pretrained(model_id)
logging.info("Tokenizer loaded")
model = AutoModelForCausalLM.from_pretrained(
model_id,
device_map="auto",
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
trust_remote_code=True,
# max_memory={0: "15GB"} # Uncomment this line with you encounter CUDA out of memory errors
)
model.tie_weights()
else:
logging.info("Using LlamaTokenizer")
tokenizer = LlamaTokenizer.from_pretrained(model_id)
model = LlamaForCausalLM.from_pretrained(model_id)
# Load configuration from the model to avoid warnings
generation_config = GenerationConfig.from_pretrained(model_id)
# see here for details:
# https://huggingface.co./docs/transformers/
# main_classes/text_generation#transformers.GenerationConfig.from_pretrained.returns
# Create a pipeline for text generation
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_length=2048,
temperature=0,
top_p=0.95,
repetition_penalty=1.15,
generation_config=generation_config,
)
local_llm = HuggingFacePipeline(pipeline=pipe)
logging.info("Local LLM Loaded")
return local_llm
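# A hedged usage sketch (the model id and basename mirror the GGML defaults used
# further below in main(); adjust them to the checkpoint you actually want):
#   llm = load_model("cuda", model_id="TheBloke/Llama-2-7B-Chat-GGML",
#                    model_basename="llama-2-7b-chat.ggmlv3.q4_0.bin")
#   print(llm("What does this project do?"))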
# choose the device type to run on, as well as whether to show source documents.
@click.command()
@click.option(
"--device_type",
default="cuda" if torch.cuda.is_available() else "cpu",
type=click.Choice(
[
"cpu",
"cuda",
"ipu",
"xpu",
"mkldnn",
"opengl",
"opencl",
"ideep",
"hip",
"ve",
"fpga",
"ort",
"xla",
"lazy",
"vulkan",
"mps",
"meta",
"hpu",
"mtia",
],
),
help="Device to run on. (Default is cuda)",
)
@click.option(
"--show_sources",
"-s",
is_flag=True,
help="Show sources along with answers (Default is False)",
)
def main(device_type, show_sources):
"""
This function implements the information retrieval task.
1. Loads an embedding model, can be HuggingFaceInstructEmbeddings or HuggingFaceEmbeddings
    2. Loads the existing vectorstore that was created by ingest.py
    3. Loads the local LLM using the load_model function - You can now set different LLMs.
    4. Sets up the Question Answer retrieval chain.
    5. Answers questions interactively.
"""
logging.info(f"Running on: {device_type}")
logging.info(f"Display Source Documents set to: {show_sources}")
embeddings = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": device_type})
# uncomment the following line if you used HuggingFaceEmbeddings in the ingest.py
# embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
# load the vectorstore
db = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=embeddings,
client_settings=CHROMA_SETTINGS,
)
retriever = db.as_retriever()
# load the LLM for generating Natural Language responses
# for HF models
# model_id = "TheBloke/vicuna-7B-1.1-HF"
# model_basename = None
# model_id = "TheBloke/Wizard-Vicuna-7B-Uncensored-HF"
# model_id = "TheBloke/guanaco-7B-HF"
# model_id = 'NousResearch/Nous-Hermes-13b' # Requires ~ 23GB VRAM. Using STransformers
# alongside will 100% create OOM on 24GB cards.
# llm = load_model(device_type, model_id=model_id)
# for GPTQ (quantized) models
# model_id = "TheBloke/Nous-Hermes-13B-GPTQ"
# model_basename = "nous-hermes-13b-GPTQ-4bit-128g.no-act.order"
# model_id = "TheBloke/WizardLM-30B-Uncensored-GPTQ"
# model_basename = "WizardLM-30B-Uncensored-GPTQ-4bit.act-order.safetensors" # Requires
# ~21GB VRAM. Using STransformers alongside can potentially create OOM on 24GB cards.
# model_id = "TheBloke/wizardLM-7B-GPTQ"
# model_basename = "wizardLM-7B-GPTQ-4bit.compat.no-act-order.safetensors"
# model_id = "TheBloke/WizardLM-7B-uncensored-GPTQ"
# model_basename = "WizardLM-7B-uncensored-GPTQ-4bit-128g.compat.no-act-order.safetensors"
# for GGML (quantized cpu+gpu+mps) models - check if they support llama.cpp
# model_id = "TheBloke/wizard-vicuna-13B-GGML"
# model_basename = "wizard-vicuna-13B.ggmlv3.q4_0.bin"
# model_basename = "wizard-vicuna-13B.ggmlv3.q6_K.bin"
# model_basename = "wizard-vicuna-13B.ggmlv3.q2_K.bin"
# model_id = "TheBloke/orca_mini_3B-GGML"
# model_basename = "orca-mini-3b.ggmlv3.q4_0.bin"
model_id = "TheBloke/Llama-2-7B-Chat-GGML"
model_basename = "llama-2-7b-chat.ggmlv3.q4_0.bin"
template = """Use the following pieces of context to answer the question at the end. If you don't know the answer,\
just say that you don't know, don't try to make up an answer.
{context}
{history}
Question: {question}
Helpful Answer:"""
prompt = PromptTemplate(input_variables=["history", "context", "question"], template=template)
memory = ConversationBufferMemory(input_key="question", memory_key="history")
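    # The memory's "history" key matches the {history} placeholder in the prompt
    # and its "question" input key matches the chain input, so previous turns can
    # be injected into the prompt automatically on each query.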
llm = load_model(device_type, model_id=model_id, model_basename=model_basename)
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": prompt, "memory": memory},
)
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
# Get the answer from the chain
res = qa(query)
answer, docs = res["result"], res["source_documents"]
# Print the result
print("\n\n> Question:")
print(query)
print("\n> Answer:")
print(answer)
        if show_sources:  # this flag controls whether the retrieved source documents are printed.
# # Print the relevant sources used for the answer
print("----------------------------------SOURCE DOCUMENTS---------------------------")
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
print("----------------------------------SOURCE DOCUMENTS---------------------------")
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
main()
| [
"t know the answer,just say that you don",
"question",
"context",
"Use the following pieces of context to answer the question at the end. If you don't know the answer,just say that you don't know, don't try to make up an answer.\n\n{context}\n\n{history}\nQuestion: {question}\nHelpful Answer:"
] |
2024-01-10 | prixingcha/localGPT_old | run_localGPT_API.py | import logging
import os
import shutil
import subprocess
import torch
# from auto_gptq import AutoGPTQForCausalLM
from flask import Flask, jsonify, request
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
# from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFacePipeline
from run_localGPT import load_model
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
LlamaForCausalLM,
LlamaTokenizer,
pipeline,
)
from werkzeug.utils import secure_filename
from constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY
DEVICE_TYPE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"DEVICE_TYPE ==> {DEVICE_TYPE}")
SHOW_SOURCES = True
logging.info(f"Running on: {DEVICE_TYPE}")
logging.info(f"Display Source Documents set to: {SHOW_SOURCES}")
EMBEDDINGS = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": DEVICE_TYPE})
# uncomment the following line if you used HuggingFaceEmbeddings in the ingest.py
# EMBEDDINGS = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
if os.path.exists(PERSIST_DIRECTORY):
try:
shutil.rmtree(PERSIST_DIRECTORY)
except OSError as e:
print(f"Error: {e.filename} - {e.strerror}.")
else:
print("The directory does not exist")
run_langest_commands = ["python", "ingest.py"]
if DEVICE_TYPE == "cpu":
run_langest_commands.append("--device_type")
run_langest_commands.append(DEVICE_TYPE)
result = subprocess.run(run_langest_commands, capture_output=True)
if result.returncode != 0:
raise FileNotFoundError(
"No files were found inside SOURCE_DOCUMENTS, please put a starter file inside before starting the API!"
)
# load the vectorstore
DB = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=EMBEDDINGS,
client_settings=CHROMA_SETTINGS,
)
RETRIEVER = DB.as_retriever()
# for HF models
# model_id = "TheBloke/vicuna-7B-1.1-HF"
# model_id = "TheBloke/Wizard-Vicuna-7B-Uncensored-HF"
#psingh this is for enabling UI Aug-5-2023
model_id = "TheBloke/guanaco-7B-HF"
# model_id = 'NousResearch/Nous-Hermes-13b' # Requires ~ 23GB VRAM.
# Using STransformers alongside will 100% create OOM on 24GB cards.
LLM = load_model(device_type=DEVICE_TYPE, model_id=model_id)
# for GPTQ (quantized) models
# model_id = "TheBloke/Nous-Hermes-13B-GPTQ"
# model_basename = "nous-hermes-13b-GPTQ-4bit-128g.no-act.order"
# model_id = "TheBloke/WizardLM-30B-Uncensored-GPTQ"
# model_basename = "WizardLM-30B-Uncensored-GPTQ-4bit.act-order.safetensors"
# Requires ~21GB VRAM. Using STransformers alongside can potentially create OOM on 24GB cards.
# model_id = "TheBloke/wizardLM-7B-GPTQ"
# model_basename = "wizardLM-7B-GPTQ-4bit.compat.no-act-order.safetensors"
# model_id = "TheBloke/WizardLM-7B-uncensored-GPTQ"
# model_basename = "WizardLM-7B-uncensored-GPTQ-4bit-128g.compat.no-act-order.safetensors"
model_id = "TheBloke/Llama-2-7B-Chat-GGML"
model_basename = "llama-2-7b-chat.ggmlv3.q4_0.bin"
LLM = load_model(device_type=DEVICE_TYPE, model_id=model_id, model_basename=model_basename)
QA = RetrievalQA.from_chain_type(
llm=LLM, chain_type="stuff", retriever=RETRIEVER, return_source_documents=SHOW_SOURCES
)
app = Flask(__name__)
@app.route("/api/delete_source", methods=["GET"])
def delete_source_route():
folder_name = "SOURCE_DOCUMENTS"
if os.path.exists(folder_name):
shutil.rmtree(folder_name)
os.makedirs(folder_name)
return jsonify({"message": f"Folder '{folder_name}' successfully deleted and recreated."})
@app.route("/api/save_document", methods=["GET", "POST"])
def save_document_route():
if "document" not in request.files:
return "No document part", 400
file = request.files["document"]
if file.filename == "":
return "No selected file", 400
if file:
filename = secure_filename(file.filename)
folder_path = "SOURCE_DOCUMENTS"
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = os.path.join(folder_path, filename)
file.save(file_path)
return "File saved successfully", 200
@app.route("/api/run_ingest", methods=["GET"])
def run_ingest_route():
global DB
global RETRIEVER
global QA
try:
if os.path.exists(PERSIST_DIRECTORY):
try:
shutil.rmtree(PERSIST_DIRECTORY)
except OSError as e:
print(f"Error: {e.filename} - {e.strerror}.")
else:
print("The directory does not exist")
run_langest_commands = ["python", "ingest.py"]
if DEVICE_TYPE == "cpu":
run_langest_commands.append("--device_type")
run_langest_commands.append(DEVICE_TYPE)
result = subprocess.run(run_langest_commands, capture_output=True)
if result.returncode != 0:
return "Script execution failed: {}".format(result.stderr.decode("utf-8")), 500
# load the vectorstore
DB = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=EMBEDDINGS,
client_settings=CHROMA_SETTINGS,
)
RETRIEVER = DB.as_retriever()
QA = RetrievalQA.from_chain_type(
llm=LLM, chain_type="stuff", retriever=RETRIEVER, return_source_documents=SHOW_SOURCES
)
return "Script executed successfully: {}".format(result.stdout.decode("utf-8")), 200
except Exception as e:
return f"Error occurred: {str(e)}", 500
@app.route("/api/prompt_route", methods=["GET", "POST"])
def prompt_route():
global QA
user_prompt = request.form.get("user_prompt")
if user_prompt:
# print(f'User Prompt: {user_prompt}')
# Get the answer from the chain
res = QA(user_prompt)
answer, docs = res["result"], res["source_documents"]
prompt_response_dict = {
"Prompt": user_prompt,
"Answer": answer,
}
prompt_response_dict["Sources"] = []
for document in docs:
prompt_response_dict["Sources"].append(
(os.path.basename(str(document.metadata["source"])), str(document.page_content))
)
return jsonify(prompt_response_dict), 200
else:
return "No user prompt received", 400
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
app.run(debug=False, port=5110)
| [
"user_prompt",
"{'Prompt': PLACEHOLDER, 'Answer': PLACEHOLDER}"
] |
2024-01-10 | li-plus/chatglm.cpp | examples~langchain_client.py | from langchain.llms import ChatGLM
llm = ChatGLM(endpoint_url="http://127.0.0.1:8000", max_token=2048, top_p=0.7, temperature=0.95, with_history=False)
print(llm.predict("你好"))
| [] |
2024-01-10 | SAGAR-TAMANG/ChatGPT-Prompt-Engineering | index2.py | # This Shows Normal Chat Messages From ChatGPT
import openai
openai.api_key = "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"
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
def get_completion(prompt, model="gpt-3.5-turbo"):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0, # this is the degree of randomness of the model's output
)
return response.choices[0].message["content"]
def get_completion_from_messages(messages, model="gpt-3.5-turbo", temperature=0):
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature, # this is the degree of randomness of the model's output
)
# print(str(response.choices[0].message))
return response.choices[0].message["content"] | [] |
2024-01-10 | SAGAR-TAMANG/ChatGPT-Prompt-Engineering | ChatBot%20(OrderBot).py | # Creating our own ChatBot, known as Order Bot.
import openai
import panel as pn # GUI
pn.extension()
openai.api_key = "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"
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
panels = [] # collect display
def get_completion_from_messages(messages, model="gpt-3.5-turbo", temperature=0):
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature, # this is the degree of randomness of the model's output
)
# print(str(response.choices[0].message))
return response.choices[0].message["content"]
def collect_messages(_):
prompt = inp.value_input
inp.value = ''
context.append({'role':'user', 'content':f"{prompt}"})
response = get_completion_from_messages(context)
context.append({'role':'assistant', 'content':f"{response}"})
panels.append(
pn.Row('User:', pn.pane.Markdown(prompt, width=600)))
panels.append(
pn.Row('Assistant:', pn.pane.Markdown(response, width=600, style={'background-color': '#F6F6F6'})))
return pn.Column(*panels)
context = [ {'role':'system', 'content':"""
You are OrderBot, an automated service to collect orders for a pizza restaurant. \
You first greet the customer, then collects the order, \
and then asks if it's a pickup or delivery. \
You wait to collect the entire order, then summarize it and check for a final \
time if the customer wants to add anything else. \
If it's a delivery, you ask for an address. \
Finally you collect the payment.\
Make sure to clarify all options, extras and sizes to uniquely \
identify the item from the menu.\
You respond in a short, very conversational friendly style. \
The menu includes \
pepperoni pizza 12.95, 10.00, 7.00 \
cheese pizza 10.95, 9.25, 6.50 \
eggplant pizza 11.95, 9.75, 6.75 \
fries 4.50, 3.50 \
greek salad 7.25 \
Toppings: \
extra cheese 2.00, \
mushrooms 1.50 \
sausage 3.00 \
canadian bacon 3.50 \
AI sauce 1.50 \
peppers 1.00 \
Drinks: \
coke 3.00, 2.00, 1.00 \
sprite 3.00, 2.00, 1.00 \
bottled water 5.00 \
"""} ] # accumulate messages
inp = pn.widgets.TextInput(value="Hi", placeholder='Enter text here…')
button_conversation = pn.widgets.Button(name="Chat!")
interactive_conversation = pn.bind(collect_messages, button_conversation)
dashboard = pn.Column(
inp,
pn.Row(button_conversation),
pn.panel(interactive_conversation, loading_indicator=True, height=300),
)
dashboard
# For creating a JSON file for our system to get the order summary
messages = context.copy()
messages.append(
{'role':'system', 'content':'create a json summary of the previous food order. Itemize the price for each item\
The fields should be 1) pizza, include size 2) list of toppings 3) list of drinks, include size 4) list of sides include size 5)total price '},
)
#The fields should be 1) pizza, price 2) list of toppings 3) list of drinks, include size include price 4) list of sides include size include price, 5)total price '},
response = get_completion_from_messages(messages, temperature=0)
print(response) | [
"PLACEHOLDER",
"create a json summary of the previous food order. Itemize the price for each item The fields should be 1) pizza, include size 2) list of toppings 3) list of drinks, include size 4) list of sides include size 5)total price ",
"\nYou are OrderBot, an automated service to collect orders for a pizza restaurant. You first greet the customer, then collects the order, and then asks if it's a pickup or delivery. You wait to collect the entire order, then summarize it and check for a final time if the customer wants to add anything else. If it's a delivery, you ask for an address. Finally you collect the payment.Make sure to clarify all options, extras and sizes to uniquely identify the item from the menu.You respond in a short, very conversational friendly style. The menu includes pepperoni pizza 12.95, 10.00, 7.00 cheese pizza 10.95, 9.25, 6.50 eggplant pizza 11.95, 9.75, 6.75 fries 4.50, 3.50 greek salad 7.25 Toppings: extra cheese 2.00, mushrooms 1.50 sausage 3.00 canadian bacon 3.50 AI sauce 1.50 peppers 1.00 Drinks: coke 3.00, 2.00, 1.00 sprite 3.00, 2.00, 1.00 bottled water 5.00 "
] |
2024-01-10 | SAGAR-TAMANG/ChatGPT-Prompt-Engineering | TheChatFormat.py | # This Shows Normal Chat Messages From ChatGPT
import openai
openai.api_key = "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"
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
def get_completion(prompt, model="gpt-3.5-turbo"):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0, # this is the degree of randomness of the model's output
)
return response.choices[0].message["content"]
def get_completion_from_messages(messages, model="gpt-3.5-turbo", temperature=0):
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature, # this is the degree of randomness of the model's output
)
# print(str(response.choices[0].message))
return response.choices[0].message["content"]
messages = [
{'role':'system', 'content':'You are an assistant that speaks like Shakespeare.'},
{'role':'user', 'content':'tell me a joke'},
{'role':'assistant', 'content':'Why did the chicken cross the road'},
{'role':'user', 'content':'I don\'t know'} ]
response = get_completion_from_messages(messages, temperature=1)
print(response)
print("\n********************************\n")
messages = [
{'role':'system', 'content':'You are friendly chatbot.'},
{'role':'user', 'content':'Hi, my name is Isa'},
{'role':'assistant', 'content': "Hi Isa! It's nice to meet you. \
Is there anything I can help you with today?"},
{'role':'user', 'content':'Yes, you can remind me, What is my name?'} ]
response = get_completion_from_messages(messages, temperature=1)
print(response) | [
"You are an assistant that speaks like Shakespeare.",
"Why did the chicken cross the road",
"Hi Isa! It's nice to meet you. Is there anything I can help you with today?",
"Hi, my name is Isa",
"tell me a joke",
"You are friendly chatbot.",
"I don't know",
"Yes, you can remind me, What is my name?"
] |
2024-01-10 | albertomendoza98/DASHapp | restapi~src~core~entities~tm_model.py | """This module is similar to the one available in the topicmodeler (https://github.com/IntelCompH2020/topicmodeler/blob/main/src/topicmodeling/manageModels.py). It provides a generic representation of all topic models used for curation purposes.
"""
import argparse
import json
import shutil
import sys
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
import scipy.sparse as sparse
from sparse_dot_topn import awesome_cossim_topn
class TMmodel(object):
# This class represents a Topic Model according to the LDA generative model
# Essentially the TM is characterized by
# _alphas: The weight of each topic
# _betas: The weight of each word in the vocabulary
# _thetas: The weight of each topic in each document
#
# and needs to be backed up with a folder in which all the associated
# files will be saved
#
# The TM can be trained with Blei's LDA, Mallet, or any other toolbox
# that produces a model according to this representation
# The following variables will store original values of matrices alphas, betas, thetas
# They will be used to reset the model to original values
_TMfolder = None
_betas_orig = None
_thetas_orig = None
_alphas_orig = None
_betas = None
_thetas = None
_alphas = None
_edits = None # Store all editions made to the model
_ntopics = None
_betas_ds = None
_coords = None
_topic_entropy = None
_topic_coherence = None
_ndocs_active = None
_tpc_descriptions = None
_tpc_labels = None
_vocab_w2id = None
_vocab_id2w = None
_vocab = None
_size_vocab = None
def __init__(self, TMfolder, logger=None):
"""Class initializer
We just need to make sure that we have a folder where the
model will be stored. If the folder does not exist, it will
create a folder for the model
Parameters
----------
TMfolder: Path
Contains the name of an existing folder or a new folder
where the model will be created
logger:
External logger to use. If None, a logger will be created for the object
"""
if logger:
self._logger = logger
else:
import logging
logging.basicConfig(level='INFO')
self._logger = logging.getLogger('TMmodel')
# Convert strings to Paths if necessary
self._TMfolder = Path(TMfolder)
# If folder already exists no further action is needed
# in other case, the folder is created
if not self._TMfolder.is_dir():
try:
self._TMfolder.mkdir(parents=True)
except:
self._logger.error(
'-- -- Topic model object (TMmodel) could not be created')
self._logger.info(
'-- -- -- Topic model object (TMmodel) successfully created')
def create(self, betas=None, thetas=None, alphas=None, vocab=None, labels=None):
"""Creates the topic model from the relevant matrices that characterize it. In addition to the initialization of the corresponding object's variables, all the associated variables and visualizations which are computationally costly are calculated so they are available for the other methods.
Parameters
----------
betas:
Matrix of size n_topics x n_words (vocab of each topic)
thetas:
Matrix of size n_docs x n_topics (document composition)
alphas:
Vector of length n_topics containing the importance of each topic
vocab: list
List of words sorted according to betas matrix
labels: list
List of labels for automatic topic labeling
"""
# If folder already exists no further action is needed
# in other case, the folder is created
if not self._TMfolder.is_dir():
self._logger.error(
'-- -- Topic model object (TMmodel) folder not ready')
return
self._alphas_orig = alphas
self._betas_orig = betas
self._thetas_orig = thetas
self._alphas = alphas
self._betas = betas
self._thetas = thetas
self._vocab = vocab
self._size_vocab = len(vocab)
self._ntopics = thetas.shape[1]
self._edits = []
# Save original variables
np.save(self._TMfolder.joinpath('alphas_orig.npy'), alphas)
np.save(self._TMfolder.joinpath('betas_orig.npy'), betas)
sparse.save_npz(self._TMfolder.joinpath('thetas_orig.npz'), thetas)
with self._TMfolder.joinpath('vocab.txt').open('w', encoding='utf8') as fout:
fout.write('\n'.join(vocab))
# Initial sort of topics according to size. Calculate other variables
self._sort_topics()
self._calculate_beta_ds()
self._calculate_topic_entropy()
self._ndocs_active = np.array((self._thetas != 0).sum(0).tolist()[0])
self._tpc_descriptions = [el[1]
for el in self.get_tpc_word_descriptions()]
self.calculate_topic_coherence() # cohrs_aux
self._tpc_labels = [el[1] for el in self.get_tpc_labels(labels)]
# We are ready to save all variables in the model
self._save_all()
self._logger.info(
'-- -- Topic model variables were computed and saved to file')
return
def _save_all(self):
"""Saves all variables in Topic Model
* alphas, betas, thetas
* edits
* betas_ds, topic_entropy, ndocs_active
* tpc_descriptions, tpc_labels
This function should only be called after making sure all these
variables exist and are not None
"""
np.save(self._TMfolder.joinpath('alphas.npy'), self._alphas)
np.save(self._TMfolder.joinpath('betas.npy'), self._betas)
sparse.save_npz(self._TMfolder.joinpath('thetas.npz'), self._thetas)
with self._TMfolder.joinpath('edits.txt').open('w', encoding='utf8') as fout:
fout.write('\n'.join(self._edits))
np.save(self._TMfolder.joinpath('betas_ds.npy'), self._betas_ds)
np.save(self._TMfolder.joinpath(
'topic_entropy.npy'), self._topic_entropy)
np.save(self._TMfolder.joinpath(
'topic_coherence.npy'), self._topic_coherence)
np.save(self._TMfolder.joinpath(
'ndocs_active.npy'), self._ndocs_active)
with self._TMfolder.joinpath('tpc_descriptions.txt').open('w', encoding='utf8') as fout:
fout.write('\n'.join(self._tpc_descriptions))
with self._TMfolder.joinpath('tpc_labels.txt').open('w', encoding='utf8') as fout:
fout.write('\n'.join(self._tpc_labels))
# Generate also pyLDAvisualization
# pyLDAvis currently raises some Deprecation warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyLDAvis
# We will compute the visualization using ndocs random documents
# In case the model has gone through topic deletion, we may have rows
# in the thetas matrix that sum up to zero (active topics have been
# removed for these problematic documents). We need to take this into
# account
ndocs = 10000
validDocs = np.sum(self._thetas.toarray(), axis=1) > 0
nValidDocs = np.sum(validDocs)
if ndocs > nValidDocs:
ndocs = nValidDocs
perm = np.sort(np.random.permutation(nValidDocs)[:ndocs])
# We consider all documents are equally important
doc_len = ndocs * [1]
vocabfreq = np.round(ndocs*(self._alphas.dot(self._betas))).astype(int)
vis_data = pyLDAvis.prepare(
self._betas,
self._thetas[validDocs, ][perm, ].toarray(),
doc_len,
self._vocab,
vocabfreq,
lambda_step=0.05,
sort_topics=False,
n_jobs=-1)
# Save html
with self._TMfolder.joinpath("pyLDAvis.html").open("w") as f:
pyLDAvis.save_html(vis_data, f)
# TODO: Check substituting by "pyLDAvis.prepared_data_to_html"
self._modify_pyldavis_html(self._TMfolder.as_posix())
# Get coordinates of topics in the pyLDAvis visualization
vis_data_dict = vis_data.to_dict()
self._coords = list(
zip(*[vis_data_dict['mdsDat']['x'], vis_data_dict['mdsDat']['y']]))
with self._TMfolder.joinpath('tpc_coords.txt').open('w', encoding='utf8') as fout:
for item in self._coords:
fout.write(str(item) + "\n")
return
def _save_cohr(self):
np.save(self._TMfolder.joinpath(
'topic_coherence.npy'), self._topic_coherence)
def _modify_pyldavis_html(self, model_dir):
"""
Modifies the PyLDAvis HTML file returned by the Gensim library to include the direct paths of the 'd3.js' and 'ldavis.v3.0.0.js', which are copied into the model/submodel directory.
Parameters
----------
model_dir: str
            String representation of the path where the model/submodel is located
"""
# Copy necessary files in model / submodel folder for PyLDAvis visualization
d3 = Path("src/gui/resources/d3.js")
v3 = Path("src/gui/resources/ldavis.v3.0.0.js")
shutil.copyfile(d3, Path(model_dir, "d3.js"))
shutil.copyfile(v3, Path(model_dir, "ldavis.v3.0.0.js"))
# Update d3 and v3 paths in pyldavis.html
fin = open(Path(model_dir, "pyLDAvis.html").as_posix(),
"rt") # read input file
data = fin.read() # read file contents to string
# Replace all occurrences of the required string
data = data.replace(
"https://d3js.org/d3.v5.js", "d3.js")
data = data.replace(
"https://d3js.org/d3.v5", "d3.js")
data = data.replace(
"https://cdn.jsdelivr.net/gh/bmabey/[email protected]/pyLDAvis/js/ldavis.v3.0.0.js", "ldavis.v3.0.0.js")
fin.close() # close the input file
fin = open(Path(model_dir, "pyLDAvis.html").as_posix(),
"wt") # open the input file in write mode
        fin.write(data)  # overwrite the input file with the resulting data
fin.close() # close the file
return
def _sort_topics(self):
"""Sort topics according to topic size"""
# Load information if necessary
self._load_alphas()
self._load_betas()
self._load_thetas()
self._load_edits()
# Indexes for topics reordering
idx = np.argsort(self._alphas)[::-1]
self._edits.append('s ' + ' '.join([str(el) for el in idx]))
# Sort data matrices
self._alphas = self._alphas[idx]
self._betas = self._betas[idx, :]
self._thetas = self._thetas[:, idx]
return
def _load_alphas(self):
if self._alphas is None:
self._alphas = np.load(self._TMfolder.joinpath('alphas.npy'))
self._ntopics = self._alphas.shape[0]
def _load_betas(self):
if self._betas is None:
self._betas = np.load(self._TMfolder.joinpath('betas.npy'))
self._ntopics = self._betas.shape[0]
self._size_vocab = self._betas.shape[1]
def _load_thetas(self):
if self._thetas is None:
self._thetas = sparse.load_npz(
self._TMfolder.joinpath('thetas.npz'))
self._ntopics = self._thetas.shape[1]
# self._ndocs_active = np.array((self._thetas != 0).sum(0).tolist()[0])
def _load_ndocs_active(self):
if self._ndocs_active is None:
self._ndocs_active = np.load(
self._TMfolder.joinpath('ndocs_active.npy'))
self._ntopics = self._ndocs_active.shape[0]
def _load_edits(self):
if self._edits is None:
with self._TMfolder.joinpath('edits.txt').open('r', encoding='utf8') as fin:
self._edits = fin.readlines()
def _calculate_beta_ds(self):
"""Calculates beta with down-scoring
Emphasizes words appearing less frequently in topics
"""
# Load information if necessary
self._load_betas()
self._betas_ds = np.copy(self._betas)
if np.min(self._betas_ds) < 1e-12:
self._betas_ds += 1e-12
deno = np.reshape((sum(np.log(self._betas_ds)) /
self._ntopics), (self._size_vocab, 1))
deno = np.ones((self._ntopics, 1)).dot(deno.T)
self._betas_ds = self._betas_ds * (np.log(self._betas_ds) - deno)
def _load_betas_ds(self):
if self._betas_ds is None:
self._betas_ds = np.load(self._TMfolder.joinpath('betas_ds.npy'))
self._ntopics = self._betas_ds.shape[0]
self._size_vocab = self._betas_ds.shape[1]
def _load_vocab(self):
if self._vocab is None:
with self._TMfolder.joinpath('vocab.txt').open('r', encoding='utf8') as fin:
self._vocab = [el.strip() for el in fin.readlines()]
def _load_vocab_dicts(self):
"""Creates two vocabulary dictionaries, one that utilizes the words as key, and a second one with the words' id as key.
"""
        if self._vocab_w2id is None and self._vocab_id2w is None:
self._vocab_w2id = {}
self._vocab_id2w = {}
with self._TMfolder.joinpath('vocab.txt').open('r', encoding='utf8') as fin:
for i, line in enumerate(fin):
wd = line.strip()
self._vocab_w2id[wd] = i
self._vocab_id2w[str(i)] = wd
def _calculate_topic_entropy(self):
"""Calculates the entropy of all topics in model
"""
# Load information if necessary
self._load_betas()
if np.min(self._betas) < 1e-12:
self._betas += 1e-12
self._topic_entropy = - \
np.sum(self._betas * np.log(self._betas), axis=1)
self._topic_entropy = self._topic_entropy / np.log(self._size_vocab)
def _load_topic_entropy(self):
if self._topic_entropy is None:
self._topic_entropy = np.load(
self._TMfolder.joinpath('topic_entropy.npy'))
def calculate_topic_coherence(self, metrics=["c_v", "c_npmi"], n_words=15, only_one=True):
# Load topic information
if self._tpc_descriptions is None:
self._tpc_descriptions = [el[1]
for el in self.get_tpc_word_descriptions()]
# Convert topic information into list of lists
tpc_descriptions_ = \
[tpc.split(', ') for tpc in self._tpc_descriptions]
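        # Coherence is computed with Gensim's CoherenceModel over the training
        # corpus: the top n_words of each topic are compared against word
        # co-occurrence statistics, so higher scores indicate more interpretable
        # topics.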
# Get texts to calculate coherence
if self._TMfolder.parent.joinpath('modelFiles/corpus.txt').is_file():
corpusFile = self._TMfolder.parent.joinpath(
'modelFiles/corpus.txt')
else:
corpusFile = self._TMfolder.parent.joinpath('corpus.txt')
with corpusFile.open("r", encoding="utf-8") as f:
corpus = [line.rsplit(" 0 ")[1].strip().split() for line in f.readlines(
) if line.rsplit(" 0 ")[1].strip().split() != []]
# Import necessary modules for coherence calculation with Gensim
# TODO: This needs to be substituted by a non-Gensim based calculation of the coherence
from gensim.corpora import Dictionary
from gensim.models.coherencemodel import CoherenceModel
# Get Gensim dictionary
dictionary = None
if self._TMfolder.parent.joinpath('dictionary.gensim').is_file():
try:
dictionary = Dictionary.load_from_text(
self._TMfolder.parent.joinpath('dictionary.gensim').as_posix())
except:
self._logger.warning(
"Gensim dictionary could not be load from vocabulary file.")
else:
if dictionary is None:
dictionary = Dictionary(corpus)
if n_words > len(tpc_descriptions_[0]):
            self._logger.error(
                '-- -- -- Coherence calculation failed: n_words exceeds the number of words available per topic description.')
else:
if only_one:
metric = metrics[0]
self._logger.info(
f"Calculating just coherence {metric}.")
if metric in ["c_npmi", "u_mass", "c_v", "c_uci"]:
cm = CoherenceModel(topics=tpc_descriptions_, texts=corpus,
dictionary=dictionary, coherence=metric, topn=n_words)
self._topic_coherence = cm.get_coherence_per_topic()
else:
                    self._logger.error(
'-- -- -- Coherence metric provided is not available.')
else:
cohrs_aux = []
for metric in metrics:
self._logger.info(
f"Calculating coherence {metric}.")
if metric in ["c_npmi", "u_mass", "c_v", "c_uci"]:
cm = CoherenceModel(topics=tpc_descriptions_, texts=corpus,
dictionary=dictionary, coherence=metric, topn=n_words)
aux = cm.get_coherence_per_topic()
cohrs_aux.extend(aux)
self._logger.info(cohrs_aux)
else:
                        self._logger.error(
'-- -- -- Coherence metric provided is not available.')
self._topic_coherence = cohrs_aux
def _load_topic_coherence(self):
if self._topic_coherence is None:
self._topic_coherence = np.load(
self._TMfolder.joinpath('topic_coherence.npy'))
def _calculate_sims(self, topn=50, lb=0):
if self._thetas is None:
self._load_thetas()
thetas_sqrt = np.sqrt(self._thetas)
thetas_col = thetas_sqrt.T
self._sims = awesome_cossim_topn(thetas_sqrt, thetas_col, topn, lb)
def _load_sims(self):
if self._sims is None:
self._sims = sparse.load_npz(
self._TMfolder.joinpath('distances.npz'))
def _largest_indices(self, ary, n):
"""Returns the n largest indices from a numpy array."""
flat = ary.flatten()
indices = np.argpartition(flat, -n)[-n:]
indices = indices[np.argsort(-flat[indices])]
idx0, idx1 = np.unravel_index(indices, ary.shape)
idx0 = idx0.tolist()
idx1 = idx1.tolist()
selected_idx = []
for id0, id1 in zip(idx0, idx1):
if id0 < id1:
selected_idx.append((id0, id1, ary[id0, id1]))
return selected_idx
def get_model_info_for_hierarchical(self):
"""Returns the objects necessary for the creation of a level-2 topic model.
"""
self._load_betas()
self._load_thetas()
self._load_vocab_dicts()
return self._betas, self._thetas, self._vocab_w2id, self._vocab_id2w
def get_model_info_for_vis(self):
self._load_alphas()
self._load_betas()
self._load_thetas()
self._load_vocab()
self.load_tpc_coords()
return self._alphas, self._betas, self._thetas, self._vocab, self._coords
def get_tpc_word_descriptions(self, n_words=15, tfidf=True, tpc=None):
"""returns the chemical description of topics
Parameters
----------
n_words:
Number of terms for each topic that will be included
tfidf:
If true, downscale the importance of words that appear
in several topics, according to beta_ds (Blei and Lafferty, 2009)
tpc:
Topics for which the descriptions will be computed, e.g.: tpc = [0,3,4]
If None, it will compute the descriptions for all topics
Returns
-------
tpc_descs: list of tuples
            Each element is a tuple (topic_id, "word0, word1, ...")
"""
# Load betas (including n_topics) and vocabulary
if tfidf:
self._load_betas_ds()
else:
self._load_betas()
self._load_vocab()
if not tpc:
tpc = range(self._ntopics)
tpc_descs = []
for i in tpc:
if tfidf:
words = [self._vocab[idx2]
for idx2 in np.argsort(self._betas_ds[i])[::-1][0:n_words]]
else:
words = [self._vocab[idx2]
for idx2 in np.argsort(self._betas[i])[::-1][0:n_words]]
tpc_descs.append((i, ', '.join(words)))
return tpc_descs
def load_tpc_descriptions(self):
if self._tpc_descriptions is None:
with self._TMfolder.joinpath('tpc_descriptions.txt').open('r', encoding='utf8') as fin:
self._tpc_descriptions = [el.strip() for el in fin.readlines()]
def get_tpc_labels(self, labels=None, use_cuda=True):
"""returns the labels of the topics in the model
Parameters
----------
labels: list
List of labels for automatic topic labeling
use_cuda: bool
If True, use cuda.
Returns
-------
tpc_labels: list of tuples
            Each element is a tuple (topic_id, "label for topic topic_id")
"""
if not labels:
return [(i, "NA") for i, p in enumerate(self._tpc_descriptions)]
if use_cuda:
import torch
if torch.cuda.is_available():
device = 0
self._logger.info("-- -- CUDA available: GPU will be used")
else:
device = -1
self._logger.warning(
"-- -- 'use_cuda' set to True when cuda is unavailable."
"Make sure CUDA is available or set 'use_cuda=False'"
)
self._logger.info(
"-- -- CUDA unavailable: GPU will not be used")
else:
device = -1
self._logger.info("-- -- CUDA unavailable: GPU will not be used")
from transformers import pipeline
classifier = pipeline("zero-shot-classification",
model="facebook/bart-large-mnli",
device=device)
predictions = classifier(self._tpc_descriptions, labels)
predictions = [(i, p["labels"][0]) for i, p in enumerate(predictions)]
return predictions
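    # A hedged usage sketch (the candidate labels are illustrative only):
    #   tm.load_tpc_descriptions()
    #   labels = ["health", "energy", "transport"]
    #   tpc_labels = [lbl for _, lbl in tm.get_tpc_labels(labels, use_cuda=False)]
    # Each topic description is scored against the candidate labels with the
    # zero-shot classifier and the best-scoring label is kept per topic.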
def load_tpc_labels(self):
if self._tpc_labels is None:
with self._TMfolder.joinpath('tpc_labels.txt').open('r', encoding='utf8') as fin:
self._tpc_labels = [el.strip() for el in fin.readlines()]
def load_tpc_coords(self):
if self._coords is None:
with self._TMfolder.joinpath('tpc_coords.txt').open('r', encoding='utf8') as fin:
# read the data from the file and convert it back to a list of tuples
self._coords = \
[tuple(map(float, line.strip()[1:-1].split(', ')))
for line in fin]
def get_alphas(self):
self._load_alphas()
return self._alphas
def showTopics(self):
self._load_alphas()
self._load_ndocs_active()
self.load_tpc_descriptions()
self.load_tpc_labels()
TpcsInfo = [{"Size": str(round(el[0], 4)), "Label": el[1].strip(), "Word Description": el[2].strip(), "Ndocs Active": str(el[3])} for el in zip(
self._alphas, self._tpc_labels, self._tpc_descriptions, self._ndocs_active)]
return TpcsInfo
def showTopicsAdvanced(self):
self._load_alphas()
self._load_ndocs_active()
self.load_tpc_descriptions()
self.load_tpc_labels()
self._load_topic_entropy()
self._load_topic_coherence()
TpcsInfo = [{"Size": str(round(el[0], 4)), "Label": el[1].strip(), "Word Description": el[2].strip(), "Ndocs Active": str(el[3]), "Topics entropy": str(round(
el[4], 4)), "Topics coherence": str(round(el[5], 4))} for el in zip(self._alphas, self._tpc_labels, self._tpc_descriptions, self._ndocs_active, self._topic_entropy, self._topic_coherence)]
return TpcsInfo
def setTpcLabels(self, TpcLabels):
self._tpc_labels = [el.strip() for el in TpcLabels]
self._load_alphas()
# Check that the number of labels is consistent with model
if len(TpcLabels) == self._ntopics:
with self._TMfolder.joinpath('tpc_labels.txt').open('w', encoding='utf8') as fout:
fout.write('\n'.join(self._tpc_labels))
return 1
else:
return 0
def deleteTopics(self, tpcs):
"""This is a costly operation, almost everything
needs to get modified"""
self._load_alphas()
self._load_betas()
self._load_thetas()
self._load_betas_ds()
self._load_topic_entropy()
self._load_topic_coherence()
self.load_tpc_descriptions()
self.load_tpc_labels()
self._load_ndocs_active()
self._load_edits()
self._load_vocab()
try:
# Get a list of the topics that should be kept
tpc_keep = [k for k in range(self._ntopics) if k not in tpcs]
tpc_keep = [k for k in tpc_keep if k < self._ntopics]
# Calculate new variables
self._thetas = self._thetas[:, tpc_keep]
from sklearn.preprocessing import normalize
self._thetas = normalize(self._thetas, axis=1, norm='l1')
self._alphas = np.asarray(np.mean(self._thetas, axis=0)).ravel()
self._ntopics = self._thetas.shape[1]
self._betas = self._betas[tpc_keep, :]
self._betas_ds = self._betas_ds[tpc_keep, :]
self._ndocs_active = self._ndocs_active[tpc_keep]
self._topic_entropy = self._topic_entropy[tpc_keep]
self._topic_coherence = self._topic_coherence[tpc_keep]
self._tpc_labels = [self._tpc_labels[i] for i in tpc_keep]
self._tpc_descriptions = [
self._tpc_descriptions[i] for i in tpc_keep]
self._edits.append('d ' + ' '.join([str(k) for k in tpcs]))
# We are ready to save all variables in the model
self._save_all()
self._logger.info(
'-- -- Topics deletion successful. All variables saved to file')
return 1
except:
self._logger.info(
'-- -- Topics deletion generated an error. Operation failed')
return 0
def getSimilarTopics(self, npairs, thr=1e-3):
"""Obtains pairs of similar topics
        npairs: number of pairs of similar topics to return
thr: threshold for vocabulary thresholding
"""
self._load_thetas()
self._load_betas()
# Part 1 - Coocurring topics
        # Highly correlated topics co-occur together
# Topic mean
med = np.asarray(np.mean(self._thetas, axis=0)).ravel()
# Topic square mean
thetas2 = self._thetas.multiply(self._thetas)
med2 = np.asarray(np.mean(thetas2, axis=0)).ravel()
# Topic stds
stds = np.sqrt(med2 - med ** 2)
# Topic correlation
num = self._thetas.T.dot(
self._thetas).toarray() / self._thetas.shape[0]
num = num - med[..., np.newaxis].dot(med[np.newaxis, ...])
deno = stds[..., np.newaxis].dot(stds[np.newaxis, ...])
corrcoef = num / deno
selected_coocur = self._largest_indices(
corrcoef, self._ntopics + 2 * npairs)
selected_coocur = [(el[0], el[1], el[2].astype(float))
for el in selected_coocur]
# Part 2 - Topics with similar word composition
# Computes inter-topic distance based on word distributions
# using scipy implementation of Jensen Shannon distance
from scipy.spatial.distance import jensenshannon
# For a more efficient computation with very large vocabularies
# we implement a threshold for restricting the distance calculation
# to columns where any element is greater than threshold thr
betas_aux = self._betas[:, np.where(self._betas.max(axis=0) > thr)[0]]
js_mat = np.zeros((self._ntopics, self._ntopics))
for k in range(self._ntopics):
for kk in range(self._ntopics):
js_mat[k, kk] = jensenshannon(
betas_aux[k, :], betas_aux[kk, :])
JSsim = 1 - js_mat
selected_worddesc = self._largest_indices(
JSsim, self._ntopics + 2 * npairs)
selected_worddesc = [(el[0], el[1], el[2].astype(float))
for el in selected_worddesc]
similarTopics = {
'Coocurring': selected_coocur,
'Worddesc': selected_worddesc
}
return similarTopics
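# Illustrative usage sketch (not part of the original source); assuming `tm` is an
# instance of this model class, the two similarity views can be inspected like this:
#   sims = tm.getSimilarTopics(npairs=5)
#   for k1, k2, corr in sims['Coocurring']:
#       print(f"topics {k1} and {k2} co-occur, correlation {corr:.3f}")
#   for k1, k2, jssim in sims['Worddesc']:
#       print(f"topics {k1} and {k2} share word mass, JS similarity {jssim:.3f}")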
def fuseTopics(self, tpcs):
"""This is a costly operation, almost everything
needs to get modified"""
self._load_alphas()
self._load_betas()
self._load_thetas()
# self._load_topic_coherence()
self.load_tpc_descriptions()
self.load_tpc_labels()
self._load_edits()
self._load_vocab()
try:
# List of topics that will be merged
tpcs = sorted(tpcs)
# Calculate new variables
# For beta we keep a weighted average of topic vectors
weights = self._alphas[tpcs]
bet = weights[np.newaxis, ...].dot(
self._betas[tpcs, :]) / (sum(weights))
# keep new topic vector in upper position and delete the others
self._betas[tpcs[0], :] = bet
self._betas = np.delete(self._betas, tpcs[1:], 0)
# For theta we need to keep the sum. Since adding implies changing
# structure, we need to convert to full matrix first
# No need to renormalize
thetas_full = self._thetas.toarray()
thet = np.sum(thetas_full[:, tpcs], axis=1)
thetas_full[:, tpcs[0]] = thet
thetas_full = np.delete(thetas_full, tpcs[1:], 1)
self._thetas = sparse.csr_matrix(thetas_full, copy=True)
# Compute new alphas and number of topics
self._alphas = np.asarray(np.mean(self._thetas, axis=0)).ravel()
self._ntopics = self._thetas.shape[1]
# Compute all other variables
self._calculate_beta_ds()
self._calculate_topic_entropy()
self._ndocs_active = np.array(
(self._thetas != 0).sum(0).tolist()[0])
# Keep label and description of most significant topic
for tpc in tpcs[1:][::-1]:
del self._tpc_descriptions[tpc]
# Recalculate word description of most significant topic
self._tpc_descriptions[tpcs[0]] = self.get_tpc_word_descriptions(tpc=[tpcs[0]])[
0][1]
for tpc in tpcs[1:][::-1]:
del self._tpc_labels[tpc]
self.calculate_topic_coherence()
self._edits.append('f ' + ' '.join([str(el) for el in tpcs]))
# We are ready to save all variables in the model
self._save_all()
self._logger.info(
'-- -- Topics merging successful. All variables saved to file')
return 1
except:
self._logger.info(
'-- -- Topics merging generated an error. Operation failed')
return 0
def sortTopics(self):
"""This is a costly operation, almost everything
needs to get modified"""
self._load_alphas()
self._load_betas()
self._load_thetas()
self._load_betas_ds()
self._load_topic_entropy()
self._load_topic_coherence()
self.load_tpc_descriptions()
self.load_tpc_labels()
self._load_ndocs_active()
self._load_edits()
self._load_vocab()
try:
# Calculate order for the topics
idx = np.argsort(self._alphas)[::-1]
# Calculate new variables
self._thetas = self._thetas[:, idx]
self._alphas = self._alphas[idx]
self._betas = self._betas[idx, :]
self._betas_ds = self._betas_ds[idx, :]
self._ndocs_active = self._ndocs_active[idx]
self._topic_entropy = self._topic_entropy[idx]
self._topic_coherence = self._topic_coherence[idx]
self._tpc_labels = [self._tpc_labels[i] for i in idx]
self._tpc_descriptions = [self._tpc_descriptions[i] for i in idx]
self._edits.append('s ' + ' '.join([str(el) for el in idx]))
# We are ready to save all variables in the model
self._save_all()
self._logger.info(
'-- -- Topics reordering successful. All variables saved to file')
return 1
except:
self._logger.info(
'-- -- Topics reordering generated an error. Operation failed')
return 0
def resetTM(self):
self._alphas_orig = np.load(self._TMfolder.joinpath('alphas_orig.npy'))
self._betas_orig = np.load(self._TMfolder.joinpath('betas_orig.npy'))
self._thetas_orig = sparse.load_npz(
self._TMfolder.joinpath('thetas_orig.npz'))
self._load_vocab()
try:
self.create(betas=self._betas_orig, thetas=self._thetas_orig,
alphas=self._alphas_orig, vocab=self._vocab)
return 1
except:
return 0
def recalculate_cohrs(self):
self.load_tpc_descriptions()
try:
self.calculate_topic_coherence()
self._save_cohr()
self._logger.info(
'-- -- Topics coherence recalculation successful. All variables saved to file')
return 1
except:
self._logger.info(
'-- -- Topics coherence recalculation generated an error. Operation failed')
return 0
def to_dataframe(self):
self._load_alphas()
self._load_betas()
self._load_thetas()
self._load_betas_ds()
self._load_topic_entropy()
self._load_topic_coherence()
self.load_tpc_descriptions()
self.load_tpc_labels()
self._load_ndocs_active()
self._load_vocab()
self._load_vocab_dicts()
data = {
"betas": [self._betas],
"alphas": [self._alphas],
"topic_entropy": [self._topic_entropy],
"topic_coherence": [self._topic_coherence],
"ndocs_active": [self._ndocs_active],
"tpc_descriptions": [self._tpc_descriptions],
"tpc_labels": [self._tpc_labels],
}
df = pd.DataFrame(data)
return df, self._vocab_id2w
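# Illustrative usage sketch (not part of the original source): the exported frame keeps
# one row holding all model-level arrays, e.g.
#   df, id2w = tm.to_dataframe()          # `tm` assumed to be an instance of this class
#   print(df.loc[0, "tpc_labels"])        # list of topic labels
#   # id2w maps vocabulary ids to words and can be used to decode the columns of betas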
| [] |
2024-01-10 | FaRm3rKing/AI-Hackathon2023 | utils~common_utils.py | import datetime
import glob
import os
import re
from pathlib import Path
from typing import Any, List, Optional, Tuple
from langchain.chat_models import AzureChatOpenAI
def get_azure_chatbot(
openai_api_key: Optional[str] = None,
deployment_name: Optional[str] = None,
openai_api_type: Optional[str] = None,
openai_api_base: Optional[str] = None,
openai_api_version: Optional[str] = None,
**kwargs: Any,
) -> AzureChatOpenAI:
"""
Create an instance of AzureChatOpenAI.
Usage:
azure_chat_openai = get_azure_chatbot()
Args:
openai_api_key: The OpenAI API key. If not provided, the method will try to get it from the
environment variable OPENAI_API_KEY.
deployment_name: The name of the deployment. If not provided, the method will try to get it
from the environment variable DEPLOYMENT_NAME.
openai_api_type: The type of the OpenAI API. If not provided, the method will try to get it
from the environment variable OPENAI_API_TYPE.
openai_api_base: The base of the OpenAI API. If not provided, the method will try to get it
from the environment variable OPENAI_API_BASE.
openai_api_version: The version of the OpenAI API. If not provided, the method will try to
get it from the environment variable OPENAI_API_VERSION.
kwargs: Other optional parameters.
Returns:
An instance of AzureChatOpenAI.
"""
openai_api_key = openai_api_key or os.getenv("OPENAI_API_KEY")
if not openai_api_key:
raise ValueError(
"openai_api_key is required. Please provide it as an argument or set the environment"
" variable OPENAI_API_KEY."
)
deployment_name = deployment_name or os.getenv("DEPLOYMENT_NAME")
if not deployment_name:
raise ValueError(
"deployment_name is required. Please provide it as an argument or set the environment"
" variable DEPLOYMENT_NAME."
)
openai_api_type = openai_api_type or os.getenv("OPENAI_API_TYPE") or "azure"
openai_api_base = openai_api_base or os.getenv("OPENAI_API_BASE")
if not openai_api_base:
raise ValueError(
"openai_api_base is required. Please provide it as an argument or set the environment"
" variable OPENAI_API_BASE."
)
openai_api_version = openai_api_version or os.getenv("OPENAI_API_VERSION")
if not openai_api_version:
raise ValueError(
"openai_api_version is required. Please provide it as an argument or set the environment"
" variable OPENAI_API_VERSION."
)
return AzureChatOpenAI(
deployment_name=deployment_name,
openai_api_type=openai_api_type,
openai_api_base=openai_api_base,
openai_api_version=openai_api_version,
openai_api_key=openai_api_key,
**kwargs,
)
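# Illustrative usage sketch (not part of the original file); assumes the Azure OpenAI
# variables documented above are set, e.g. in a .env file loaded elsewhere:
#   from langchain.schema import HumanMessage
#   chat = get_azure_chatbot(temperature=0)
#   reply = chat([HumanMessage(content="Say hello")]).content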
def read_files_from_directory(directory_path: str) -> List[Tuple[str, str]]:
"""
This function reads all the .txt files from the specified directory path,
and returns a list of tuples, where each tuple contains the filename and its corresponding content.
Args:
directory_path (str): The path to the directory containing the .txt files.
Returns:
file_content_list (list): A list of tuples, where each tuple has the filename as the first element and
the corresponding content as the second element.
"""
file_content_list = []
txt_files = glob.glob(os.path.join(directory_path, "*.txt"))
for txt_file in txt_files:
with open(txt_file, "r", encoding="utf-8") as file:
data = file.read()
# Add a tuple (filename, content) to the list
file_content_list.append((os.path.basename(txt_file), data))
return file_content_list
def extract_folder_and_name_from_path(source_var: str, default_path: str) -> Tuple[str, str]:
"""
Extract and return the folder path and file name from the path stored in an environment variable, falling back to a default.
Args:
- source_var (str): The name of the environment variable that holds the file path.
- default_path (str): The default path to use if the environment variable is not set or is empty.
Returns:
- folder_path (str): The folder path extracted from the provided source as a string.
- file_name (str): The file name extracted from the provided source.
"""
# Fetch the path from the environment variable; fall back to the default path if it is not set.
file_path = os.environ.get(source_var, None)
if not file_path:
file_path = default_path
folder_path = str(Path(file_path).parent)
file_name = Path(file_path).name
return folder_path, file_name
def compute_cost(prompt: int, completion: int) -> float:
"""
Computes the cost for Azure OpenAI based on input tokens and output tokens.
Args:
- prompt (int): Number of input tokens.
- completion (int): Number of output tokens.
Returns:
- float: Computed cost.
"""
prompt_cost_per_1000 = 0.003
completion_cost_per_1000 = 0.004
prompt_cost = (prompt / 1000) * prompt_cost_per_1000
completion_cost = (completion / 1000) * completion_cost_per_1000
return prompt_cost + completion_cost
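# Worked example (illustrative): 1,500 prompt tokens and 500 completion tokens cost
#   (1500 / 1000) * 0.003 + (500 / 1000) * 0.004 = 0.0045 + 0.0020 = 0.0065
# so compute_cost(1500, 500) returns 0.0065 (in dollars).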
def find_estimate_used_cost_filename() -> str:
"""
Finds the filename that matches the pattern "estimate_used_cost_$..." in the current directory.
If not found, creates a new file with value $0.
Returns:
- str: Filename that matches the pattern.
"""
for filename in os.listdir("."):
if "estimate_used_cost_$" in filename:
return filename
# If not found, create a new file with $0.0000
new_filename = "./estimate_used_cost_$0.0000"
with open(new_filename, "w") as file:
file.write("Initial creation with $0.0000\nOnly calculates costs executed locally, may not be accurate!\n")
return new_filename
def update_estimate_used_cost_file(prompt: int, completion: int) -> None:
"""
Updates the estimate used cost file by adding the new computed cost.
Args:
- prompt (int): Number of input tokens.
- completion (int): Number of output tokens.
Returns:
- None
"""
# Compute the cost
cost = compute_cost(prompt, completion)
# Find the file with the required pattern
filename = find_estimate_used_cost_filename()
# Extract current value from filename
match = re.search(r"estimate_used_cost_\$(\d+\.\d{4})", filename)
if match:
current_value = float(match.group(1))
new_value = current_value + cost
else:
raise ValueError("Could not extract current value from filename.")
# Rename the file with the new cost
os.rename(filename, f"./estimate_used_cost_${new_value:.4f}")
# Write the transaction details into the file
with open(f"./estimate_used_cost_${new_value:.4f}", "a") as file:
file.write(
f"{datetime.datetime.now()} prompt token: {prompt} completion token: {completion} cost: ${cost:.4f}\n"
)
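# Illustrative call sketch (not part of the original file): after a chat completion,
# the token counts reported by the API can be fed straight in, e.g.
#   usage = response["usage"]   # assumed OpenAI-style usage block
#   update_estimate_used_cost_file(prompt=usage["prompt_tokens"], completion=usage["completion_tokens"])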
| [
"0.003"
] |
2024-01-10 | FaRm3rKing/AI-Hackathon2023 | utils~question_solver.py | import json
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
from utils.common_utils import get_azure_chatbot
from utils.log import logger
QUESTION_SOLVER_PROMPT_PATH = r"./prompt/question_solver_prompt.txt"
class QuestionSolver:
def __init__(self) -> None:
self.chatbot = get_azure_chatbot(request_timeout=20)
with open(QUESTION_SOLVER_PROMPT_PATH, "r") as file:
question_solver_prompt = file.read()
self.prompt_template = question_solver_prompt
def solve(self, program_requirement: str) -> dict:
"""
This function accepts a programming requirement as an input, and uses a chatbot to generate a solution.
The solution is then parsed and returned in a dictionary format.
Parameters:
program_requirement (str): A string that describes the programming requirement.
Returns:
result (dict): A dictionary containing two keys - 'thought' and 'solution_code'.
The 'thought' key contains the thought process behind the solution,
while the 'solution_code' key contains the actual code for the solution.
Raises:
ValueError: If the response from ChatGPT is in the wrong format, the function will raise a ValueError.
"""
prompt = PromptTemplate(
template=self.prompt_template, input_variables=["program_requirement"],
)
input = prompt.format_prompt(program_requirement=program_requirement)
logger.debug("Input prompt\n" + input.to_string())
messages = [HumanMessage(content=input.to_string())]
response = self.chatbot(messages).content
logger.debug("ChatGPT response:\n" + str(response))
try:
result = json.loads(response)
logger.debug("Parsed result:\n" + str(result))
except Exception:
raise ValueError(
"ChatGPT's response is in wrong format, please try again or adjust the prompt."
)
return result
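# Illustrative usage sketch (assumes the prompt template file exists and the Azure
# OpenAI environment variables used by get_azure_chatbot are configured):
#   solver = QuestionSolver()
#   result = solver.solve("Write a function that reverses a string.")
#   print(result["thought"])
#   print(result["solution_code"])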
| [
"./prompt/question_solver_prompt.txt",
"program_requirement"
] |
2024-01-10 | micahkepe/custom_assistant2.0 | custom_assistant.py | import speech_recognition as sr
import openai
import PySimpleGUI as sg
from elevenlabslib import *
from pathlib import Path
from configparser import ConfigParser
import requests
from dotenv import load_dotenv
import os
import logging
import json
import time
# Load OpenAI and ElevenLabs keys from .env file
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
ELEVENLABS_KEY = os.getenv("ELEVENLABS_KEY")
# find microphone to use later to record audio
r = sr.Recognizer()
mic = sr.Microphone()
# model cache
CACHE_FILE = "models_cache.json"
CACHE_TIME = 60 * 60 # 1 hour
def get_users_models():
"""
Get a list of the available voices in the user's ElevenLabs account
:return: list of strings of available voice models
"""
models = []
# check if cache file exists
if os.path.exists(CACHE_FILE):
# check if cache file is older than CACHE_TIME
if time.time() - os.path.getmtime(CACHE_FILE) > CACHE_TIME:
# delete cache file
logging.info(
f"Cache file is older than {CACHE_TIME} seconds. Deleting cache file."
)
os.remove(CACHE_FILE)
else:
# read cache file
logging.info(
f"Cache file is newer than {CACHE_TIME} seconds. Reading cache file."
)
with open(CACHE_FILE, "r") as f:
models = json.load(f)
return models
# HTTP request to retrieve the voices
API_KEY = ELEVENLABS_KEY
BASE_URL = "https://api.elevenlabs.io/v1"
ENDPOINT = "/voices"
headers = {"accept": "application/json", "xi-api-key": API_KEY}
response = requests.get(BASE_URL + ENDPOINT, headers=headers)
# Check if the request was successful
if response.status_code == 200:
voices = response.json()["voices"]
voice_names = [voice["name"] for voice in voices]
for name in voice_names:
models.append(name)
else:
sg.popup_error(
"ElevenLabs Key is not valid. Try different key at top of program."
)
logging.error(
f"ElevenLabs Key is not valid. HTTP Response Code: {response.status_code}"
)
# write cache file
logging.info(f"Writing cache file.")
with open(CACHE_FILE, "w") as f:
json.dump(models, f)
return models
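# Illustrative sketch (not part of the original file): the first call hits the
# ElevenLabs /voices endpoint and writes models_cache.json; calls within the next
# hour reuse the cached list instead of the API, e.g.
#   names = get_users_models()   # e.g. ["Rachel", "Adam"] -- actual names depend on the account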
def has_selected_model(user_values):
"""
Checks if user has selected a model
:param user_values: dictionary of values associated with user's window
:return: bool - True if a model is selected, otherwise False
"""
if user_values["-Selected Model-"] == "":
sg.popup("Please select a model.")
return False
return True
def has_name(user_values):
"""
Checks if user has inputted a name
:param user_values: dictionary of values associated with user's window
:return: bool - True if a name was entered, otherwise False
"""
if user_values["-USER NAME-"] == "":
sg.popup("Please write your name.")
return False
return True
def is_valid_user():
""" "
Checks if user's keys are valid
:return: boolean value of valid or not
"""
try:
ElevenLabsUser(ELEVENLABS_KEY)
openai.api_key = OPENAI_API_KEY
openai.Completion.create(engine="text-davinci-003", prompt="Test", max_tokens=5)
return True
except Exception as e:
sg.popup_error(
"OpenAI and/or ElevenLabs Key is not valid. Try different key(s) at top of program."
)
logging.error(f"OpenAI and/or ElevenLabs Key is not valid. Exception: {e}")
return False
def generate_image_response(prompt):
"""
Generate a response from DALL-E for a query involving drawing a picture
:param prompt: string of the user's query
:return: string image url of generated drawing, index of rest of string after "draw"
"""
# find subject user wants
i = prompt.find("draw")
i += 5
# generate DALL-E image response
gpt_response = openai.Image.create(prompt=prompt[i:], n=1, size="1024x1024")
# get url from response and return
url = gpt_response["data"][0]["url"]
return url, i
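# Worked example (illustrative): for prompt = "please draw a red cat",
# prompt.find("draw") returns 7, so i becomes 12 and prompt[i:] is "a red cat",
# which is the subject passed to openai.Image.create.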
def generate_text_response(prompt, ongoing_convo):
"""
Generate response from GPT for a standard query not involving drawing a picture
and update chat log
:param prompt: string of the user's query
:param ongoing_convo: list of dictionaries of ongoing conversations between user and model
:return: string of GPT's response
"""
ongoing_convo.append({"role": "user", "content": prompt})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=ongoing_convo,
max_tokens=4000,
n=1,
stop=None,
temperature=0.5,
)
answer = response["choices"][0]["message"]["content"]
ongoing_convo.append({"role": "assistant", "content": answer})
return answer
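# Illustrative sketch (not part of the original file): the conversation list doubles
# as chat memory, so each call appends both the user turn and the assistant reply:
#   convo = [{"role": "system", "content": "You are a helpful assistant."}]
#   answer = generate_text_response("What is 2 + 2?", convo)
#   # convo now holds the system message, the question, and the assistant's answer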
def generate_and_show_response(prompt, model, ongoing_convo, voice, values):
"""
Generates response to user's question and displays answer in popups and terminal
:param prompt: string of the user's query
:param model: model selected by the user from available models
:param ongoing_convo: list of dictionaries representing ongoing conversation
:param voice: ElevenLabsVoice object tied to selected model
:param values: dictionary of values associated with main window
:return: None
"""
# if user wants to assistant to draw something (has "draw" in prompt)
if "draw" in prompt:
image_url, idx = generate_image_response(prompt)
print(f"{model}: Here's {prompt[idx:]}")
print(image_url)
sg.popup_scrolled(
f"You said: {prompt} \n {model}: Here's {prompt[idx:]} \n {image_url}"
)
print("=====")
# if user asks a standard question (no "draw" request)
else:
message = generate_text_response(prompt, ongoing_convo)
# Show GPT's response
if values["-Spoken Response-"] == "Yes":
voice.generate_and_play_audio(message, playInBackground=False)
print(f"{model}: {message}")
sg.popup_scrolled(f"You said: {prompt}\n\n{model}: {message}")
print("=====")
def main_window():
# Cache available models and voice objects
available_models = get_users_models()
voices = {model: user.get_voices_by_name(model)[0] for model in available_models}
# GUI Definition
sg.theme("dark grey 9")
layout = [
[sg.Text("Your Name:"), sg.Input(key="-USER NAME-")],
[
sg.Text("Model:"),
sg.Combo(available_models, readonly=True, key="-Selected Model-"),
],
[
sg.Text("Spoken Responses:"),
sg.Combo(
["Yes", "No"],
readonly=True,
key="-Spoken Response-",
default_value="Yes",
),
],
[sg.Text("Query Mode:"), sg.Button("Speak"), sg.Button("Type")],
[sg.Save("Save"), sg.Exit("Exit")],
]
# Create the window
window_title = settings["GUI"]["title"]
window = sg.Window(window_title, layout)
# Store ongoing conversations
conversations = {}
while True:
event, values = window.read()
# User closes window
if event in (sg.WINDOW_CLOSED, "Exit"):
break
# Handle events with exceptions
try:
if (
event in ("Speak", "Type")
and has_name(values)
and has_selected_model(values)
):
name = values["-USER NAME-"]
model = values["-Selected Model-"]
voice = voices[model] # Reuse voice object from cache
# Initialize conversation for this model if not already done
if model not in conversations:
conversations[model] = [
{
"role": "system",
"content": f"Your name is {model} and you're an assistant for {name}.",
},
]
conversation = conversations[model]
# Handle Speak event
if event == "Speak":
with mic as source:
r.adjust_for_ambient_noise(
source, duration=1
) # Can set the duration with duration keyword
print("Speak now...")
sg.popup_timed(
"Speak now...", auto_close=True, auto_close_duration=1
)
try:
# Gather audio and transcribe to text
audio = r.listen(source)
word = r.recognize_google(audio)
# Show user's query
print(f"You said: {word}")
# Close window and quit program when user says "That is all"
if word.lower() == "that is all":
print(f"{model}: See you later!")
sg.popup_timed(
f"{model}: See you later!",
auto_close=True,
auto_close_duration=2,
)
window.close()
quit()
# Generate and display response
generate_and_show_response(
word, model, conversation, voice, values
)
except Exception as e:
print(f"Couldn't interpret audio, try again. Error: {e}")
print("=====")
# Handle Type event
elif event == "Type":
# Have user type question in popup
word = sg.popup_get_text("Enter question")
# Show user's query
print(f"You said: {word}")
# Generate and display response
generate_and_show_response(word, model, conversation, voice, values)
except Exception as e:
logging.error(f"Error: {e}")
sg.popup_error(f"Error: {e}")
# Close the window once the event loop has exited
window.close()
if __name__ == "__main__":
# Set up logging
logging.basicConfig(level=logging.INFO)
# Check if the user is a valid user from the provided keys
is_valid_user()
user = ElevenLabsUser(ELEVENLABS_KEY)
SETTINGS_PATH = str(Path.cwd())
# create the settings object and use ini format
settings = sg.UserSettings(
path=SETTINGS_PATH,
filename="config.ini",
use_config_file=True,
convert_bools_and_none=True,
)
configur = ConfigParser()
configur.read("config.ini")
theme = configur.get("GUI", "theme")
font_family = configur.get("GUI", "font_family")
font_size = configur.getint("GUI", "font_size")
sg.theme(theme)
sg.set_options(font=(font_family, font_size))
main_window()
| [
"Your name is PLACEHOLDER and you're an assistant for PLACEHOLDER.",
"Test"
] |
2024-01-10 | normand1/HyperFeeder | podcastTextGenerationApp~podcastSummaryPlugins~baseSummaryPlugin.py | from abc import abstractmethod
import os, json, tiktoken
from podcastSummaryPlugins.abstractPluginDefinitions.abstractStorySummaryPlugin import (
AbstractStorySummaryPlugin,
)
from langchain.text_splitter import CharacterTextSplitter
from dotenv import load_dotenv
class BaseSummaryPlugin(AbstractStorySummaryPlugin):
def __init__(self):
currentFile = os.path.realpath(__file__)
currentDirectory = os.path.dirname(currentFile)
load_dotenv(os.path.join(currentDirectory, ".env.summary"))
@abstractmethod
def summarizeText(self, story):
pass
@abstractmethod
def identify(self) -> str:
pass
def writeToDisk(
self, story, summaryText, summaryTextDirName, summaryTextFileNameLambda
):
url = story["link"]
uniqueId = story["uniqueId"]
rawTextFileName = summaryTextFileNameLambda(uniqueId, url)
filePath = os.path.join(summaryTextDirName, rawTextFileName)
os.makedirs(summaryTextDirName, exist_ok=True)
with open(filePath, "w", encoding="utf-8") as file:
json.dump(summaryText, file)
file.flush()
def doesOutputFileExist(
self, story, summaryTextDirName, summaryTextFileNameLambda
) -> bool:
url = story["link"]
uniqueId = story["uniqueId"]
rawTextFileName = summaryTextFileNameLambda(uniqueId, url)
filePath = os.path.join(summaryTextDirName, rawTextFileName)
if os.path.exists(filePath):
print(
"Summary text file already exists at filepath: "
+ filePath
+ ", skipping summarizing story"
)
return True
return False
def prepareForSummarization(self, texts):
if (
self.numberOfTokensFromString(texts)
< (4096 - int(os.getenv("OPENAI_MAX_TOKENS_SUMMARY"))) - 265
):
return [texts]
chunkSize = int(os.getenv("CHUNK_SIZE"))
textSplitter = CharacterTextSplitter.from_tiktoken_encoder(
separator=".\n", chunk_size=chunkSize, chunk_overlap=0 # no overlap
)
splitTexts = textSplitter.split_text(texts)
if len(splitTexts) <= 2:
textSplitter = CharacterTextSplitter.from_tiktoken_encoder(
separator="<p>", chunk_size=chunkSize, chunk_overlap=0 # no overlap
)
splitTexts = textSplitter.split_text(texts)
if len(splitTexts) <= 2:
textSplitter = CharacterTextSplitter.from_tiktoken_encoder(
separator=" ", chunk_size=chunkSize, chunk_overlap=0 # no overlap
)
splitTexts = textSplitter.split_text(texts)
if len(splitTexts) <= 2:
raise ValueError(
"Text cannot be summarized, please check the text and the above separators and try again."
)
return splitTexts
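# Illustrative note (not part of the original source): the splitter falls back from
# ".\n" to "<p>" to " " until the text yields more than two chunks; a caller might use it as
#   chunks = plugin.prepareForSummarization(long_article_text)   # `plugin` is a concrete subclass instance
#   summaries = [summarize_chunk(c) for c in chunks]             # summarize_chunk is a hypothetical helper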
def numberOfTokensFromString(self, string: str) -> int:
encoding = tiktoken.get_encoding("cl100k_base")
numTokens = len(encoding.encode(string))
return numTokens
| [] |
2024-01-10 | normand1/HyperFeeder | podcastTextGenerationApp~podcastSummaryPlugins~utilities~storySummarizer.py | import os
from langchain import OpenAI
from langchain.docstore.document import Document
from langchain.chains.summarize import load_summarize_chain
class StorySummarizer:
def summarize(self, text):
docs = [Document(page_content=text)]
llm = OpenAI(
model=os.getenv("OPENAI_MODEL_SUMMARY"),
max_tokens=int(os.getenv("OPENAI_MAX_TOKENS_SUMMARY")),
temperature=0,
)
chain = load_summarize_chain(llm, chain_type="map_reduce")
result = chain.run(docs)
return result
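# Illustrative usage sketch (assumes OPENAI_MODEL_SUMMARY, OPENAI_MAX_TOKENS_SUMMARY and
# an OpenAI API key are configured in the environment):
#   summarizer = StorySummarizer()
#   print(summarizer.summarize(article_text))   # article_text is an assumed input string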
| [] |