import os
import logging

import instructor
import openai
from openai import OpenAI
from dotenv import load_dotenv

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Load the OpenAI credentials from a local .env file.
load_dotenv(".env")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
def api_function_call(
    system_message: str,
    query: str,
    model: str = "gpt-4-0125-preview",
    response_model=None,
    max_retries: int = 0,
    stream: bool = False,
):
    """Call the OpenAI chat completions API through an instructor-patched client.

    Returns a ``(result, error)`` tuple: ``result`` is the API response (a token
    generator when streaming, or a user-facing error message on failure) and
    ``error`` is True when the call failed.
    """
    # instructor.patch extends the OpenAI client so that create() accepts the
    # extra response_model and max_retries keyword arguments.
    client = instructor.patch(OpenAI(api_key=OPENAI_API_KEY))
    try:
        message_data = {
            "model": model,
            "messages": [
                {"role": "system", "content": system_message},
                {"role": "user", "content": query},
            ],
            "max_retries": max_retries,
            "stream": stream,
        }
        # Only request structured output when a response_model is given;
        # otherwise this is a plain chat completion.
        if response_model is not None:
            message_data["response_model"] = response_model

        response = client.chat.completions.create(**message_data)
        error = False
    except openai.BadRequestError:
        error = True
        logger.exception("Invalid request to OpenAI API. See traceback:")
        error_message = (
            "Something went wrong while connecting with OpenAI, try again soon!"
        )
        return error_message, error
    except openai.RateLimitError:
        error = True
        logger.exception("RateLimit error from OpenAI. See traceback:")
        error_message = "OpenAI servers seem to be overloaded, try again later!"
        return error_message, error
    except Exception:
        error = True
        logger.exception(
            "Some kind of error happened trying to generate the response. See traceback:"
        )
        error_message = (
            "Something went wrong while connecting with OpenAI, try again soon!"
        )
        return error_message, error
    # Plain streaming: with no response_model, wrap the raw chunk stream in a
    # generator that yields only the text tokens. In every other case the
    # response object is returned directly.
    if stream is True and response_model is None:

        def answer_generator():
            for chunk in response:
                token = chunk.choices[0].delta.content
                token = "" if token is None else token
                yield token

        return answer_generator(), error
    else:
        return response, error
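

# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal sketch of how api_function_call can be driven, assuming
# OPENAI_API_KEY is set and pydantic is installed. The Answer model and both
# prompts below are hypothetical examples, not part of this module's API.
if __name__ == "__main__":
    from pydantic import BaseModel

    class Answer(BaseModel):
        # Hypothetical schema for the structured (response_model) path.
        answer: str
        confidence: float

    # Structured call: instructor validates the completion into an Answer,
    # retrying validation failures up to max_retries times.
    result, error = api_function_call(
        system_message="You are a concise assistant.",
        query="What is the capital of France?",
        response_model=Answer,
        max_retries=2,
    )
    if not error:
        print(result.answer, result.confidence)

    # Streaming call: with no response_model, a token generator is returned.
    tokens, error = api_function_call(
        system_message="You are a concise assistant.",
        query="Summarize what instructor adds to the OpenAI client.",
        stream=True,
    )
    if not error:
        for token in tokens:
            print(token, end="", flush=True)
        print()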