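"""LiteLLM exception types.

Thin wrappers around the ``openai`` exception classes that attach LiteLLM-specific
context (``llm_provider``, ``model``, retry counters, debug info) and prefix each
message with the corresponding ``litellm.<ErrorName>`` label.
"""
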
from typing import Optional

import httpx
import openai


class AuthenticationError(openai.AuthenticationError):
    def __init__(
        self,
        message,
        llm_provider,
        model,
        response: Optional[httpx.Response] = None,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = 401
        self.message = "litellm.AuthenticationError: {}".format(message)
        self.llm_provider = llm_provider
        self.model = model
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        self.response = response or httpx.Response(
            status_code=self.status_code,
            request=httpx.Request(method="GET", url="https://litellm.ai"),
        )
        super().__init__(self.message, response=self.response, body=None)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class NotFoundError(openai.NotFoundError):
    def __init__(
        self,
        message,
        model,
        llm_provider,
        response: Optional[httpx.Response] = None,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = 404
        self.message = "litellm.NotFoundError: {}".format(message)
        self.model = model
        self.llm_provider = llm_provider
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        self.response = response or httpx.Response(
            status_code=self.status_code,
            request=httpx.Request(method="GET", url="https://litellm.ai"),
        )
        super().__init__(self.message, response=self.response, body=None)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class BadRequestError(openai.BadRequestError):
    def __init__(
        self,
        message,
        model,
        llm_provider,
        response: Optional[httpx.Response] = None,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = 400
        self.message = "litellm.BadRequestError: {}".format(message)
        self.model = model
        self.llm_provider = llm_provider
        self.litellm_debug_info = litellm_debug_info
        # use the provider's response when given; otherwise fall back to a mock response
        response = response or httpx.Response(
            status_code=self.status_code,
            request=httpx.Request(method="GET", url="https://litellm.ai"),
        )
        self.max_retries = max_retries
        self.num_retries = num_retries
        super().__init__(self.message, response=response, body=None)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class UnprocessableEntityError(openai.UnprocessableEntityError):
    def __init__(
        self,
        message,
        model,
        llm_provider,
        response: httpx.Response,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = 422
        self.message = "litellm.UnprocessableEntityError: {}".format(message)
        self.model = model
        self.llm_provider = llm_provider
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        super().__init__(self.message, response=response, body=None)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class Timeout(openai.APITimeoutError):
    def __init__(
        self,
        message,
        model,
        llm_provider,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
        headers: Optional[dict] = None,
    ):
        request = httpx.Request(
            method="POST",
            url="https://api.openai.com/v1",
        )
        super().__init__(request=request)
        self.status_code = 408
        self.message = "litellm.Timeout: {}".format(message)
        self.model = model
        self.llm_provider = llm_provider
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        self.headers = headers

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class PermissionDeniedError(openai.PermissionDeniedError):
    def __init__(
        self,
        message,
        llm_provider,
        model,
        response: httpx.Response,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = 403
        self.message = "litellm.PermissionDeniedError: {}".format(message)
        self.llm_provider = llm_provider
        self.model = model
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        super().__init__(self.message, response=response, body=None)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class RateLimitError(openai.RateLimitError):
    def __init__(
        self,
        message,
        llm_provider,
        model,
        response: Optional[httpx.Response] = None,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = 429
        self.message = "litellm.RateLimitError: {}".format(message)
        self.llm_provider = llm_provider
        self.model = model
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        # preserve the provider's response headers (e.g. Retry-After) on the mock response
        _response_headers = (
            getattr(response, "headers", None) if response is not None else None
        )
        self.response = httpx.Response(
            status_code=429,
            headers=_response_headers,
            request=httpx.Request(
                method="POST",
                url="https://cloud.google.com/vertex-ai/",
            ),
        )
        super().__init__(self.message, response=self.response, body=None)
        self.code = "429"
        self.type = "throttling_error"

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class ContextWindowExceededError(BadRequestError):
    def __init__(
        self,
        message,
        model,
        llm_provider,
        response: Optional[httpx.Response] = None,
        litellm_debug_info: Optional[str] = None,
    ):
        self.status_code = 400
        self.model = model
        self.llm_provider = llm_provider
        self.litellm_debug_info = litellm_debug_info
        request = httpx.Request(method="POST", url="https://api.openai.com/v1")
        self.response = response or httpx.Response(status_code=400, request=request)
        super().__init__(
            message=message,
            model=self.model,
            llm_provider=self.llm_provider,
            response=self.response,
            litellm_debug_info=self.litellm_debug_info,
        )

        # set after super().__init__() so the raised error is labelled as a
        # context-window error rather than a generic BadRequestError
        self.message = "litellm.ContextWindowExceededError: {}".format(self.message)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class RejectedRequestError(BadRequestError):
    def __init__(
        self,
        message,
        model,
        llm_provider,
        request_data: dict,
        litellm_debug_info: Optional[str] = None,
    ):
        self.status_code = 400
        self.message = "litellm.RejectedRequestError: {}".format(message)
        self.model = model
        self.llm_provider = llm_provider
        self.litellm_debug_info = litellm_debug_info
        self.request_data = request_data
        request = httpx.Request(method="POST", url="https://api.openai.com/v1")
        response = httpx.Response(status_code=400, request=request)
        super().__init__(
            message=self.message,
            model=self.model,
            llm_provider=self.llm_provider,
            response=response,
            litellm_debug_info=self.litellm_debug_info,
        )

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class ContentPolicyViolationError(BadRequestError):
    def __init__(
        self,
        message,
        model,
        llm_provider,
        response: Optional[httpx.Response] = None,
        litellm_debug_info: Optional[str] = None,
    ):
        self.status_code = 400
        self.message = "litellm.ContentPolicyViolationError: {}".format(message)
        self.model = model
        self.llm_provider = llm_provider
        self.litellm_debug_info = litellm_debug_info
        request = httpx.Request(method="POST", url="https://api.openai.com/v1")
        self.response = response or httpx.Response(status_code=400, request=request)
        super().__init__(
            message=self.message,
            model=self.model,
            llm_provider=self.llm_provider,
            response=self.response,
            litellm_debug_info=self.litellm_debug_info,
        )

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class ServiceUnavailableError(openai.APIStatusError):
    def __init__(
        self,
        message,
        llm_provider,
        model,
        response: Optional[httpx.Response] = None,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = 503
        self.message = "litellm.ServiceUnavailableError: {}".format(message)
        self.llm_provider = llm_provider
        self.model = model
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        self.response = response or httpx.Response(
            status_code=self.status_code,
            request=httpx.Request(
                method="POST",
                url="https://cloud.google.com/vertex-ai/",
            ),
        )
        super().__init__(self.message, response=self.response, body=None)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class InternalServerError(openai.InternalServerError):
    def __init__(
        self,
        message,
        llm_provider,
        model,
        response: Optional[httpx.Response] = None,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = 500
        self.message = "litellm.InternalServerError: {}".format(message)
        self.llm_provider = llm_provider
        self.model = model
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        self.response = response or httpx.Response(
            status_code=self.status_code,
            request=httpx.Request(
                method="POST",
                url="https://cloud.google.com/vertex-ai/",
            ),
        )
        super().__init__(self.message, response=self.response, body=None)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class APIError(openai.APIError):
    def __init__(
        self,
        status_code: int,
        message,
        llm_provider,
        model,
        request: Optional[httpx.Request] = None,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = status_code
        self.message = "litellm.APIError: {}".format(message)
        self.llm_provider = llm_provider
        self.model = model
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        if request is None:
            request = httpx.Request(method="POST", url="https://api.openai.com/v1")
        super().__init__(self.message, request=request, body=None)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class APIConnectionError(openai.APIConnectionError):
    def __init__(
        self,
        message,
        llm_provider,
        model,
        request: Optional[httpx.Request] = None,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.message = "litellm.APIConnectionError: {}".format(message)
        self.llm_provider = llm_provider
        self.model = model
        self.status_code = 500
        self.litellm_debug_info = litellm_debug_info
        # use the caller's request when given; otherwise fall back to a mock request
        self.request = request or httpx.Request(
            method="POST", url="https://api.openai.com/v1"
        )
        self.max_retries = max_retries
        self.num_retries = num_retries
        super().__init__(message=self.message, request=self.request)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class APIResponseValidationError(openai.APIResponseValidationError):
    def __init__(
        self,
        message,
        llm_provider,
        model,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.message = "litellm.APIResponseValidationError: {}".format(message)
        self.llm_provider = llm_provider
        self.model = model
        request = httpx.Request(method="POST", url="https://api.openai.com/v1")
        response = httpx.Response(status_code=500, request=request)
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        super().__init__(response=response, body=None, message=message)

    def __str__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message

    def __repr__(self):
        _message = self.message
        if self.num_retries:
            _message += f" LiteLLM Retried: {self.num_retries} times"
        if self.max_retries:
            _message += f", LiteLLM Max Retries: {self.max_retries}"
        return _message


class JSONSchemaValidationError(APIResponseValidationError):
    def __init__(
        self, model: str, llm_provider: str, raw_response: str, schema: str
    ) -> None:
        self.raw_response = raw_response
        self.schema = schema
        self.model = model
        message = "litellm.JSONSchemaValidationError: model={}, returned an invalid response={}, for schema={}.\nAccess raw response with `e.raw_response`".format(
            model, raw_response, schema
        )
        self.message = message
        super().__init__(model=model, message=message, llm_provider=llm_provider)


class OpenAIError(openai.OpenAIError):
    def __init__(self, original_exception=None):
        super().__init__()
        self.llm_provider = "openai"


class UnsupportedParamsError(BadRequestError):
    def __init__(
        self,
        message,
        llm_provider: Optional[str] = None,
        model: Optional[str] = None,
        status_code: int = 400,
        response: Optional[httpx.Response] = None,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = status_code
        self.message = "litellm.UnsupportedParamsError: {}".format(message)
        self.model = model
        self.llm_provider = llm_provider
        self.litellm_debug_info = litellm_debug_info
        response = response or httpx.Response(
            status_code=self.status_code,
            request=httpx.Request(method="GET", url="https://litellm.ai"),
        )
        self.max_retries = max_retries
        self.num_retries = num_retries
        super().__init__(
            message=message,
            model=model,
            llm_provider=llm_provider,
            response=response,
            litellm_debug_info=litellm_debug_info,
            max_retries=max_retries,
            num_retries=num_retries,
        )
        # BadRequestError.__init__ resets these, so re-assert the specific values
        self.status_code = status_code
        self.message = "litellm.UnsupportedParamsError: {}".format(message)


LITELLM_EXCEPTION_TYPES = [
    AuthenticationError,
    NotFoundError,
    BadRequestError,
    UnprocessableEntityError,
    UnsupportedParamsError,
    Timeout,
    PermissionDeniedError,
    RateLimitError,
    ContextWindowExceededError,
    RejectedRequestError,
    ContentPolicyViolationError,
    InternalServerError,
    ServiceUnavailableError,
    APIError,
    APIConnectionError,
    APIResponseValidationError,
    OpenAIError,
    JSONSchemaValidationError,
]

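# Illustrative usage (not executed here): callers typically catch these exception
# types around a completion call and branch on `status_code` or the specific class.
# `litellm.completion` and the re-exported exception names are assumed to be
# available from the top-level `litellm` package.
#
#   import litellm
#
#   try:
#       litellm.completion(
#           model="gpt-4o", messages=[{"role": "user", "content": "hello"}]
#       )
#   except litellm.RateLimitError as e:          # 429 -> safe to back off and retry
#       print(e.status_code, str(e))
#   except litellm.ContextWindowExceededError:   # 400 -> shrink the prompt, do not retry
#       raise

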
class BudgetExceededError(Exception):
    def __init__(
        self, current_cost: float, max_budget: float, message: Optional[str] = None
    ):
        self.current_cost = current_cost
        self.max_budget = max_budget
        message = (
            message
            or f"Budget has been exceeded! Current cost: {current_cost}, Max budget: {max_budget}"
        )
        self.message = message
        super().__init__(message)


class InvalidRequestError(openai.BadRequestError):
    def __init__(self, message, model, llm_provider):
        self.status_code = 400
        self.message = message
        self.model = model
        self.llm_provider = llm_provider
        self.response = httpx.Response(
            status_code=400,
            request=httpx.Request(method="GET", url="https://litellm.ai"),
        )
        super().__init__(message=self.message, response=self.response, body=None)


class MockException(openai.APIError):
    def __init__(
        self,
        status_code: int,
        message,
        llm_provider,
        model,
        request: Optional[httpx.Request] = None,
        litellm_debug_info: Optional[str] = None,
        max_retries: Optional[int] = None,
        num_retries: Optional[int] = None,
    ):
        self.status_code = status_code
        self.message = "litellm.MockException: {}".format(message)
        self.llm_provider = llm_provider
        self.model = model
        self.litellm_debug_info = litellm_debug_info
        self.max_retries = max_retries
        self.num_retries = num_retries
        if request is None:
            request = httpx.Request(method="POST", url="https://api.openai.com/v1")
        super().__init__(self.message, request=request, body=None)
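# Illustrative only: MockException mirrors APIError's signature, so tests can raise a
# provider-shaped error without making a real HTTP call, e.g.
#
#   raise MockException(
#       status_code=429,
#       message="simulated rate limit",
#       llm_provider="openai",
#       model="gpt-4o",
#   )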