# Thin wrappers around the legacy Anthropic text-completion API (pre-Messages,
# anthropic SDK < 0.3) and the legacy OpenAI ChatCompletion API (openai SDK < 1.0).
import anthropic
import openai


class ClaudeCompletion:
    """Holds sampling parameters for the legacy Anthropic completion endpoint."""

    def __init__(
        self,
        prompt,
        model="claude-v1.3",
        max_tokens_to_sample=256,
        stop_sequences=None,
        stream=False,
        temperature=1.0,
        top_k=-1,
        top_p=-1,
    ):
        self.model = model
        self.prompt = prompt
        self.max_tokens_to_sample = max_tokens_to_sample
        # Avoid a mutable default argument; fall back to the SDK's human-turn marker.
        self.stop_sequences = stop_sequences if stop_sequences is not None else [anthropic.HUMAN_PROMPT]
        self.stream = stream
        self.temperature = temperature
        self.top_k = top_k
        self.top_p = top_p

    def execute(self, claudeClient):
        # Single-turn completion: wrap the prompt in the SDK's turn markers.
        response = claudeClient.completion(
            prompt=f"{anthropic.HUMAN_PROMPT} {self.prompt} {anthropic.AI_PROMPT}",
            model=self.model,
            max_tokens_to_sample=self.max_tokens_to_sample,
            stop_sequences=self.stop_sequences,
            stream=self.stream,
            temperature=self.temperature,
            top_k=self.top_k,
            top_p=self.top_p,
        )
        return response["completion"].strip()

    def chatComplete(self, claudeClient, chatHistory):
        # chatHistory is a list of (human, assistant) pairs; the final pair
        # carries the newest human message, whose answer is being requested.
        for i in range(len(chatHistory) - 1):
            self.prompt += f"{anthropic.HUMAN_PROMPT} {chatHistory[i][0]}"
            self.prompt += f"{anthropic.AI_PROMPT} {chatHistory[i][1]}"
        self.prompt += f"{anthropic.HUMAN_PROMPT} {chatHistory[-1][0]} {anthropic.AI_PROMPT}"
        response = claudeClient.completion(
            prompt=self.prompt,
            model=self.model,
            max_tokens_to_sample=self.max_tokens_to_sample,
            stop_sequences=self.stop_sequences,
            stream=self.stream,
            temperature=self.temperature,
            top_k=self.top_k,
            top_p=self.top_p,
        )
        return response["completion"].strip()
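

# A minimal usage sketch for ClaudeCompletion, assuming the legacy anthropic
# SDK (< 0.3) where anthropic.Client(api_key) exposes a .completion() method;
# the API key string and sample conversation below are placeholders.
def _claude_example():
    client = anthropic.Client("YOUR_ANTHROPIC_API_KEY")  # placeholder key
    single = ClaudeCompletion("Summarize the plot of Hamlet in one sentence.")
    print(single.execute(client))

    # Multi-turn: (human, assistant) pairs; the last pair carries the new
    # question, so its answer slot is left empty.
    chat = ClaudeCompletion("")
    history = [("Hello!", "Hi there! How can I help?"), ("What is 2 + 2?", "")]
    print(chat.chatComplete(client, history))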


class GPTCompletion:
    """Holds chat parameters for the legacy OpenAI ChatCompletion endpoint."""

    def __init__(
        self,
        system="You are a helpful AI assistant",
        model="gpt-3.5-turbo",
        temperature=1.0,
        top_p=1.0,
        n=1,
        stream=False,
        stop=None,
        max_tokens=256,
        presence_penalty=0.0,
        frequency_penalty=0.0,
        logit_bias=None,
    ):
        self.system = system
        self.model = model
        self.messages = [{"role": "system", "content": f"{self.system}"}]
        self.temperature = temperature
        self.top_p = top_p
        self.n = n
        self.stream = stream
        self.stop = stop
        self.max_tokens = max_tokens
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        # Avoid a mutable default argument for the logit-bias map.
        self.logit_bias = logit_bias if logit_bias is not None else {}

    def chatComplete(self, chatHistory, firstMessage=""):
        # firstMessage opens the conversation as the user; each history pair
        # is then consumed as (assistant reply, user follow-up). Note that
        # messages accumulate on the instance across calls.
        self.messages.append({"role": "user", "content": f"{firstMessage}"})
        for i in range(len(chatHistory)):
            self.messages.append({"role": "assistant", "content": f"{chatHistory[i][0]}"})
            self.messages.append({"role": "user", "content": f"{chatHistory[i][1]}"})
        response = openai.ChatCompletion.create(
            model=self.model,
            messages=self.messages,
            temperature=self.temperature,
            top_p=self.top_p,
            n=self.n,
            stream=self.stream,
            stop=self.stop,
            max_tokens=self.max_tokens,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            logit_bias=self.logit_bias,
        )
        return response["choices"][0]["message"]["content"].strip()
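

# A minimal usage sketch for GPTCompletion, assuming the legacy openai SDK
# (< 1.0) where openai.ChatCompletion.create() is available; the API key and
# sample messages are placeholders.
def _gpt_example():
    openai.api_key = "YOUR_OPENAI_API_KEY"  # placeholder key
    chat = GPTCompletion(system="You are a concise assistant.", temperature=0.3)
    # firstMessage opens the conversation; each history pair is consumed as
    # (assistant reply, user follow-up).
    history = [("Hi! How can I help?", "What is the capital of France?")]
    print(chat.chatComplete(history, firstMessage="Hello!"))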