Ulaş Dilek committed · Commit 802f7a2 · Parent(s): 71680ec
added app files
.gitignore
ADDED
@@ -0,0 +1,4 @@
+__pycache__
+.env
+venv
+flagged
README.md
CHANGED
@@ -2,7 +2,7 @@
 title: Gpt Claude Dialogue
 emoji: 📚
 colorFrom: purple
-colorTo:
+colorTo: blue
 sdk: gradio
 sdk_version: 3.28.2
 app_file: app.py
@@ -10,4 +10,39 @@ pinned: false
 license: mit
 ---
 
-
+# GPT-3.5 Turbo and Claude-v1.3 Dialogue
+
+This app uses the OpenAI API and the Anthropic API to generate responses between the two AI models. The user can type in a context and see GPT-3.5 Turbo and Claude-v1.3 have a generated conversation with each other.
+
+Enjoy the chaos! Let me know if you have any issues running the app.
+
+To run this app locally:
+
+1. Clone this repository
+
+2. Install the requirements:
+
+```bash
+pip install gradio python-dotenv openai anthropic
+```
+
+3. Obtain API keys for:
+   - OpenAI's API
+   - Anthropic's API
+
+4. Add the API keys to a file called `.env` with the variables:
+
+```
+OPENAI_API_KEY="YOUR_OPENAI_API_KEY"
+CLAUDE_API_KEY="YOUR_CLAUDE_API_KEY"
+```
+
+5. Run the gradio app:
+
+```bash
+python app.py
+```
+
+6. The gradio app will launch in your browser. Type a context to start a conversation between GPT-3.5 Turbo and Claude-v1.3!
+
+7. To close the app, stop the process in your terminal.
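As a quick sanity check before step 5, here is a minimal sketch (a hypothetical `check_env.py`, not part of this commit) that confirms the two keys from step 4 are visible via python-dotenv, the same way `app.py` below loads them:

```python
# check_env.py -- hypothetical helper; verifies the .env keys load.
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the working directory
for key in ("OPENAI_API_KEY", "CLAUDE_API_KEY"):
    # report presence only; never echo the secret itself
    print(f"{key}: {'set' if os.environ.get(key) else 'MISSING'}")
```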
app.py
ADDED
@@ -0,0 +1,90 @@
+import gradio as gr
+from dotenv import load_dotenv
+import os
+import anthropic
+import openai
+from util import ClaudeCompletion, GPTCompletion
+
+gr.close_all()
+
+load_dotenv()
+CLAUDE_API_KEY = os.environ["CLAUDE_API_KEY"]
+openai.api_key = os.environ["OPENAI_API_KEY"]
+
+claudeClient = anthropic.Client(CLAUDE_API_KEY)
+
+def talkToClaude(prompt):
+    completion = ClaudeCompletion(prompt, max_tokens_to_sample=1024)
+    claudeResponse = completion.execute(claudeClient)
+    return claudeResponse
+
+def chatWithClaude(chatHistory):
+    completion = ClaudeCompletion("", max_tokens_to_sample=2048, temperature=0.5)
+    claudeResponse = completion.chatComplete(claudeClient, chatHistory)
+    lastMessage = chatHistory[-1][0]
+    chatHistory.pop()  # replace the open (gptMessage, "") pair...
+    chatHistory.append((lastMessage, claudeResponse))  # ...with Claude's reply filled in
+    return chatHistory
+
+def startNewChat(system, chatHistory):
+    systemMes = "You are a human chatting with an AI assistant. This is the context of your conversation: " + system
+    completion = GPTCompletion(system=systemMes, max_tokens=2048, temperature=1.5)
+    gptResponse = completion.chatComplete(chatHistory, firstMessage="Hi, I am an AI assistant. How can I help you?")
+    chatHistory.append((gptResponse, ""))
+    return chatHistory
+
+def chatWithGPT(chatHistory):
+    completion = GPTCompletion(system="You are a human chatting with an AI assistant.", max_tokens=2048, temperature=1.5)
+    gptResponse = completion.chatComplete(chatHistory)
+    chatHistory.append((gptResponse, ""))
+    return chatHistory
+
+with gr.Blocks() as demo:
+    chatHistory = []
+    gptsTurn = True  # GPT opens; the flag flips after every response
+
+    def startConversation(prompt):
+        global chatHistory, gptsTurn
+        # nextBtn.interactive = False
+        chatHistory = []
+        startNewChat(prompt, chatHistory)
+        # nextBtn.interactive = True
+        gptsTurn = False
+        return chatHistory
+
+    def nextResponse():
+        global gptsTurn
+        # nextBtn.interactive = False
+        if gptsTurn:
+            chatWithGPT(chatHistory)
+        else:
+            chatWithClaude(chatHistory)
+        gptsTurn = not gptsTurn
+        # nextBtn.interactive = True
+        return chatHistory
+
+    context = gr.Textbox(label="Context",
+                         placeholder="Set the context for two LLMs to chat with each other",
+                         info="OpenAI's gpt-3.5-turbo model with chat completion and Anthropic's claude-v1.3 text completion model will talk to each other. Context will be integrated into GPT's system message for the first line of the dialogue and will be scrapped afterwards. The two LLMs are expected to keep the conversation going based on the conversation history. Clicking \"Start conversation\" will reset the conversation and bring the first message. Then, clicking \"Next response\" will bring the next line of the dialogue.")
+    start_button = gr.Button("Start conversation")
+
+    chatbot = gr.Chatbot().style(height=460)
+    nextBtn = gr.Button("Next response")
+    examples = gr.Examples(
+        ["A person is interested in Formula 1 and asks questions about the sport",
+         "A little child wants to understand how planes fly",
+         "You are a young indie game dev searching for ideas for their next strategy game",
+         "You are an old Irish man who is interested in medieval European history"],
+        context)
+    # nextBtn.interactive = False
+    start_button.click(startConversation, context, chatbot, scroll_to_output=True, show_progress=True)
+    nextBtn.click(nextResponse, None, chatbot, scroll_to_output=True, show_progress=True)
+
+try:
+    demo.launch(share=True)
+except KeyboardInterrupt:
+    demo.close()
+except Exception as e:
+    print(e)
+    demo.close()
+    gr.close_all()
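The turn-taking in `app.py` is easiest to see with stubs in place of the API calls. A dry-run sketch (hypothetical stub functions, not part of the commit) of how `chatHistory` and `gptsTurn` evolve per button click:

```python
# Dry run of app.py's alternation; each pair is (GPT's line, Claude's reply).
chatHistory = []
gptsTurn = True

def stub_gpt(history):
    # mirrors chatWithGPT: open a new pair with an empty Claude slot
    history.append((f"gpt line {len(history) + 1}", ""))

def stub_claude(history):
    # mirrors chatWithClaude: fill the empty slot of the last pair
    gpt_line = history.pop()[0]
    history.append((gpt_line, f"claude replies to {gpt_line!r}"))

stub_gpt(chatHistory)   # startConversation: GPT opens...
gptsTurn = False        # ...so Claude is up next
for _ in range(4):      # four "Next response" clicks
    (stub_gpt if gptsTurn else stub_claude)(chatHistory)
    gptsTurn = not gptsTurn
print(chatHistory)      # alternating pairs; the last Claude slot may be empty
```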
util.py
ADDED
@@ -0,0 +1,113 @@
+import anthropic
+import openai
+
+class ClaudeCompletion:
+    def __init__(
+        self,
+        prompt,
+        model="claude-v1.3",
+        max_tokens_to_sample=256,
+        stop_sequences=[anthropic.HUMAN_PROMPT],
+        stream=False,
+        temperature=1.0,
+        top_k=-1,
+        top_p=-1
+    ):
+        self.model = model
+        self.prompt = prompt
+        self.max_tokens_to_sample = max_tokens_to_sample
+        self.stop_sequences = stop_sequences
+        self.stream = stream
+        self.temperature = temperature
+        self.top_k = top_k
+        self.top_p = top_p
+
+
+    def execute(self, claudeClient):
+        # single-shot completion: wrap the prompt in Human/Assistant markers
+        response = claudeClient.completion(
+            prompt=f"{anthropic.HUMAN_PROMPT} {self.prompt} {anthropic.AI_PROMPT}",
+            model=self.model,
+            max_tokens_to_sample=self.max_tokens_to_sample,
+            stop_sequences=self.stop_sequences,
+            stream=self.stream,
+            temperature=self.temperature,
+            top_k=self.top_k,
+            top_p=self.top_p,
+        )
+        return response["completion"].strip()
+
+
+    def chatComplete(self, claudeClient, chatHistory):
+        # rebuild the full Human/Assistant transcript from (gpt, claude) pairs
+        for i in range(len(chatHistory) - 1):
+            self.prompt = self.prompt + f"{anthropic.HUMAN_PROMPT} {chatHistory[i][0]}"
+            self.prompt = self.prompt + f"{anthropic.AI_PROMPT} {chatHistory[i][1]}"
+        self.prompt = self.prompt + f"{anthropic.HUMAN_PROMPT} {chatHistory[-1][0]} {anthropic.AI_PROMPT}"
+
+        # print("------------anthropic------------")
+        # print(self.prompt)
+
+        response = claudeClient.completion(
+            prompt=self.prompt,
+            model=self.model,
+            max_tokens_to_sample=self.max_tokens_to_sample,
+            stop_sequences=self.stop_sequences,
+            stream=self.stream,
+            temperature=self.temperature,
+            top_k=self.top_k,
+            top_p=self.top_p,
+        )
+        return response["completion"].strip()
+
+class GPTCompletion:
+    def __init__(
+        self,
+        system="You are a helpful AI assistant",
+        model="gpt-3.5-turbo",
+        temperature=1.0,
+        top_p=1.0,
+        n=1,
+        stream=False,
+        stop=None,
+        max_tokens=256,
+        presence_penalty=0.0,
+        frequency_penalty=0.0,
+        logit_bias={}
+    ):
+        self.system = system
+        self.model = model
+        self.messages = [{"role": "system", "content": f"{self.system}"}]
+        self.temperature = temperature
+        self.top_p = top_p
+        self.n = n
+        self.stream = stream
+        self.stop = stop
+        self.max_tokens = max_tokens
+        self.presence_penalty = presence_penalty
+        self.frequency_penalty = frequency_penalty
+        self.logit_bias = logit_bias
+
+
+    def chatComplete(self, chatHistory, firstMessage=""):
+        # GPT plays the "human" here, so roles are swapped relative to chatHistory
+        self.messages.append({"role": "user", "content": f"{firstMessage}"})
+        for i in range(len(chatHistory)):
+            self.messages.append({"role": "assistant", "content": f"{chatHistory[i][0]}"})
+            self.messages.append({"role": "user", "content": f"{chatHistory[i][1]}"})
+
+        response = openai.ChatCompletion.create(
+            model=self.model,
+            messages=self.messages,
+            temperature=self.temperature,
+            top_p=self.top_p,
+            n=self.n,
+            stream=self.stream,
+            stop=self.stop,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            logit_bias=self.logit_bias
+        )
+
+        return response["choices"][0].message["content"].strip()
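To make `ClaudeCompletion.chatComplete`'s prompt assembly concrete, here is a short sketch (hypothetical history values, not part of the commit) of the string it builds from a `(gpt, claude)` history, using the SDK's `HUMAN_PROMPT` (`"\n\nHuman:"`) and `AI_PROMPT` (`"\n\nAssistant:"`) constants:

```python
import anthropic

# (gpt, claude) pairs; the last pair's Claude slot is still empty
chatHistory = [("What is Formula 1?", "A motorsport series."),
               ("Who runs it?", "")]

prompt = ""
for gpt_msg, claude_msg in chatHistory[:-1]:
    prompt += f"{anthropic.HUMAN_PROMPT} {gpt_msg}"    # GPT's line plays the human
    prompt += f"{anthropic.AI_PROMPT} {claude_msg}"    # Claude's earlier reply
prompt += f"{anthropic.HUMAN_PROMPT} {chatHistory[-1][0]} {anthropic.AI_PROMPT}"

print(prompt)
# \n\nHuman: What is Formula 1?\n\nAssistant: A motorsport series.
# \n\nHuman: Who runs it? \n\nAssistant:
```

Ending the prompt with a bare `AI_PROMPT` is what cues claude-v1.3 to produce the next reply, and `stop_sequences=[anthropic.HUMAN_PROMPT]` keeps it from writing the human's next turn itself.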