initial commit
Browse files- LLM.py +96 -0
- app.py +89 -0
- prompt_parser.py +30 -0
- requirements.txt +0 -0
- scoreboard.csv +11 -0
- scoreboard.py +28 -0
LLM.py
ADDED
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
|
2 |
+
from langchain_groq import ChatGroq
|
3 |
+
from langchain_huggingface import ChatHuggingFace
|
4 |
+
from langchain_huggingface import HuggingFaceEndpoint
|
5 |
+
from dotenv import load_dotenv
|
6 |
+
from langchain.schema.output_parser import StrOutputParser
|
7 |
+
from langchain_huggingface import ChatHuggingFace
|
8 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
9 |
+
import os
|
10 |
+
|
11 |
+
os.environ['CURL_CA_BUNDLE'] = ''
|
12 |
+
load_dotenv()
|
13 |
+
|
14 |
+
class Bot():
    """Dispatcher over three LLM providers (Groq, Hugging Face, Google).

    `response()` routes a prompt to whichever provider hosts the requested
    model id.  Every provider call returns the model's answer as plain text,
    or an "Error: ..." string on failure, so the UI never crashes on a
    provider outage.
    """

    def __init__(self):
        # Model ids grouped by hosting provider.
        self.groq_models = ['gemma-7b-it', 'llama3-70b-8192',
                            'llama3-8b-8192', 'mixtral-8x7b-32768']
        self.hf_models = ["01-ai/Yi-1.5-34B-Chat", "google/gemma-1.1-2b-it",
                          "google/gemma-1.1-7b-it"]
        self.google_models = ["gemini-1.0-pro", "gemini-1.5-flash",
                              "gemini-1.5-pro"]
        # Flat union used by the UI to draw contestants.  Built from the
        # per-provider lists (same order as the old hand-written literal)
        # so the lists can never drift out of sync.
        self.models = self.google_models + self.hf_models + self.groq_models

    def call_groq(self, model, temp = 0.7, given_prompt = "Hi"):
        """Query a Groq-hosted model; return its answer or an error string."""
        try:
            llm = ChatGroq(
                temperature=temp,
                model= model
            )

            system = "You are a helpful assistant."
            human = "{text}"
            prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])

            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})

        except Exception as e:
            # Surface the failure as text so the chat UI can display it.
            return f"Error: {str(e)}"

    def call_hf(self, model, temp = 0.7, given_prompt = "Hi"):
        """Query a Hugging Face endpoint model; return its answer or an error string."""
        try:
            llm = HuggingFaceEndpoint(
                repo_id=model,
                temperature=temp
            )

            chat = ChatHuggingFace(llm=llm, verbose=True)

            template = """
You are a helpful assistant

User: {query}

Answer:
"""

            prompt = PromptTemplate(
                template=template,
                input_variables=["query"]
            )

            chain = prompt | chat | StrOutputParser()

            return chain.invoke({"query": given_prompt})

        except Exception as e:
            return f"Error: {str(e)}"

    def call_google(self, model, temp=0.7, given_prompt = "Hi"):
        """Query a Google Gemini model; return its answer or an error string."""
        try:
            # BUG FIX: the keyword was misspelled `temprature`, so the
            # slider value never reached Gemini.
            llm = ChatGoogleGenerativeAI(model = model, temperature = temp)
            human = "{text}"
            # NOTE(review): unlike call_groq, no system message is sent here
            # (the original defined one but never used it — gemini-1.0-pro
            # historically rejected system messages); dropped as dead code.
            prompt = ChatPromptTemplate.from_messages([("human", human)])
            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})
        except Exception as e:
            return f"Error: {str(e)}"

    def response(self, model, prompt="Hi", temprature = 0.7):
        """Route `prompt` to the provider hosting `model`.

        Returns the model's answer (or an "Error: ..." string), or a fixed
        apology string when `model` is unknown.  The `temprature` spelling
        is kept for backward compatibility with keyword callers.
        """
        if model in self.groq_models:
            res_show = self.call_groq(temp = temprature, given_prompt = prompt, model= model)
        elif model in self.hf_models:
            res_show = self.call_hf(given_prompt = prompt, temp = temprature, model = model)
        elif model in self.google_models:
            res_show = self.call_google(given_prompt = prompt, temp = temprature, model = model)
        else:
            return "Sorry! App not working properly"
        return res_show
|
92 |
+
|
93 |
+
|
94 |
+
|
95 |
+
|
96 |
+
|
app.py
ADDED
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# app.py — Gradio front-end for the "LLM arena": two randomly chosen models
# answer the same prompt side by side and the user votes for the winner.
import gradio as gr
from prompt_parser import Parse_Prompt
from scoreboard import Score
import warnings

warnings.filterwarnings("ignore")

# NOTE(review): Score subclasses Parse_Prompt, so `arena` and `score` are two
# independent Parse_Prompt instances — score.update() clears *score*'s own
# chat history, not arena's; confirm that is intended.
arena = Parse_Prompt()   # picks the contestants and holds both transcripts
score = Score()          # persists win counts to scoreboard.csv

with gr.Blocks(fill_height = True) as app:
    # ---- Tab 1: the arena itself ------------------------------------------
    with gr.Tab("🪖 Battle Field"):
        gr.Markdown('''## ⚔️ LLM: Large Language Mayhem
- Voting should be fair and based on the performance of the models.
- No cheating or manipulating the outcomes.
- Press 🎲 Random to change the models.
- Everything else except the Random button will only clear the screen, model being the same.
- Have fun and enjoy the language mayhem!
''')
        with gr.Row():
            # Collapsible roster of every competing model.
            with gr.Accordion("🥷 Warriors", open = False):
                gr.Dataframe([[model] for model in arena.models], col_count = 1, headers = ["🥷"])
        with gr.Group():
            with gr.Row():
                # The two contestants stay anonymous ("Warrior A/B") while voting.
                with gr.Column():
                    chatbox1 = gr.Chatbot(label = "Warrior A", show_copy_button = True)
                with gr.Column():
                    chatbox2 = gr.Chatbot(label = "Warrior B", show_copy_button = True)
            textbox = gr.Textbox(show_label = False, placeholder = "👉 Enter your prompt")

        with gr.Row():
            # ClearButtons wipe the prompt box and both transcripts client-side;
            # the .click() handlers further below add the score bookkeeping.
            with gr.Accordion("👆 Vote", open = False):
                with gr.Row():
                    vote_a = gr.ClearButton([textbox, chatbox1, chatbox2], value = "👈 Warrior A Wins")
                    vote_b = gr.ClearButton([textbox, chatbox1, chatbox2], value = "👉 Warrior B Wins")
                    vote_tie = gr.ClearButton([textbox, chatbox1, chatbox2], value = "🤝 Both Won")

        submit_button = gr.Button("Submit")
        with gr.Row():
            new_round = gr.ClearButton( [textbox, chatbox1, chatbox2], value = "🎲New Round🎲")
            clear = gr.ClearButton([textbox, chatbox1, chatbox2], value = "🧹 Clear")
        with gr.Row():
            with gr.Accordion("🔩 Parameters", open = False):
                # NOTE(review): "Temprature" is a typo in a user-facing label;
                # left untouched in this doc-only pass — fix separately.
                temp_slider = gr.Slider(0,1,value = 0.7, step=0.1, label = "Temprature")

        # Enter key and the Submit button both fan the prompt out to both models.
        textbox.submit(
            fn = arena.gen_output,
            inputs = [temp_slider, textbox],
            outputs = [chatbox1, chatbox2]
        )
        submit_button.click(
            fn = arena.gen_output,
            inputs = [temp_slider, textbox],
            outputs = [chatbox1, chatbox2]
        )
        # NOTE(review): voting for A/B records the win but does NOT draw a new
        # model pair (arena.change stays False) — per the on-screen rules only
        # 🎲 New Round / 🤝 Both Won rotate the contestants; confirm intended.
        vote_a.click(
            fn=lambda: score.update(arena.model1, score.df)
        )
        vote_b.click(
            fn = lambda: score.update(arena.model2, score.df)
        )
        vote_tie.click(
            fn = arena.change_models
        )
        new_round.click(
            fn = arena.change_models
        )
        clear.click(
            fn = arena.clear_history
        )

    # ---- Tab 2: live scoreboard -------------------------------------------
    with gr.Tab("💯 Score Board") as data_tab:
        gr.Markdown('''## ⚔️ LLM: Large Language Mayhem
- Voting should be fair and based on the performance of the models.
- No cheating or manipulating the outcomes.
- Click on Generate button to Update the 💯 Scoreboard.
''')
        # live=True re-runs df_show so the table tracks the CSV on disk.
        gr.Interface(
            fn = score.df_show,
            inputs = None,
            outputs=gr.Dataframe(type="pandas", label="Scoreboard", headers = ["","",""]),
            live = True,
            allow_flagging = "never",
            clear_btn = None

        )

app.launch(server_port=7000)
|
prompt_parser.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from LLM import Bot
|
2 |
+
import random
|
3 |
+
|
4 |
+
class Parse_Prompt(Bot):
    """Runs one 'battle' at a time: holds the two current contestant models
    and the per-model chat transcripts shown in the UI."""

    def __init__(self):
        super().__init__()
        # When True, the next gen_output() call draws a fresh model pair.
        self.change = True
        self.model1 = None
        self.model2 = None
        self.chat_history_1 = []
        self.chat_history_2 = []

    def model_init(self):
        """Pick two distinct model names at random from the full roster."""
        return random.sample(self.models, 2)

    def clear_history(self):
        """Drop both transcripts (the contestants themselves stay the same)."""
        self.chat_history_1, self.chat_history_2 = [], []

    def change_models(self):
        """Start a new round: wipe the transcripts and mark the pair stale."""
        self.clear_history()
        self.change = True

    def gen_output(self, temp, prompt):
        """Send `prompt` to both contestants and return the two transcripts.

        Draws a new model pair first if a round change is pending.
        """
        if self.change:
            self.model1, self.model2 = self.model_init()
            self.change = False
        # Append each [prompt, answer] turn to its warrior's transcript.
        for history, contender in ((self.chat_history_1, self.model1),
                                   (self.chat_history_2, self.model2)):
            history.append([prompt, self.response(contender, prompt, temp)])
        return self.chat_history_1, self.chat_history_2
|
requirements.txt
ADDED
Binary file (474 Bytes). View file
|
|
scoreboard.csv
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Models,Fights Won
|
2 |
+
gemini-1.0-pro,2
|
3 |
+
gemini-1.5-flash,0
|
4 |
+
gemini-1.5-pro,1
|
5 |
+
01-ai/Yi-1.5-34B-Chat,0
|
6 |
+
google/gemma-1.1-2b-it,5
|
7 |
+
google/gemma-1.1-7b-it,0
|
8 |
+
gemma-7b-it,0
|
9 |
+
llama3-70b-8192,0
|
10 |
+
llama3-8b-8192,2
|
11 |
+
mixtral-8x7b-32768,1
|
scoreboard.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from prompt_parser import Parse_Prompt
|
3 |
+
import pandas as pd
|
4 |
+
|
5 |
+
class Score(Parse_Prompt):
    """Persists per-model win counts in a CSV scoreboard on disk."""

    def __init__(self):
        super().__init__()
        self.file_path = 'scoreboard.csv'
        self.init_scores()

    def init_scores(self):
        """Load the scoreboard into `self.df`, creating a zeroed one on first run."""
        try:
            self.df = pd.read_csv(self.file_path)
        except FileNotFoundError:
            data = {
                'Models': self.models,
                # BUG FIX: the row count was hard-coded to 10; derive it from
                # the model roster so adding/removing a model cannot desync
                # the two columns.
                'Fights Won': np.zeros(len(self.models), dtype = int)
            }
            self.df = pd.DataFrame(data)
            self.df.to_csv(self.file_path, index=False)

    def update(self, model, df):
        """Record one win for `model` in `df` and write `df` back to disk.

        BUG FIX: the row mask previously used `self.df` while the increment
        and save used the `df` argument; use `df` consistently so the method
        is correct for any frame passed in.  Callers pass `score.df`, so
        their behavior is unchanged.
        """
        df.loc[df["Models"] == model, 'Fights Won'] += 1
        df.to_csv(self.file_path, index=False)
        # Start fresh transcripts once the vote is recorded.
        self.clear_history()

    def df_show(self):
        """Re-read the CSV so the Score Board tab reflects the latest votes."""
        return pd.read_csv(self.file_path)
|