Sarkosos committed
Commit aad220f · 1 Parent(s): acbfa41

initial dashboard push

Files changed:
- README.md +1 -13
- api.py +147 -0
- app.py +140 -0
- requirements.txt +11 -0
- utils.py +270 -0
README.md CHANGED

@@ -1,13 +1 @@
----
-title: Sn25
-emoji: 👀
-colorFrom: pink
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.36.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# folding-api
api.py ADDED

@@ -0,0 +1,147 @@

import atexit
import datetime

from flask import Flask, request, jsonify
from apscheduler.schedulers.background import BackgroundScheduler

import utils

app = Flask(__name__)


# Global variables (saves time on loading data)
state_vars = None
reload_timestamp = datetime.datetime.now().strftime('%D %T')


def load_data():
    """
    Reload the state variables
    """
    global state_vars, reload_timestamp
    state_vars = utils.load_state_vars()

    reload_timestamp = datetime.datetime.now().strftime('%D %T')

    print(f'Reloaded data at {reload_timestamp}')


def start_scheduler():
    scheduler = BackgroundScheduler()
    scheduler.add_job(func=load_data, trigger="interval", seconds=60*30)
    scheduler.start()

    # Shut down the scheduler when exiting the app
    atexit.register(lambda: scheduler.shutdown())


@app.route('/', methods=['GET'])
def home():
    return "Welcome to the Bittensor Protein Folding Leaderboard API!"


@app.route('/updated', methods=['GET'])
def updated():
    return reload_timestamp


@app.route('/data', methods=['GET'])
@app.route('/data/<period>', methods=['GET'])
def data(period=None):
    """
    Get the productivity metrics
    """
    assert period in ('24h', None), f"Invalid period: {period}. Must be '24h' or None."
    df = state_vars["dataframe_24h"] if period == '24h' else state_vars["dataframe"]
    return jsonify(
        df.astype(str).to_dict(orient='records')
    )

@app.route('/productivity', methods=['GET'])
@app.route('/productivity/<period>', methods=['GET'])
def productivity_metrics(period=None):
    """
    Get the productivity metrics
    """

    assert period in ('24h', None), f"Invalid period: {period}. Must be '24h' or None."
    df = state_vars["dataframe_24h"] if period == '24h' else state_vars["dataframe"]
    return jsonify(
        utils.get_productivity(df)
    )


@app.route('/throughput', methods=['GET'])
@app.route('/throughput/<period>', methods=['GET'])
def throughput_metrics(period=None):
    """
    Get the throughput metrics
    """
    assert period in ('24h', None), f"Invalid period: {period}. Must be '24h' or None."
    df = state_vars["dataframe_24h"] if period == '24h' else state_vars["dataframe"]
    return jsonify(utils.get_data_transferred(df))


@app.route('/metagraph', methods=['GET'])
def metagraph():
    """
    Get the metagraph data
    Returns:
    - metagraph_data: List of dicts (from pandas DataFrame)
    """

    df_m = state_vars["metagraph"]

    return jsonify(
        df_m.to_dict(orient='records')
    )

@app.route('/leaderboard', methods=['GET'])
@app.route('/leaderboard/<entity>', methods=['GET'])
@app.route('/leaderboard/<entity>/<ntop>', methods=['GET'])
def leaderboard(entity='identity', ntop=10):
    """
    Get the leaderboard data
    Returns:
    - leaderboard_data: List of dicts (from pandas DataFrame)
    """

    assert entity in utils.ENTITY_CHOICES, f"Invalid entity choice: {entity}"

    df_miners = utils.get_leaderboard(
        state_vars["metagraph"],
        ntop=int(ntop),
        entity_choice=entity
    )

    return jsonify(
        df_miners.to_dict(orient='records')
    )

@app.route('/validator', methods=['GET'])
def validator():
    """
    Get the validator data
    Returns:
    - validator_data: List of dicts (from pandas DataFrame)
    """
    df_m = state_vars["metagraph"]
    df_validators = df_m.loc[df_m.validator_trust > 0]

    return jsonify(
        df_validators.to_dict(orient='records')
    )


if __name__ == '__main__':

    load_data()
    start_scheduler()

    app.run(host='0.0.0.0', port=5001, debug=True)


# to test locally
# curl -X GET http://0.0.0.0:5001/data
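For reference, a minimal client sketch (not part of the commit) showing how these endpoints could be queried with the requests library once api.py is running; the routes and the host/port come from the file above, everything else is illustrative.

import requests

BASE_URL = 'http://0.0.0.0:5001'  # host/port as configured in app.run(...) above

# Timestamp of the last data reload
print(requests.get(f'{BASE_URL}/updated').text)

# Productivity metrics over all runs, and over the last 24 hours
print(requests.get(f'{BASE_URL}/productivity').json())
print(requests.get(f'{BASE_URL}/productivity/24h').json())

# Top-10 leaderboard grouped by hotkey (entity and ntop are path parameters)
for row in requests.get(f'{BASE_URL}/leaderboard/hotkey/10').json():
    print(row)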
app.py ADDED

@@ -0,0 +1,140 @@
import time
import pandas as pd
import streamlit as st
import plotly.express as px

import utils

_ = """
Proteins folded (delta 24hr)
Current proteins folding (24hr)
Average time to fold trend
Refolded proteins (group by run id and pdb id and get unique)
Simulation duration distribution
"""

UPDATE_INTERVAL = 3600


st.title('Folding Subnet Dashboard')
st.markdown('<br>', unsafe_allow_html=True)

# reload data periodically
df = utils.build_data(time.time()//UPDATE_INTERVAL)
st.toast(f'Loaded {len(df)} runs')

# TODO: fix the factor for 24 hours ago
runs_alive_24h_ago = (df.last_event_at > pd.Timestamp.now() - pd.Timedelta('1d'))
df_24h = df.loc[runs_alive_24h_ago]
# correction factor to account for the fact that the data straddles the 24h boundary
# correction factor is based on the fraction of the run which occurred in the last 24h
# factor = (df_24h.last_event_at - pd.Timestamp.now() + pd.Timedelta('1d')) / pd.Timedelta('1d')


#### ------ PRODUCTIVITY ------

# Overview of productivity
st.subheader('Productivity overview')
st.info('Productivity metrics show how many proteins have been folded, which is the primary goal of the subnet. Metrics are estimated using weights and biases data combined with heuristics.')

productivity = utils.get_productivity(df)
productivity_24h = utils.get_productivity(df_24h)


m1, m2, m3 = st.columns(3)
m1.metric('Unique proteins folded', f'{productivity.get("unique_folded"):,.0f}', delta=f'{productivity_24h.get("unique_folded"):,.0f} (24h)')
m2.metric('Total proteins folded', f'{productivity.get("total_simulations"):,.0f}', delta=f'{productivity_24h.get("total_simulations"):,.0f} (24h)')
m3.metric('Total simulation steps', f'{productivity.get("total_md_steps"):,.0f}', delta=f'{productivity_24h.get("total_md_steps"):,.0f} (24h)')

st.markdown('<br>', unsafe_allow_html=True)

time_binned_data = df.set_index('last_event_at').groupby(pd.Grouper(freq='12h'))

PROD_CHOICES = {
    'Unique proteins folded': 'unique_pdbs',
    'Total simulations': 'total_pdbs',
    'Total simulation steps': 'total_md_steps',
}
prod_choice_label = st.radio('Select productivity metric', list(PROD_CHOICES.keys()), index=0, horizontal=True)
prod_choice = PROD_CHOICES[prod_choice_label]
steps_running_total = time_binned_data[prod_choice].sum().cumsum()
st.plotly_chart(
    # add fillgradient to make it easier to see the trend
    px.area(steps_running_total, y=prod_choice,
            labels={'last_event_at':'', prod_choice: prod_choice_label},
    ).update_traces(fill='tozeroy'),
    use_container_width=True,
)

st.markdown('<br>', unsafe_allow_html=True)


#### ------ THROUGHPUT ------
st.subheader('Throughput overview')

st.info('Throughput metrics show the total amount of data sent and received by the validators. This is a measure of the network activity and the amount of data that is being processed by the subnet.')

MEM_UNIT = 'GB' #st.radio('Select memory unit', ['TB','GB', 'MB'], index=0, horizontal=True)

data_transferred = utils.get_data_transferred(df, unit=MEM_UNIT)
data_transferred_24h = utils.get_data_transferred(df_24h, unit=MEM_UNIT)

m1, m2, m3 = st.columns(3)
m1.metric(f'Total sent data ({MEM_UNIT})', f'{data_transferred.get("sent"):,.0f}', delta=f'{data_transferred_24h.get("sent"):,.0f} (24h)')
m2.metric(f'Total received data ({MEM_UNIT})', f'{data_transferred.get("received"):,.0f}', delta=f'{data_transferred_24h.get("received"):,.0f} (24h)')
m3.metric(f'Total transferred data ({MEM_UNIT})', f'{data_transferred.get("total"):,.0f}', delta=f'{data_transferred_24h.get("total"):,.0f} (24h)')


IO_CHOICES = {'total_data_sent':'Sent', 'total_data_received':'Received'}
io_running_total = time_binned_data[list(IO_CHOICES.keys())].sum().rename(columns=IO_CHOICES).cumsum().melt(ignore_index=False)
io_running_total['value'] = io_running_total['value'].apply(utils.convert_unit, args=(utils.BASE_UNITS, MEM_UNIT))

st.plotly_chart(
    px.area(io_running_total, y='value', color='variable',
            labels={'last_event_at':'', 'value': f'Data transferred ({MEM_UNIT})', 'variable':'Direction'},
    ),
    use_container_width=True,
)

st.markdown('<br>', unsafe_allow_html=True)


#### ------ LEADERBOARD ------

st.subheader('Leaderboard')
st.info('The leaderboard shows the top miners by incentive.')
m1, m2 = st.columns(2)
ntop = m1.slider('Number of top miners to display', value=10, min_value=3, max_value=50, step=1)
entity_choice = m2.radio('Select entity', utils.ENTITY_CHOICES, index=0, horizontal=True)

df_m = utils.get_metagraph(time.time()//UPDATE_INTERVAL)
df_miners = utils.get_leaderboard(df_m, ntop=ntop, entity_choice=entity_choice)

# hide colorbar and don't show y axis
st.plotly_chart(
    px.bar(df_miners, x='I', color='I', hover_name=entity_choice, text=entity_choice if ntop < 20 else None,
           labels={'I':'Incentive', 'trust':'Trust', 'stake':'Stake', '_index':'Rank'},
    ).update_layout(coloraxis_showscale=False, yaxis_visible=False),
    use_container_width=True,
)


with st.expander('Show raw metagraph data'):
    st.dataframe(df_m)

st.markdown('<br>', unsafe_allow_html=True)


#### ------ LOGGED RUNS ------

st.subheader('Logged runs')
st.info('The timeline shows the creation and last event time of each run.')
st.plotly_chart(
    px.timeline(df, x_start='created_at', x_end='last_event_at', y='username', color='state',
                labels={'created_at':'Created at', 'last_event_at':'Last event at', 'username':''},
    ),
    use_container_width=True
)

with st.expander('Show raw run data'):
    st.dataframe(df)
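A note on the reload pattern used here: build_data and get_metagraph are cached with st.cache_data (see utils.py below), and the dashboard passes time.time()//UPDATE_INTERVAL as their argument, so the cache key only changes once per interval and the data is refreshed at most once per hour. A small standalone sketch of that pattern, with illustrative names (only UPDATE_INTERVAL is taken from app.py):

import time
import streamlit as st

UPDATE_INTERVAL = 3600  # seconds, as in app.py

@st.cache_data()
def expensive_load(cache_bucket):
    # cache_bucket is only used as a cache key; the body is re-run when the bucket rolls over
    return {'loaded_at': time.strftime('%H:%M:%S')}

data = expensive_load(time.time() // UPDATE_INTERVAL)
st.write(data)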
requirements.txt ADDED

@@ -0,0 +1,11 @@
bittensor
requests
wandb
python-dotenv
APScheduler
streamlit
nbformat
plotly
pandas
flask
utils.py ADDED

@@ -0,0 +1,270 @@
import os
import tqdm
import time
import wandb
import streamlit as st
import pandas as pd
import bittensor as bt


# TODO: Store the runs dataframe (as in sn1 dashboard) and top up with the ones created since the last snapshot
# TODO: Store relevant wandb data in a database for faster access

# TODO: filter out netuid 141(?)

MIN_STEPS = 12  # minimum number of steps in wandb run in order to be worth analyzing
MAX_RUNS = 100#0000
NETUID = 25
BASE_PATH = 'opentensor-dev/folding-validators'
NETWORK = 'finney'
KEYS = None
ABBREV_CHARS = 8
ENTITY_CHOICES = ('identity', 'hotkey', 'coldkey')

PDBS_PER_RUN_STEP = 0.083
AVG_MD_STEPS = 30_000
BASE_UNITS = 'MB'

api = wandb.Api(timeout=120)

IDENTITIES = {
    '5F4tQyWrhfGVcNhoqeiNsR6KjD4wMZ2kfhLj4oHYuyHbZAc3': 'opentensor',
    '5Hddm3iBFD2GLT5ik7LZnT3XJUnRnN8PoeCFgGQgawUVKNm8': 'taostats',
    '5HEo565WAy4Dbq3Sv271SAi7syBSofyfhhwRNjFNSM2gP9M2': 'foundry',
    '5HK5tp6t2S59DywmHRWPBVJeJ86T61KjurYqeooqj8sREpeN': 'bittensor-guru',
    '5FFApaS75bv5pJHfAp2FVLBj9ZaXuFDjEypsaBNc1wCfe52v': 'roundtable-21',
    '5EhvL1FVkQPpMjZX4MAADcW42i3xPSF1KiCpuaxTYVr28sux': 'tao-validator',
    '5FKstHjZkh4v3qAMSBa1oJcHCLjxYZ8SNTSz1opTv4hR7gVB': 'datura',
    '5DvTpiniW9s3APmHRYn8FroUWyfnLtrsid5Mtn5EwMXHN2ed': 'first-tensor',
    '5HbLYXUBy1snPR8nfioQ7GoA9x76EELzEq9j7F32vWUQHm1x': 'tensorplex',
    '5CsvRJXuR955WojnGMdok1hbhffZyB4N5ocrv82f3p5A2zVp': 'owl-ventures',
    '5CXRfP2ekFhe62r7q3vppRajJmGhTi7vwvb2yr79jveZ282w': 'rizzo',
    '5HNQURvmjjYhTSksi8Wfsw676b4owGwfLR2BFAQzG7H3HhYf': 'neural-internet'
}

EXTRACTORS = {
    'state': lambda x: x.state,
    'run_id': lambda x: x.id,
    'user': lambda x: x.user.name[:16],
    'username': lambda x: x.user.username[:16],
    'created_at': lambda x: pd.Timestamp(x.created_at),
    'last_event_at': lambda x: pd.Timestamp(x.summary.get('_timestamp'), unit='s'),

    'netuid': lambda x: x.config.get('netuid'),
    'mock': lambda x: x.config.get('neuron').get('mock'),
    'sample_size': lambda x: x.config.get('neuron').get('sample_size'),
    'queue_size': lambda x: x.config.get('neuron').get('queue_size'),
    'timeout': lambda x: x.config.get('neuron').get('timeout'),
    'update_interval': lambda x: x.config.get('neuron').get('update_interval'),
    'epoch_length': lambda x: x.config.get('neuron').get('epoch_length'),
    'disable_set_weights': lambda x: x.config.get('neuron').get('disable_set_weights'),

    # This stuff is from the last logged event
    'num_steps': lambda x: x.summary.get('_step'),
    'runtime': lambda x: x.summary.get('_runtime'),
    'init_energy': lambda x: x.summary.get('init_energy'),
    'best_energy': lambda x: x.summary.get('best_loss'),
    'pdb_id': lambda x: x.summary.get('pdb_id'),
    'pdb_updates': lambda x: x.summary.get('updated_count'),
    'total_returned_sizes': lambda x: get_total_file_sizes(x),
    'total_sent_sizes': lambda x: get_total_md_input_sizes(x),

    'pdb_atoms': lambda x: get_pdb_complexity(x),

    'version': lambda x: x.tags[0],
    'spec_version': lambda x: x.tags[1],
    'vali_hotkey': lambda x: x.tags[2],

    # System metrics
    'disk_read': lambda x: x.system_metrics.get('system.disk.in'),
    'disk_write': lambda x: x.system_metrics.get('system.disk.out'),
    # Really slow stuff below
    # 'started_at': lambda x: x.metadata.get('startedAt'),
    # 'disk_used': lambda x: x.metadata.get('disk').get('/').get('used'),
    # 'commit': lambda x: x.metadata.get('git').get('commit')
}

def get_pdb_complexity(run, field='ATOM', preprocess=True):
    data = run.summary.get('pdb_complexity')

    if not isinstance(data, list) or len(data)==0:
        return None
    data = data[0]

    counts = data.get(field)
    if counts is not None:
        return counts

    counts = 0
    for key in data.keys():
        if key.startswith(field):
            counts += data.get(key)
    return counts

def convert_unit(value, from_unit, to_unit):
    """Converts a value from one unit to another

    example:
    convert_unit(1024, 'KB', 'MB') -> 1
    convert_unit(1024, 'MB', 'KB') -> 1048576
    """
    units = ['B', 'KB','MB','GB','TB']
    assert from_unit.upper() in units, f'From unit {from_unit!r} not in {units}'
    assert to_unit.upper() in units, f'To unit {to_unit!r} not in {units}'

    factor = 1024**(units.index(from_unit) - units.index(to_unit))
    # print(f'Converting from {from_unit!r} to {to_unit!r}, factor: {factor}')
    return value * factor

def get_total_file_sizes(run):
    """returns total size of byte strings in bytes"""
    size_bytes = sum(size for sizes in run.summary.get('response_returned_files_sizes',[[]]) for size in sizes if sizes)
    return convert_unit(size_bytes, from_unit='B', to_unit=BASE_UNITS)

def get_total_md_input_sizes(run):
    """returns total size of byte strings in bytes"""
    size_bytes = sum(run.summary.get('md_inputs_sizes',[]))
    return convert_unit(size_bytes, from_unit='B', to_unit=BASE_UNITS)


def get_data_transferred(df, unit='GB'):

    factor = convert_unit(1, from_unit=BASE_UNITS, to_unit=unit)
    sent = df.total_data_sent.sum()
    received = df.total_data_received.sum()
    return {
        'sent': sent * factor,
        'received': received * factor,
        'total': (sent + received) * factor,
        'read': df.disk_read.sum() * factor,
        'write': df.disk_write.sum() * factor,
    }


def get_productivity(df):

    # Estimate the number of unique pdbs folded using our heuristic
    unique_folded = df.unique_pdbs.sum().round()
    # Estimate the total number of simulations completed using our heuristic
    total_simulations = df.total_pdbs.sum().round()
    # Estimate the total number of simulation steps completed using our heuristic
    total_md_steps = df.total_md_steps.sum().round()

    return {
        'unique_folded': unique_folded,
        'total_simulations': total_simulations,
        'total_md_steps': total_md_steps,
    }

def get_leaderboard(df, ntop=10, entity_choice='identity'):

    df = df.loc[df.validator_permit==False]
    df.index = range(df.shape[0])
    return df.groupby(entity_choice).I.sum().sort_values().reset_index().tail(ntop)

@st.cache_data()
def get_metagraph(time):
    print(f'Loading metagraph with time {time}')
    subtensor = bt.subtensor(network=NETWORK)
    m = subtensor.metagraph(netuid=NETUID)
    meta_cols = ['I','stake','trust','validator_trust','validator_permit','C','R','E','dividends','last_update']

    df_m = pd.DataFrame({k: getattr(m, k) for k in meta_cols})
    df_m['uid'] = range(m.n.item())
    df_m['hotkey'] = list(map(lambda a: a.hotkey, m.axons))
    df_m['coldkey'] = list(map(lambda a: a.coldkey, m.axons))
    df_m['ip'] = list(map(lambda a: a.ip, m.axons))
    df_m['port'] = list(map(lambda a: a.port, m.axons))
    df_m['coldkey'] = df_m.coldkey.str[:ABBREV_CHARS]
    df_m['hotkey'] = df_m.hotkey.str[:ABBREV_CHARS]
    df_m['identity'] = df_m.apply(lambda x: f'{x.hotkey} @ uid {x.uid}', axis=1)
    return df_m


@st.cache_data()
def load_run(run_path, keys=KEYS):

    print('Loading run:', run_path)
    run = api.run(run_path)
    df = pd.DataFrame(list(run.scan_history(keys=keys)))
    for col in ['updated_at', 'best_loss_at', 'created_at']:
        if col in df.columns:
            df[col] = pd.to_datetime(df[col])
    print(f'+ Loaded {len(df)} records')
    return df

@st.cache_data(show_spinner=False)
def build_data(timestamp=None, path=BASE_PATH, min_steps=MIN_STEPS, use_cache=True):

    save_path = '_saved_runs.csv'
    filters = {}
    df = pd.DataFrame()
    # Load the last saved runs so that we only need to update the new ones
    if use_cache and os.path.exists(save_path):
        df = pd.read_csv(save_path)
        df['created_at'] = pd.to_datetime(df['created_at'])
        df['last_event_at'] = pd.to_datetime(df['last_event_at'])

        timestamp_str = df['last_event_at'].max().isoformat()
        filters.update({'updated_at': {'$gte': timestamp_str}})

    progress = st.progress(0, text='Loading data')

    runs = api.runs(path, filters=filters)

    run_data = []
    n_events = 0
    for i, run in enumerate(tqdm.tqdm(runs, total=len(runs))):
        num_steps = run.summary.get('_step',0)
        if num_steps<min_steps:
            continue
        n_events += num_steps
        prog_msg = f'Loading data {i/len(runs)*100:.0f}%, {n_events:,.0f} events)'
        progress.progress(i/len(runs), text=f'{prog_msg}... **downloading** `{os.path.join(*run.path)}`')

        run_data.append(run)

    progress.empty()

    df_new = pd.DataFrame([{k: func(run) for k, func in EXTRACTORS.items()} for run in tqdm.tqdm(run_data, total=len(run_data))])
    df = pd.concat([df, df_new], ignore_index=True)
    df['duration'] = (df.last_event_at - df.created_at).round('s')
    df['identity'] = df['vali_hotkey'].map(IDENTITIES).fillna('unknown')
    df['vali_hotkey'] = df['vali_hotkey'].str[:ABBREV_CHARS]

    # Estimate the number of unique pdbs in a run as a function of the steps in the run
    df['unique_pdbs'] = df['num_steps'] * PDBS_PER_RUN_STEP
    df['total_pdbs'] = df['unique_pdbs'] * df['sample_size']
    # Estimate the number of md steps as the average per simulation multiplied by our estimate of total sims
    df['total_md_steps'] = df['total_pdbs'] * AVG_MD_STEPS

    df['total_data_sent'] = df['total_sent_sizes'] * df['num_steps']
    df['total_data_received'] = df['total_returned_sizes'] * df['num_steps']

    df.to_csv(save_path, index=False)
    return df


def load_state_vars():
    UPDATE_INTERVAL = 600

    df = build_data(time.time()//UPDATE_INTERVAL)
    runs_alive_24h_ago = (df.last_event_at > pd.Timestamp.now() - pd.Timedelta('1d'))
    df_24h = df.loc[runs_alive_24h_ago]

    df_m = get_metagraph(time.time()//UPDATE_INTERVAL)

    return {
        'dataframe': df,
        'dataframe_24h': df_24h,
        'metagraph': df_m,
    }


if __name__ == '__main__':

    print('Loading runs')
    df = build_data()

    df.to_csv('test_wandb_data.csv', index=False)
    print(df)