# Folding Subnet Dashboard — Streamlit app.
# (Hugging Face Spaces page chrome — status lines, file size, git blame hashes,
# and gutter line numbers — removed from the scraped source so the file parses.)
import time
import pandas as pd
import plotly.express as px
import requests
import streamlit as st
import utils
# TODO: planned dashboard panels, not yet implemented:
#   - Proteins folded (delta 24hr)
#   - Current proteins folding (24hr)
#   - Average time to fold trend
#   - Refolded proteins (group by run id and pdb id and get unique)
#   - Simulation duration distribution
# Seconds between cache refreshes (also used to bucket the metagraph cache key).
UPDATE_INTERVAL = 3600
# Folding subnet stats API. No trailing slash: callers append '/endpoint', and
# the original trailing slash produced double slashes ('...:5001//productivity').
BASE_URL = 'http://143.198.21.86:5001'
st.title('Folding Subnet Dashboard')
st.markdown('<br>', unsafe_allow_html=True)
@st.cache_data(ttl=UPDATE_INTERVAL)
def fetch_productivity_data():
    """Fetch all-time and last-24h productivity stats from the subnet API.

    Cached by Streamlit for UPDATE_INTERVAL seconds. Returns the decoded
    JSON payload (dict with 'all_time' and 'last_24h' keys, per the callers).
    """
    # rstrip guards against a trailing slash in BASE_URL producing '//' in the URL.
    response = requests.get(f"{BASE_URL.rstrip('/')}/productivity", timeout=30)
    # Fail loudly on HTTP errors instead of crashing later on a non-JSON body.
    response.raise_for_status()
    return response.json()
@st.cache_data(ttl=UPDATE_INTERVAL)
def fetch_throughput_data():
    """Fetch data-transfer (throughput) stats from the subnet API.

    Cached by Streamlit for UPDATE_INTERVAL seconds. Returns the decoded
    JSON payload (dict with 'all_time', 'last_24h' and 'data' keys, per the callers).
    """
    # rstrip guards against a trailing slash in BASE_URL producing '//' in the URL.
    response = requests.get(f"{BASE_URL.rstrip('/')}/throughput", timeout=30)
    # Fail loudly on HTTP errors instead of crashing later on a non-JSON body.
    response.raise_for_status()
    return response.json()
@st.cache_data(ttl=UPDATE_INTERVAL)
def fetch_metagraph_data():
    """Load the metagraph via utils.

    The hour-bucket argument changes once per UPDATE_INTERVAL, so it keys
    the Streamlit cache to one fresh fetch per interval.
    """
    interval_bucket = time.time() // UPDATE_INTERVAL
    return utils.get_metagraph(interval_bucket)
@st.cache_data(ttl=UPDATE_INTERVAL)
def fetch_leaderboard_data(df_m, ntop, entity_choice):
    """Build the top-miner leaderboard frame from the metagraph.

    Cached by Streamlit on the (df_m, ntop, entity_choice) arguments.
    """
    leaderboard = utils.get_leaderboard(df_m, ntop=ntop, entity_choice=entity_choice)
    return leaderboard
#### ------ PRODUCTIVITY ------
# Overview of productivity
st.subheader('Productivity overview')
st.info('Productivity metrics show how many proteins have been folded, which is the primary goal of the subnet. Metrics are estimated using weights and biases data combined with heuristics.')
productivity_all = fetch_productivity_data()
productivity_24h = productivity_all['last_24h']
# All-time completed jobs as a DataFrame with a parsed timestamp column.
completed_jobs = pd.DataFrame(productivity_all['all_time']['total_completed_jobs'])
completed_jobs['last_event_at'] = pd.to_datetime(completed_jobs['updated_at'])
# One row per pdb_id. .copy() avoids pandas SettingWithCopyWarning on later
# column writes; the original also redundantly recomputed 'last_event_at'
# here even though drop_duplicates already carries the parsed column over.
unique_folded = completed_jobs.drop_duplicates(subset=['pdb_id'], keep='first').copy()
m1, m2, m3 = st.columns(3)
m1.metric('Unique proteins folded', f'{len(unique_folded):,.0f}', delta=f'{productivity_24h["unique_folded"]:,.0f} (24h)')
m2.metric('Total jobs completed', f'{len(completed_jobs):,.0f}', delta=f'{productivity_24h["total_completed_jobs"]:,.0f} (24h)')
# NOTE(review): the x10 multiplier presumably encodes ~10 simulations per job — confirm.
m3.metric('Total simulations ran', f'{len(completed_jobs)*10:,.0f}', delta=f'{productivity_24h["total_completed_jobs"]*10:,.0f} (24h)')
st.markdown('<br>', unsafe_allow_html=True)
# Let the user pick which cumulative series to plot.
PROD_CHOICES = {
    'Total jobs completed': 'total_pdbs',
    'Unique proteins folded': 'unique_pdbs',
}
prod_choice_label = st.radio('Select productivity metric', list(PROD_CHOICES.keys()), index=0, horizontal=True)
prod_choice = PROD_CHOICES[prod_choice_label]
PROD_DATA = {
    'unique_pdbs': unique_folded,
    'total_pdbs': completed_jobs,
}
df = PROD_DATA[prod_choice].sort_values(by='last_event_at').reset_index()
# After the chronological sort, row i is the (i+1)-th completed item.
df['cumulative_jobs'] = df.index + 1
# Area chart of cumulative completions over time.
cumulative_fig = px.line(
    df,
    x='last_event_at',
    y='cumulative_jobs',
    labels={'last_event_at': 'Time', 'cumulative_jobs': prod_choice_label},
).update_traces(fill='tozeroy')
st.plotly_chart(cumulative_fig, use_container_width=True)
st.markdown('<br>', unsafe_allow_html=True)
#### ------ THROUGHPUT ------
st.subheader('Throughput overview')
st.info('Throughput metrics show the total amount of data sent and received by the validators. This is a measure of the network activity and the amount of data that is being processed by the subnet.')
MEM_UNIT = 'GB' #st.radio('Select memory unit', ['TB','GB', 'MB'], index=0, horizontal=True)
throughput = fetch_throughput_data()
data_transferred = throughput['all_time']
data_transferred_24h = throughput['last_24h']
# Per-record transfer data, ordered by timestamp before parsing it.
data_df = pd.DataFrame(throughput['data']).sort_values(by='updated_at').reset_index()
data_df['updated_at'] = pd.to_datetime(data_df['updated_at'])
# Running totals for each direction of transfer.
data_df['Total validator data sent'] = data_df['md_inputs_sum'].cumsum()
data_df['Total received data'] = data_df['md_outputs_sum'].cumsum()
m1, m2, m3 = st.columns(3)
m1.metric(f'Total validator data sent ({MEM_UNIT})', f'{data_transferred["validator_sent"]:,.0f}', delta=f'{data_transferred_24h["validator_sent"]:,.0f} (24h)')
m2.metric(f'Total received data ({MEM_UNIT})', f'{data_transferred["miner_sent"]:,.0f}', delta=f'{data_transferred_24h["miner_sent"]:,.0f} (24h)')
m3.metric(f'Total transferred data ({MEM_UNIT})', f'{data_transferred["validator_sent"]+data_transferred["miner_sent"]:,.0f}', delta=f'{data_transferred_24h["validator_sent"]+data_transferred_24h["miner_sent"]:,.0f} (24h)')
# Stacked area-style chart of both cumulative series, legend tucked top-left.
throughput_fig = px.line(
    data_df,
    x='updated_at',
    y=['Total validator data sent', 'Total received data'],
    labels={'updated_at':'Time', 'value':f'Data Transferred ({MEM_UNIT})', 'variable':'Direction'},
).update_traces(fill='tozeroy')
throughput_fig.update_layout(legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
st.plotly_chart(throughput_fig, use_container_width=True)
st.markdown('<br>', unsafe_allow_html=True)
#### ------ LEADERBOARD ------
st.subheader('Leaderboard')
st.info('The leaderboard shows the top miners by incentive.')
m1, m2 = st.columns(2)
ntop = m1.slider('Number of top miners to display', value=10, min_value=3, max_value=50, step=1)
entity_choice = m2.radio('Select entity', utils.ENTITY_CHOICES, index=0, horizontal=True)
df_m = fetch_metagraph_data()
df_miners = fetch_leaderboard_data(df_m, ntop=ntop, entity_choice=entity_choice)
# Per-bar text labels only when few bars are drawn, to keep the chart legible.
bar_text = entity_choice if ntop < 20 else None
# hide colorbar and don't show y axis
leaderboard_fig = px.bar(
    df_miners,
    x='I',
    color='I',
    hover_name=entity_choice,
    text=bar_text,
    labels={'I':'Incentive', 'trust':'Trust', 'stake':'Stake', '_index':'Rank'},
).update_layout(coloraxis_showscale=False, yaxis_visible=False)
st.plotly_chart(leaderboard_fig, use_container_width=True)
with st.expander('Show raw metagraph data'):
    st.dataframe(df_m)
st.markdown('<br>', unsafe_allow_html=True)
#### ------ LOGGED RUNS ------
# st.subheader('Logged runs')
# st.info('The timeline shows the creation and last event time of each run.')
# st.plotly_chart(
# px.timeline(df, x_start='created_at', x_end='last_event_at', y='username', color='state',
# labels={'created_at':'Created at', 'last_event_at':'Last event at', 'username':''},
# ),
# use_container_width=True
# )
# with st.expander('Show raw run data'):
# st.dataframe(df) |