Merge pull request #2 from macrocosm-os/update-api
Files changed:
- .gitignore +3 -0
- README.md +2 -2
- app.py +24 -16
- competitions.py +20 -0
- requirements.txt +2 -1
- utils.py +206 -90
.gitignore ADDED
@@ -0,0 +1,3 @@
+.venv
+__pycache__/
+.env
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-title: …
-emoji: …
+title: Finetuning subnet
+emoji: :em…
 colorFrom: purple
 colorTo: pink
 sdk: gradio
app.py CHANGED
@@ -2,25 +2,26 @@
 
 import os
 import datetime
+from typing import Dict
 import gradio as gr
 
 from dotenv import load_dotenv
 from huggingface_hub import HfApi
 from apscheduler.schedulers.background import BackgroundScheduler
 
+import competitions
 import utils
 
 FONT = (
     """<link href="https://fonts.cdnfonts.com/css/jmh-typewriter" rel="stylesheet">"""
 )
-TITLE = """<h1 align="center" id="space-title" class="typewriter">Subnet …
-HEADER = """<h2 align="center" class="typewriter"><a href="https://github.com/macrocosm-os/…
-…
+TITLE = """<h1 align="center" id="space-title" class="typewriter">Finetuning Subnet Leaderboard</h1>"""
+HEADER = """<h2 align="center" class="typewriter"><a href="https://github.com/macrocosm-os/finetuning" target="_blank">Finetuning</a> is a <a href="https://bittensor.com/" target="_blank">Bittensor</a> subnet that rewards miners for producing finetuned models in defined competitions. The model with the best head-to-head score in each competition receive a steady emission of TAO.</h3>"""
+# TODO: Update links once subnet is regged.
 EVALUATION_DETAILS = """<ul><li><b>Name:</b> the 🤗 Hugging Face model name (click to go to the model card)</li><li><b>Rewards / Day:</b> the expected rewards per day based on current ranking.</li><li><b>Last Average Loss:</b> the last loss value on the evaluation data for the model as calculated by a validator (lower is better)</li><li><b>UID:</b> the Bittensor UID of the miner</li><li><b>Block:</b> the Bittensor block that the model was submitted in</li></ul><br/>More stats on <a href="https://taostats.io/subnets/netuid-9/" target="_blank">taostats</a>."""
 EVALUATION_HEADER = """<h3 align="center">Shows the latest internal evaluation statistics as calculated by the Opentensor validator</h3>"""
 
-…
-HF_REPO_ID = "macrocosm-os/pretraining-leaderboard"
+HF_REPO_ID = "macrocosm-os/finetuning-leaderboard"
 SECONDS_PER_BLOCK = 12
 
 load_dotenv()

@@ -68,6 +69,7 @@ def main():
     # TODO: Re-enable once ""SubtensorModule.BlocksSinceEpoch" not found" issue is resolved.
     # gr.HTML(value=get_next_update_div(current_block, next_epoch_block))
 
+    # TODO: Figure out the best approach to showing the per competition rewards.
     gr.Label(
         value={
             f"{c.namespace}/{c.name} ({c.commit[0:8]}) · (τ{round(c.emission, 2):,})": c.incentive

@@ -85,28 +87,34 @@
     with gr.Accordion("Evaluation Stats"):
         gr.HTML(EVALUATION_HEADER)
         show_stale = gr.Checkbox(label="Show Stale", interactive=True)
-        … (eight removed lines: the old single leaderboard Dataframe, truncated in this view)
+        competition_leaderboards = []
+        # TODO: Dynamically generate per-competition leaderboards based on model_data.
+        competition_details = competitions.COMPETITION_DETAILS[1]
+        with gr.Accordion(f"{competition_details.name} competition"):
+            gr.HTML(competition_details.html_description)
+            competition_leaderboards.append(gr.components.Dataframe(
+                value=utils.leaderboard_data(model_data, scores, show_stale.value),
+                headers=["Name", "Win Rate", "Average Loss", "Weight", "UID", "Block"],
+                datatype=["markdown", "number", "number", "number", "number", "number"],
+                elem_id="leaderboard-table",
+                interactive=False,
+                visible=True,
+            ))
         gr.HTML(EVALUATION_DETAILS)
         show_stale.change(
            lambda stale: utils.leaderboard_data(model_data, scores, stale),
            inputs=[show_stale],
-           outputs=…
+           outputs=competition_leaderboards,
        )
 
+    # TODO: Make this a multi-competition line plot
     gr.LinePlot(
         utils.get_losses_over_time(vali_runs),
         x="timestamp",
         x_title="Date",
-        y="…
+        y="SN9_MODEL",
         y_title="Average Loss",
-        tooltip="…
+        tooltip="SN9_MODEL",
         interactive=True,
         visible=True,
         width=1024,
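The show_stale.change wiring above fans one checkbox out to every table collected in competition_leaderboards. A minimal, self-contained sketch of that Gradio pattern (the table contents here are hypothetical placeholders, not the Space's real data):

import gradio as gr
import pandas as pd

ROWS = pd.DataFrame({"name": ["model-a", "model-b"], "stale": [False, True]})

def filter_rows(show_stale: bool) -> pd.DataFrame:
    # Keep stale rows only when the checkbox is ticked.
    return ROWS if show_stale else ROWS[~ROWS["stale"]]

with gr.Blocks() as demo:
    show_stale = gr.Checkbox(label="Show Stale", interactive=True)
    boards = [gr.Dataframe(value=filter_rows(False)) for _ in range(2)]
    # One event drives every registered table; the callback must return
    # one value per component listed in `outputs`.
    show_stale.change(
        lambda stale: [filter_rows(stale)] * len(boards),
        inputs=[show_stale],
        outputs=boards,
    )

demo.launch()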
competitions.py ADDED
@@ -0,0 +1,20 @@
+from dataclasses import dataclass
+from typing import Dict
+
+
+@dataclass(frozen=True)
+class CompetitionDetails:
+    # The display name of the competition.
+    name: str
+
+    # The HTML description of the competition.
+    html_description: str
+
+
+# A map of competition IDs to HTML descriptions.
+COMPETITION_DETAILS: Dict[int, CompetitionDetails] = {
+    1: CompetitionDetails(
+        name="SN9_MODEL",
+        html_description="""<b>Competition ID 1</b><br/>Produce the best fine-tuned model from a Subnet 9 pretrained model. Models are evaluated using synthetic prompt/response data from Subnet 18.""",
+    )
+}
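competitions.py is consumed by app.py as a plain lookup table. A short sketch of how it is read, and how a second competition could be registered (the ID 2 entry is illustrative only; real IDs must match what validators log):

import competitions

# Look up a competition by ID, mirroring how app.py builds its accordion title.
details = competitions.COMPETITION_DETAILS[1]
print(f"{details.name} competition")  # -> "SN9_MODEL competition"

# Adding a competition is one more map entry (hypothetical example):
competitions.COMPETITION_DETAILS[2] = competitions.CompetitionDetails(
    name="EXAMPLE_MODEL",
    html_description="<b>Competition ID 2</b><br/>Illustrative entry only.",
)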
requirements.txt CHANGED
@@ -1,6 +1,7 @@
 bittensor
 requests
-wandb
+wandb==0.17.1
+numpy==1.26.4
 python-dotenv
 APScheduler
 huggingface-hub
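The two new pins read as guards against breaking upstream releases: numpy is held below 2.0 (released June 2024), which much of this stack did not yet support, and wandb is fixed to the version the new HistoryScan-based code in utils.py was written against. A quick post-install sanity check (illustrative, assuming the requirements above were installed):

# Verify the pinned versions resolved as expected.
import numpy
import wandb

assert numpy.__version__ == "1.26.4"
assert wandb.__version__ == "0.17.1"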
utils.py CHANGED
@@ -1,25 +1,28 @@
-import os
-import math
-import time
-import json
-import wandb
-import pickle
-import datetime
 import argparse
+import datetime
 import functools
+import json
+import math
+import os
+import time
 import traceback
+from collections import defaultdict
+from dataclasses import dataclass
+from email.policy import default
+from typing import Any, Dict, List, Optional, Tuple
 
-import pandas as pd
-import numpy as np
 import bittensor as bt
+import numpy as np
+import pandas as pd
+import wandb
-…
-…
-…
-from typing import Dict, List, Any, Optional, Tuple
 from bittensor.extrinsics.serving import get_metadata
+from dotenv import load_dotenv
+from wandb.apis.public.history import HistoryScan
 
+import competitions
 
-…
+# TODO: Update once registered
+NETUID = 179
 DELAY_SECS = 3
 RETRIES = 3
 
@@ -27,18 +30,22 @@ load_dotenv()
 
 WANDB_TOKEN = os.environ.get("WANDB_API_KEY", None)
 SUBTENSOR_ENDPOINT = os.environ.get("SUBTENSOR_ENDPOINT", None)
-VALIDATOR_WANDB_PROJECT = "…
-BENCHMARK_WANDB_PROJECT = "…
+VALIDATOR_WANDB_PROJECT = "rusticluftig/finetuning"
+BENCHMARK_WANDB_PROJECT = ""
 BENCHMARK_FLAG = os.environ.get("BENCHMARK_FLAG", None)
 
-…
+
+@dataclass(frozen=True)
 class ModelData:
     uid: int
     hotkey: str
+    competition_id: int
     namespace: str
     name: str
    commit: str
-…
+
+    # Hash of (hash(model) + hotkey)
+    secure_hash: str
     block: int
     incentive: float
     emission: float
 
@@ -60,8 +67,9 @@ class ModelData:
             hotkey=hotkey,
             namespace=tokens[0],
             name=tokens[1],
-            commit=tokens[2]…
-            …
+            commit=tokens[2],
+            secure_hash=tokens[3],
+            competition_id=int(tokens[4]),
             block=block,
             incentive=incentive,
             emission=emission,
 
@@ -69,6 +77,7 @@ class ModelData:
 
 
 def run_with_retries(func, *args, **kwargs):
+    """Runs a provided function with retries in the event of a failure."""
     for i in range(0, RETRIES):
         try:
             return func(*args, **kwargs)
 
@@ -81,12 +90,18 @@ def run_with_retries(func, *args, **kwargs):
 
 
 def get_subtensor_and_metagraph() -> Tuple[bt.subtensor, bt.metagraph]:
+    """Returns a subtensor and metagraph for the finetuning subnet."""
 
     def _internal() -> Tuple[bt.subtensor, bt.metagraph]:
         if SUBTENSOR_ENDPOINT:
             parser = argparse.ArgumentParser()
             bt.subtensor.add_args(parser)
-            subtensor = bt.subtensor(…
+            subtensor = bt.subtensor(
+                config=bt.config(
+                    parser=parser,
+                    args=["--subtensor.chain_endpoint", SUBTENSOR_ENDPOINT],
+                )
+            )
         else:
             subtensor = bt.subtensor("finney")
 
@@ -138,9 +153,10 @@ def get_subnet_data(
 
 def get_wandb_runs(project: str, filters: Dict[str, Any]) -> List:
     """Get the latest runs from Wandb, retrying infinitely until we get them.
-…
+
     Returns:
-        List: List of runs matching the provided filters, newest run (by creation time) first.…
+        List: List of runs matching the provided filters, newest run (by creation time) first.
+    """
     while True:
         api = wandb.Api(api_key=WANDB_TOKEN)
         runs = list(
 
@@ -162,7 +178,7 @@ def get_scores(
     wandb_runs: List,
 ) -> Dict[int, Dict[str, Optional[float]]]:
     """Returns the most recent scores for the provided UIDs.
-…
+
     Args:
         uids (List[int]): List of UIDs to get scores for.
         wandb_runs (List): List of validator runs from Wandb. Requires the runs are provided in descending order.
 
@@ -194,6 +210,7 @@ def get_scores(
                 "win_rate": uid_data.get("win_rate", None),
                 "win_total": uid_data.get("win_total", None),
                 "weight": uid_data.get("weight", None),
+                "competition_id": uid_data.get("competition_id", None),
                 "fresh": is_fresh,
             }
         if len(result) == len(uids):
 
@@ -223,25 +240,48 @@ def get_validator_weights(
 
 def get_losses_over_time(wandb_runs: List) -> pd.DataFrame:
     """Returns a dataframe of the best average model loss over time."""
     timestamps = []
-    …
-
+    datapoints_per_comp_id = {id: [] for id in competitions.COMPETITION_DETAILS}
+
     for run in wandb_runs:
-        … (sixteen removed lines: the old single-competition implementation, truncated in this view)
+        # For each run, check the 10 most recent steps.
+        best_loss_per_competition_id = defaultdict(lambda: math.inf)
+        should_add_datapoint = False
+        min_step = max(0, run.lastHistoryStep - 10)
+        history_scan = HistoryScan(
+            run.client, run, min_step, run.lastHistoryStep, page_size=10
+        )
+        max_timestamp = None
+        for step in history_scan:
+            if "original_format_json" not in step:
+                continue
+            data = json.loads(step["original_format_json"])
+            all_uid_data = data["uid_data"]
+            timestamp = datetime.datetime.fromtimestamp(data["timestamp"])
+            if max_timestamp is None:
+                max_timestamp = timestamp
+            max_timestamp = max(max_timestamp, timestamp)
+
+            for _, uid_data in all_uid_data.items():
+                loss = uid_data.get("average_loss", math.inf)
+                competition_id = uid_data.get("competition_id", None)
+                if not competition_id:
+                    continue
+
+                if loss < best_loss_per_competition_id[competition_id]:
+                    best_loss_per_competition_id[competition_id] = uid_data["average_loss"]
+                    should_add_datapoint = True
+        # Now that we've processed the run's most recent steps, check if we should add a datapoint.
+        if should_add_datapoint:
+            timestamps.append(max_timestamp)
+            # Iterate through all possible competitions and add the best loss for each.
+            # Set None for any that aren't active during this run.
+            for id, losses in datapoints_per_comp_id.items():
+                losses.append(best_loss_per_competition_id.get(id, None))
+
+    # Create a dictionary of competitions to lists of losses.
+    output_columns = {
+        competitions.COMPETITION_DETAILS[id].name: losses
+        for id, losses in datapoints_per_comp_id.items()
+    }
+
+    return pd.DataFrame({"timestamp": timestamps, **output_columns})
 
 
 def next_epoch(subtensor: bt.subtensor, block: int) -> int:
 
@@ -298,53 +338,67 @@ def get_benchmarks() -> Tuple[pd.DataFrame, datetime.datetime]:
     if artifacts:
         table = artifacts[-1].get("benchmarks")
         if table:
-            return table.get_dataframe(), datetime.datetime.strptime(…
+            return table.get_dataframe(), datetime.datetime.strptime(
+                run.metadata["startedAt"], "%Y-%m-%dT%H:%M:%S.%f"
+            )
     bt.logging.error("Failed to get benchmarks from Wandb.")
     return None, None
 
 
-def make_validator_dataframe(…
+def make_validator_dataframe(
+    validator_df: pd.DataFrame, model_data: ModelData
+) -> pd.DataFrame:
 
     values = [
-        … (fifteen removed lines: the old values list, truncated in this view)
+        [uid, int(validator_df[uid][1]), round(validator_df[uid][0], 4)]
+        + [validator_df[uid][-1].get(c.uid) for c in model_data if c.incentive]
+        for uid, _ in sorted(
+            zip(
+                validator_df.keys(),
+                [validator_df[x][1] for x in validator_df.keys()],
+            ),
+            key=lambda x: x[1],
+            reverse=True,
+        )
+    ]
-    dtypes = {"UID":int, "Stake (τ)":float, "V-Trust":float}
-    dtypes.update({
+    dtypes = {"UID": int, "Stake (τ)": float, "V-Trust": float}
+    dtypes.update(
+        {
             f"{c.namespace}/{c.name} ({c.commit[0:8]})": float
             for c in model_data
             if c.incentive
-    }
+        }
+    )
     return pd.DataFrame(values, columns=dtypes.keys()).astype(dtypes)
 
+
 def make_metagraph_dataframe(metagraph: bt.metagraph, weights=False) -> pd.DataFrame:
 
-    cols = […
+    cols = [
+        "stake",
+        "emission",
+        "trust",
+        "validator_trust",
+        "dividends",
+        "incentive",
+        "R",
+        "consensus",
+        "validator_permit",
+    ]
 
     frame = pd.DataFrame({k: getattr(metagraph, k) for k in cols})
-    frame[…
-    frame[…
-    frame[…
-    frame[…
-    frame[…
+    frame["block"] = metagraph.block.item()
+    frame["netuid"] = NETUID
+    frame["uid"] = range(len(frame))
+    frame["hotkey"] = [axon.hotkey for axon in metagraph.axons]
+    frame["coldkey"] = [axon.coldkey for axon in metagraph.axons]
     if weights and metagraph.W is not None:
         # convert NxN tensor to a list of lists so it fits into the dataframe
-        frame[…
+        frame["weights"] = [w.tolist() for w in metagraph.W]
 
     return frame
 
+
 def load_state_vars() -> dict[Any]:
     while True:
         try:
 
@@ -355,8 +409,12 @@ def load_state_vars() -> dict[Any]:
             model_data: List[ModelData] = get_subnet_data(subtensor, metagraph)
             model_data.sort(key=lambda x: x.incentive, reverse=True)
 
-            bt.logging.success(f…
-            vali_runs = get_wandb_runs(…
+            bt.logging.success(f"Loaded {len(model_data)} models")
+            vali_runs = get_wandb_runs(
+                project=VALIDATOR_WANDB_PROJECT,
+                # TODO: Update to point to the OTF vali on finetuning
+                filters={"config.type": "validator", "config.uid": 0},
+            )
 
             scores = get_scores([x.uid for x in model_data], vali_runs)
 
@@ -385,40 +443,98 @@ def load_state_vars() -> dict[Any]:
             time.sleep(30)
 
     return {
-        …
+        "metagraph": metagraph,
         "model_data": model_data,
         "vali_runs": vali_runs,
         "scores": scores,
         "validator_df": validator_df,
         "benchmarks": benchmarks,
-        "benchmark_timestamp": benchmark_timestamp
+        "benchmark_timestamp": benchmark_timestamp,
     }
 
-def test_load_state_vars():
+
+def test_load_state_vars():
+    # TODO: Change to finetuning data.
     subtensor = bt.subtensor("finney")
     metagraph = subtensor.metagraph(NETUID, lite=True)
     model_data = [
-        ModelData(…
-        … (old test fixtures, truncated in this view)
+        ModelData(
+            uid=253,
+            hotkey="5DjoPAgZ54Zf6NsuiVYh8RjonnWWWREE2iXBNzM2VDBMQDPm",
+            namespace="jw-hf-test",
+            name="jw2",
+            commit="aad131f6b02219964e6dcf749c2a23e75a7ceca8",
+            secure_hash="L1ImYzWJwV+9KSnZ2TYW0Iy2KMcVjJVTd30YJoRkpbw=",
+            block=3131103,
+            incentive=1.0,
+            emission=209.06051635742188,
+        ),
+        ModelData(
+            uid=1,
+            hotkey="5CccVtjk4yamCao6QYgEg7jc8vktdj16RbLKNUftHfEsjuJS",
+            namespace="borggAI",
+            name="bittensor-subnet9-models",
+            commit="d373864bc6c972872edb8db95eed570958054bac",
+            secure_hash="+drdTIKYEGYClW2FFVVID6A2Dh//4rLmExRFCJsH6Y4=",
+            block=2081837,
+            incentive=0.0,
+            emission=0.0,
+        ),
+        ModelData(
+            uid=2,
+            hotkey="5HYwoXaczs3jAptbb5mk4aUCkgZqeNcNzJKxSec97GwasfLy",
+            namespace="jungiebeen",
+            name="pretrain1",
+            commit="4c0c6bfd0f92e243d6c8a82209142e7204c852c3",
+            secure_hash="ld/agc0XIWICom/Cpj0fkQLcMogMNj/F65MJogK5RLY=",
+            block=2467482,
+            incentive=0.0,
+            emission=0.0,
+        ),
+        ModelData(
+            uid=3,
+            hotkey="5Dnb6edh9yTeEp5aasRPZVPRAkxvQ6qnERVcXw22awMZ5rxm",
+            namespace="jungiebeen",
+            name="pretrain2",
+            commit="e827b7281c92224adb11124489cc45356553a87a",
+            secure_hash="ld/agc0XIWICom/Cpj0fkQLcMogMNj/F65MJogK5RLY=",
+            block=2467497,
+            incentive=0.0,
+            emission=0.0,
+        ),
+        ModelData(
+            uid=4,
+            hotkey="5FRfca8NbnH424WaX43PMhKBnbLA1bZpRRoXXiVs6HgsxN4K",
+            namespace="ZainAli60",
+            name="mine_modeles",
+            commit="8a4ed4ad1f1fb58d424fd22e8e9874b87d32917c",
+            secure_hash="tVcbZAFoNIOF+Ntxq31OQ2NrLXf5iFCmmPUJlpkMYYo=",
+            block=2508509,
+            incentive=0.0,
+            emission=0.0,
+        ),
     ]
-    vali_runs = get_wandb_runs(…
+    vali_runs = get_wandb_runs(
+        project=VALIDATOR_WANDB_PROJECT,
+        filters={"config.type": "validator", "config.uid": 238},
+    )
 
     scores = get_scores([x.uid for x in model_data], vali_runs)
 
     validator_df = {
         28: (1.0, 33273.4453125, {253: 1.0}),
-        49: (…
-        … (old entries, truncated in this view)
+        49: (
+            0.9127794504165649,
+            10401.677734375,
+            {
+                7: 0.0867,
+                217: 0.0001,
+                219: 0.0001,
+                241: 0.0001,
+                248: 0.0001,
+                253: 0.9128,
+            },
+        ),
         78: (1.0, 26730.37109375, {253: 1.0}),
         116: (1.0, 629248.4375, {253: 1.0}),
         150: (1.0, 272634.53125, {253: 1.0}),
 
@@ -438,11 +554,11 @@ def test_load_state_vars():
         249: (1.0, 478127.3125, {253: 1.0}),
         252: (1.0, 442395.03125, {253: 1.0}),
         254: (1.0, 46845.2109375, {253: 1.0}),
-        255: (1.0, 28977.56640625, {253: 1.0})
+        255: (1.0, 28977.56640625, {253: 1.0}),
     }
 
     return {
-        …
+        "metagraph": metagraph,
         "model_data": model_data,
         "vali_runs": vali_runs,
         "scores": scores,