helloai0 committed on
Commit
1b391ad
1 Parent(s): c7246c1

Update metric.py

Browse files
Files changed (1) hide show
  1. metric.py +5 -4
metric.py CHANGED
@@ -47,9 +47,7 @@ def compute(params):
47
  private_submission_df = private_submission_df.sort_values(params.submission_id_col).reset_index(drop=True)
48
 
49
 
50
- print('public_solution_df', public_solution_df)
51
- print('private_solution_df', private_solution_df)
52
-
53
  # # METRICS Calculation Evaluation
54
  # # _metric = SOME METRIC FUNCTION
55
  # def _metric(outputs, targets):
@@ -59,12 +57,15 @@ def compute(params):
59
  # return score
60
 
61
 
62
-
 
 
63
  ## LLM Scoring Evaluation
64
  def _metric(outputs, targets):
65
  # inputs: public_solution_df[target_cols], public_submission_df[target_cols]
66
  # output: score
67
  for row, output in enumerate(outputs):
 
68
  answer = output['pred']
69
  label = str(targets.iloc[row]['pred'])
70
 
 
47
  private_submission_df = private_submission_df.sort_values(params.submission_id_col).reset_index(drop=True)
48
 
49
 
50
+
 
 
51
  # # METRICS Calculation Evaluation
52
  # # _metric = SOME METRIC FUNCTION
53
  # def _metric(outputs, targets):
 
57
  # return score
58
 
59
 
60
+ print('public_solution_df', public_solution_df)
61
+ print('private_solution_df', private_solution_df)
62
+
63
  ## LLM Scoring Evaluation
64
  def _metric(outputs, targets):
65
  # inputs: public_solution_df[target_cols], public_submission_df[target_cols]
66
  # output: score
67
  for row, output in enumerate(outputs):
68
+ print('output', output)
69
  answer = output['pred']
70
  label = str(targets.iloc[row]['pred'])
71