# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mean Reciprocal Rank (MRR) metric module."""

import evaluate
import datasets


_CITATION = """\
@InProceedings{posicube:module,
title = {Mean reciprocal rank},
authors={Posicube, Inc.},
year={2022}
}
"""

_DESCRIPTION = """\
This module evaluates systems that rank a list of items. The mean reciprocal
rank is a statistical measure for evaluating any process that produces a list
of possible responses to a sample of queries, ordered by probability of
correctness.
"""

_KWARGS_DESCRIPTION = """
Computes the mean reciprocal rank of the gold items over a set of queries.
Args:
    predictions: list of zero-based ranks of the gold item, one per query
        (0 means the gold item was ranked first).
Returns:
    mrr: mean of the reciprocal (1-based) ranks of the gold items.
Examples:
    >>> mrr = evaluate.load("posicube/mean_reciprocal_rank")
    >>> results = mrr.compute(predictions=[0, 4])
    >>> print(results)
    {'mrr': 0.6}
"""


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanReciprocalRank(evaluate.Metric):
    """A statistical measure for evaluating any process that produces a list of
    possible responses to a sample of queries, ordered by probability of
    correctness."""

    def _info(self):
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction: the zero-based rank
            # of the gold item for one query.
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64"),
                }
            ),
            # Homepage of the module for documentation
            homepage="https://huggingface.co./spaces/posicube/mean_reciprocal_rank",
            # Additional links to the codebase or references
            codebase_urls=["https://huggingface.co./spaces/posicube/mean_reciprocal_rank"],
            reference_urls=["https://en.wikipedia.org/wiki/Mean_reciprocal_rank"],
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores."""
        pass

    def _compute(self, predictions):
        """Returns the mean of the reciprocal (1-based) ranks of the gold items."""
        # Each zero-based rank p contributes a reciprocal rank of 1 / (p + 1);
        # the score is the average over all queries.
        reciprocal_ranks = [1.0 / (rank + 1) for rank in predictions]
        return {"mrr": sum(reciprocal_ranks) / len(reciprocal_ranks)}
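

if __name__ == "__main__":
    # A minimal local smoke test (a sketch, not part of the evaluate template;
    # it assumes `evaluate` and `datasets` are installed and instantiates the
    # class directly instead of loading it from the Hub). It reproduces the
    # docstring example: MRR = (1/1 + 1/5) / 2 = 0.6.
    mrr = MeanReciprocalRank()
    print(mrr.compute(predictions=[0, 4]))  # expected: {'mrr': 0.6}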