---
dataset_info:
- config_name: corpus
  features:
  - name: _id
    dtype: string
  - name: partition
    dtype: string
  - name: text
    dtype: string
  - name: language
    dtype: string
  - name: meta_information
    struct:
    - name: starter_code
      dtype: string
    - name: url
      dtype: string
    - name: title
      dtype: string
  splits:
  - name: corpus
    num_bytes: 6044437
    num_examples: 8765
  download_size: 2699470
  dataset_size: 6044437
- config_name: default
  features:
  - name: query-id
    dtype: string
  - name: corpus-id
    dtype: string
  - name: score
    dtype: int64
  splits:
  - name: train
    num_bytes: 127786
    num_examples: 5000
  - name: test
    num_bytes: 97890
    num_examples: 3765
  download_size: 101288
  dataset_size: 225676
- config_name: queries
  features:
  - name: _id
    dtype: string
  - name: partition
    dtype: string
  - name: text
    dtype: string
  - name: language
    dtype: string
  - name: meta_information
    struct:
    - name: starter_code
      dtype: string
    - name: url
      dtype: string
    - name: title
      dtype: string
  splits:
  - name: queries
    num_bytes: 13633677
    num_examples: 8765
  download_size: 6605803
  dataset_size: 13633677
configs:
- config_name: corpus
  data_files:
  - split: corpus
    path: corpus/corpus-*
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: test
    path: data/test-*
- config_name: queries
  data_files:
  - split: queries
    path: queries/queries-*
---
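The three configurations (document corpus, queries, and relevance judgments) can be loaded individually with the 🤗 `datasets` library. A minimal sketch is shown below; the repository id is a placeholder and should be replaced with this dataset's actual path on the Hub.

```python
from datasets import load_dataset

# Placeholder repo id -- replace with this dataset's actual Hub path.
repo_id = "<org>/<dataset-name>"

# Documents: fields _id, partition, text, language, meta_information
corpus = load_dataset(repo_id, "corpus", split="corpus")

# Queries: same schema as the corpus
queries = load_dataset(repo_id, "queries", split="queries")

# Relevance judgments (qrels): query-id, corpus-id, score; train and test splits
qrels = load_dataset(repo_id, "default")

print(corpus[0]["_id"], queries[0]["_id"], qrels["test"][0])
```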
This dataset is the version used by the MTEB evaluation framework. To evaluate a model on it, use the code below:
```python
import logging

import mteb
from mteb import MTEB
from sentence_transformers import SentenceTransformer

logger = logging.getLogger(__name__)

model_name = "intfloat/e5-base-v2"
model = SentenceTransformer(model_name)

# Select the code-retrieval tasks to evaluate on.
tasks = mteb.get_tasks(
    tasks=[
        "AppsRetrieval",
        "CodeFeedbackMT",
        "CodeFeedbackST",
        "CodeTransOceanContest",
        "CodeTransOceanDL",
        "CosQA",
        "SyntheticText2SQL",
        "StackOverflowQA",
        "COIRCodeSearchNetRetrieval",
        "CodeSearchNetCCRetrieval",
    ]
)

# Run the evaluation, overwriting any previously cached results.
evaluation = MTEB(tasks=tasks)
results = evaluation.run(
    model=model,
    overwrite_results=True,
)
print(results)
```
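Note that, depending on the installed `mteb` version, `evaluation.run()` also writes per-task JSON result files to a local results folder; the location can be changed via the `output_folder` argument.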