Update README.md

README.md (changed) — tail of the dataset-card YAML front matter for context:

  download_size: 27660237
  dataset_size: 52962756
---
When using the CoIR evaluation framework's version of this dataset, run the code below for evaluation:

```python
import coir
from coir.data_loader import get_tasks
from coir.evaluation import COIR
from coir.models import YourCustomDEModel

model_name = "intfloat/e5-base-v2"

# Load the model
model = YourCustomDEModel(model_name=model_name)

# Get tasks
# All available tasks: ["codetrans-dl", "stackoverflow-qa", "apps", "codefeedback-mt",
#                       "codefeedback-st", "codetrans-contest", "synthetic-text2sql",
#                       "cosqa", "codesearchnet", "codesearchnet-ccr"]
tasks = get_tasks(tasks=["codetrans-dl"])

# Initialize evaluation
evaluation = COIR(tasks=tasks, batch_size=128)

# Run evaluation
results = evaluation.run(model, output_folder=f"results/{model_name}")
print(results)
```