/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl_eval2/lib/python3.10/site-packages/bitsandbytes/cextension.py:34: UserWarning: The installed version of bitsandbytes was compiled without GPU support. 8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.
warn("The installed version of bitsandbytes was compiled without GPU support. "
/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl_eval2/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cpu.so: undefined symbol: cadam32bit_grad_fp32
model path is /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B
12/05 02:39:19 - OpenCompass - WARNING - No previous results to reuse!
12/05 02:39:19 - OpenCompass - INFO - Reusing experiments from 20241205_023919
12/05 02:39:19 - OpenCompass - INFO - Current exp folder: /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B/20241205_023919
12/05 02:39:22 - OpenCompass - INFO - Partitioned into 256 tasks.
[ ] 0/256, elapsed: 0s, ETA:
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13148 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12330_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=17574 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12058_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21266 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12078_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22208 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12329_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22566 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12320_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13603 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12071_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30163 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12151_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30232 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12074_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13541 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12079_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=17489 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12119_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=24004 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12317_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=24510 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12327_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26572 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12083_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22730 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12328_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30059 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12065_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19117 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12049_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12972 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12306_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=20732 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12051_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=20538 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12047_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23937 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12321_params.py
command torchrun --master_port=20506 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12316_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=25456 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12318_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18257 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12110_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=25627 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12326_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23200 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12322_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=15630 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12331_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28026 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12319_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28509 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12034_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22333 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12088_params.py
command torchrun --master_port=22481 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12324_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30812 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12090_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29503 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12021_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=25809 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12048_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=14074 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12302_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=15079 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12117_params.py
command torchrun --master_port=16776 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12084_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12995 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12293_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21299 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12289_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19106 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12325_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=14915 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12068_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29321 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12256_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21342 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12112_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=17010 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12081_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31800 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12012_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=27971 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12096_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29907 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12332_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26551 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12309_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21922 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12132_params.py
command torchrun --master_port=13545 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12315_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=20990 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12067_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30295 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12036_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28031 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12019_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30394 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12312_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29479 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12222_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=24307 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12050_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19108 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12045_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12411 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12250_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13981 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12148_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21973 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12301_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13954 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12089_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=15535 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12023_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28817 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12022_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=24116 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12230_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29516 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12020_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26160 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12228_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21039 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12307_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19263 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12002_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=27913 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12241_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21097 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12229_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28207 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12279_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28352 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12305_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13885 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12304_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=17961 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12092_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19258 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12011_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23795 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12155_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19546 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12308_params.py
command torchrun --master_port=30502 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12018_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=15486 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12296_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19335 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12091_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18344 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12314_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29993 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12111_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=16172 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12221_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12443 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12248_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=20906 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12137_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13375 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12284_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=20163 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12252_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13488 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12249_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=16055 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12311_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21337 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12310_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26746 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12055_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31622 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12243_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29091 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12246_params.py
command torchrun --master_port=29031 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12231_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=27028 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12251_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=24496 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12118_params.py
command torchrun --master_port=20367 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12313_params.py
command torchrun --master_port=20339 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12076_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22412 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12003_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31246 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12238_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13737 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12105_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23633 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12013_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=24815 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12123_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12748 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12292_params.py
command torchrun --master_port=24670 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12294_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12957 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12323_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=25639 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12133_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=14531 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12233_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22422 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12104_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=25892 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12212_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=14520 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12029_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22465 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12097_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12826 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12300_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23077 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12213_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12911 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12239_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=17915 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12234_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29460 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12295_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28978 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12299_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26901 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12247_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19831 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12219_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12973 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12190_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23557 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12102_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23438 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12146_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19582 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12172_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=16877 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12152_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=14360 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12113_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30230 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12261_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=27866 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12274_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18463 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12290_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31393 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12298_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=25650 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12237_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12918 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12156_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=27359 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12303_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18374 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12115_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22286 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12177_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22758 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12235_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13660 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12178_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=16077 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12007_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12895 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12227_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=17481 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12106_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=25791 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12215_params.py
command torchrun --master_port=18967 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12107_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26331 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12114_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30981 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12265_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31501 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12216_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29716 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12070_params.py
command torchrun --master_port=26138 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12082_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=16811 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12139_params.py
command torchrun --master_port=22927 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12149_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31404 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12220_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=16834 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12203_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12677 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12225_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29781 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12236_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=20483 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12170_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13969 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12211_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=14650 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12242_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=17981 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12158_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31260 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12273_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18371 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12218_params.py
command torchrun --master_port=19307 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12173_params.py
command torchrun --master_port=31299 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12153_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23200 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12267_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18321 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12223_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=14275 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12291_params.py
command torchrun --master_port=21624 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12175_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29688 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12214_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29764 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12199_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18184 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12138_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23847 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12217_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=15443 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12201_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28120 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12245_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18497 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12244_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=24462 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12094_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21984 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12121_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=16342 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12176_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26563 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12122_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23752 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12224_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26357 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12145_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12598 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12095_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30036 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12085_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28865 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12191_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=14621 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12150_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29947 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12204_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22225 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12171_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29071 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12080_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19055 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12010_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13786 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12031_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22104 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12169_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=16348 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12075_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=17941 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12004_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=23338 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12202_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30293 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12143_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21971 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12159_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28658 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12005_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30185 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12161_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30396 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12166_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=17914 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12162_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=25838 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12154_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13131 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12168_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=29602 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12157_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=15031 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12140_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=28243 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12144_params.py
command torchrun --master_port=16805 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12060_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22918 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12101_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13455 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12167_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=24651 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12054_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=24054 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12108_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22443 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12142_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=15816 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12134_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=21536 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12232_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31319 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12109_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18193 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12116_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26098 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12160_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26695 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12163_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30586 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12087_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=25824 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12052_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18053 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12043_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13577 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12192_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12760 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12128_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=18985 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12093_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=15502 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12069_params.py
command torchrun --master_port=24543 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12141_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26133 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12164_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12435 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12066_params.py
command torchrun --master_port=16792 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12027_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=20683 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12059_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=27808 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12100_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=12243 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12086_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19369 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12030_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=27709 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12035_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=22897 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12056_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26542 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12044_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=14655 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12147_params.py
command torchrun --master_port=21853 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12120_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31054 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12037_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=13282 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12038_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=30934 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12046_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=19177 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12041_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=25840 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12057_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=24436 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12008_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=26284 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12001_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31649 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12017_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31091 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12025_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=31307 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12028_params.py
command torchrun --master_port=24846 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12042_params.py
command torchrun --master_port=28845 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12024_params.py
command torchrun --master_port=22021 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12014_params.py
command torchrun --master_port=25160 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12033_params.py
command torchrun --master_port=26373 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12015_params.py
command torchrun --master_port=29057 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12026_params.py
command torchrun --master_port=27787 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12009_params.py
command torchrun --master_port=24685 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12016_params.py
command torchrun --master_port=25725 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12039_params.py
command torchrun --master_port=19818 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12040_params.py
command torchrun --master_port=17931 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12053_params.py
command torchrun --master_port=21442 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12006_params.py
command torchrun --master_port=14996 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12032_params.py
command torchrun --master_port=15408 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/12038_params.py
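Each "command torchrun ..." line above launches one partitioned inference task on a single GPU. A minimal illustrative sketch (not part of the original run) of issuing such a command from Python follows; the helper name launch_infer_task and the random port choice are assumptions, while the torchrun flags, script path, and params-file argument are copied from the commands logged above.

import random
import subprocess

# Script path copied from the logged commands above.
OPENICL_INFER = "/mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py"

def launch_infer_task(params_file: str) -> subprocess.Popen:
    # Assumption: a rendezvous port is picked at random per task, mirroring the
    # varying --master_port values in the log; the real launcher may differ.
    master_port = random.randint(12000, 32000)
    cmd = [
        "torchrun",
        f"--master_port={master_port}",
        "--nproc_per_node", "1",
        OPENICL_INFER,
        params_file,
    ]
    print("command", " ".join(cmd))
    return subprocess.Popen(cmd)

# Hypothetical usage: launch_infer_task("tmp/12038_params.py")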
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 256/256, 0.3 task/s, elapsed: 1014s, ETA: 0s
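The ETA column in the progress lines above is the usual linear extrapolation from elapsed time and completed task count. A minimal sketch of that arithmetic (the function name is illustrative, not an OpenCompass internal):

def progress_eta(done: int, total: int, elapsed_s: float) -> float:
    # Estimate remaining seconds as average-seconds-per-task times tasks left.
    if done == 0:
        return float("inf")
    return elapsed_s / done * (total - done)

# Example: progress_eta(200, 256, 742) is roughly 208 seconds.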
12/05 02:56:25 - OpenCompass - INFO - Partitioned into 287 tasks.
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 287/287, 1.9 task/s, elapsed: 152s, ETA: 0s
dataset                       version   metric                       mode   internvl-chat-20b
---------------------------- --------- ---------------------------- ------ -------------------
mmlu                         -         naive_average                gen    68.12
mmlu_pro                     -         -                            -      -
cmmlu                        -         naive_average                gen    68.14
ceval                        -         naive_average                gen    67.72
agieval                      -         -                            -      -
GaokaoBench                  -         weighted_average             gen    62.28
GPQA_extended                -         -                            -      -
GPQA_main                    -         -                            -      -
GPQA_diamond                 -         -                            -      -
ARC-c                        -         -                            -      -
truthfulqa                   -         -                            -      -
triviaqa                     2121ce    score                        gen    61.77
triviaqa_wiki_1shot          -         -                            -      -
nq                           3dcea1    score                        gen    28.75
C3                           8c358f    accuracy                     gen    93.15
race-high                    9a54b6    accuracy                     gen    86.48
flores_100                   -         -                            -      -
winogrande                   b36770    accuracy                     gen    79.87
hellaswag                    e42710    accuracy                     gen    87.47
bbh                          -         naive_average                gen    69.83
gsm8k                        1d7fe4    accuracy                     gen    79.98
math                         393424    accuracy                     gen    35.54
TheoremQA                    6f0af8    score                        gen    15.25
MathBench                    -         -                            -      -
openai_humaneval             8e312c    humaneval_pass@1             gen    67.07
humaneval_plus               -         -                            -      -
humanevalx                   -         -                            -      -
sanitized_mbpp               a447ff    score                        gen    66.15
mbpp_plus                    -         -                            -      -
mbpp_cn                      6fb572    score                        gen    54.20
leval                        -         -                            -      -
leval_closed                 -         -                            -      -
leval_open                   -         -                            -      -
longbench                    -         -                            -      -
longbench_single-document-qa -         -                            -      -
longbench_multi-document-qa  -         -                            -      -
longbench_summarization      -         -                            -      -
longbench_few-shot-learning  -         -                            -      -
longbench_synthetic-tasks    -         -                            -      -
longbench_code-completion    -         -                            -      -
teval                        -         -                            -      -
teval_zh                     -         -                            -      -
IFEval                       3321a3    Prompt-level-strict-accuracy gen    48.80
IFEval                       3321a3    Inst-level-strict-accuracy   gen    59.11
IFEval                       3321a3    Prompt-level-loose-accuracy  gen    52.68
IFEval                       3321a3    Inst-level-loose-accuracy    gen    62.83
12/05 02:59:08 - OpenCompass - INFO - write summary to /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B/20241205_023919/summary/summary_20241205_023919.txt
12/05 02:59:08 - OpenCompass - INFO - write csv to /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B/20241205_023919/summary/summary_20241205_023919.csv
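The summary written above is a whitespace-separated five-column table (dataset, version, metric, mode, score for internvl-chat-20b). A minimal sketch of reading the .txt file back into records, assuming it contains the table as printed; parse_summary is an illustrative helper, not an OpenCompass API.

from pathlib import Path

# Path copied from the "write summary" log line above.
SUMMARY_TXT = "/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-26B/20241205_023919/summary/summary_20241205_023919.txt"

def parse_summary(path: str) -> list[dict]:
    rows = []
    for line in Path(path).read_text().splitlines():
        parts = line.split()
        if len(parts) != 5:               # skip blank lines and non-table output
            continue
        if parts[0] == "dataset":         # skip the header row
            continue
        if set("".join(parts)) == {"-"}:  # skip the dashed rule under the header
            continue
        dataset, version, metric, mode, score = parts
        rows.append({"dataset": dataset, "version": version,
                     "metric": metric, "mode": mode, "score": score})
    return rows

# Example: keep only the datasets that were actually evaluated in this run.
# evaluated = [r for r in parse_summary(SUMMARY_TXT) if r["score"] != "-"]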
|