Merge branch 'main' of https://huggingface.co./spaces/smu-ai/logical-reasoning
- competition/{14a_GLM-4_NV4080_eval.ipynb → 10e_InternLM_NV4080_eval_4bit.ipynb} +1 -1
- competition/12b_InternLM_Push_LoRA_to_hub.ipynb +1 -0
- competition/12rp1.15_InternLM_Lora_NV4080_4bit.ipynb +1 -0
- logs/glm-4-9b-chat-1m_epoch_2.txt +60 -44
- logs/glm-4-9b-chat-1m_epoch_3.txt +60 -44
- results/glm-4-9b_lora_sft_bf16-p2.csv +0 -0
- scripts/eval-mgtv-glm-4-9b.sh +1 -1
competition/{14a_GLM-4_NV4080_eval.ipynb → 10e_InternLM_NV4080_eval_4bit.ipynb}
RENAMED
@@ -1 +1 @@
-
{"cells":[{"cell_type":"code","execution_count":1,"metadata":{"executionInfo":{"elapsed":476,"status":"ok","timestamp":1720679526275,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"uWKRSV6eZsCn"},"outputs":[],"source":["%load_ext autoreload\n","%autoreload 2"]},{"cell_type":"code","execution_count":2,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"eb33b19f-1206-41ee-84e2-e6258a12eef7","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":2534,"status":"ok","timestamp":1720679529344,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"xwFh14uiZBrI","outputId":"d767799c-34c2-46a5-f052-378146a55321"},"outputs":[],"source":["from pathlib import Path\n","\n","try:\n"," from google.colab import drive\n","\n"," drive.mount(\"/content/drive\")\n"," workding_dir = \"/content/drive/MyDrive/logical-reasoning/\"\n","except ModuleNotFoundError:\n"," workding_dir = str(Path.cwd().parent)"]},{"cell_type":"code","execution_count":3,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"6d394937-6c99-4a7c-9d32-7600a280032f","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":5,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"G5pNu3zgZBrL","outputId":"160a554f-fb08-4aa0-bc00-0422fb7c1fac"},"outputs":[{"name":"stdout","output_type":"stream","text":["workding dir: /home/inflaton/code/projects/courses/logical-reasoning\n"]}],"source":["import os\n","import sys\n","from pathlib import Path\n","\n","os.chdir(workding_dir)\n","sys.path.append(workding_dir)\n","print(\"workding dir:\", workding_dir)"]},{"cell_type":"code","execution_count":4,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"ac667aba-076e-4de6-9984-8f6a67cb09cd","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"0dVRAabNZBrL","outputId":"b977e116-df16-47cd-9160-a24f611da687"},"outputs":[{"data":{"text/plain":["False"]},"execution_count":4,"metadata":{},"output_type":"execute_result"}],"source":["need_to_setup_env = False\n","need_to_setup_env"]},{"cell_type":"code","execution_count":5,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"72f9cf79-7b0d-4d9e-90a0-1fa5251b947f","showTitle":false,"title":""},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"hKUOfP2HZBrL"},"outputs":[],"source":["if need_to_setup_env:\n"," %pip install -r requirements.txt\n"," %cd /content/\n"," %rm -rf LLaMA-Factory\n"," !git clone https://github.com/hiyouga/LLaMA-Factory.git\n"," %cd LLaMA-Factory\n"," %ls\n"," %pip install -e .[torch,bitsandbytes]\n"," \n"," os.chdir(workding_dir)\n"," sys.path.append(workding_dir)\n"," print(\"workding dir:\", 
workding_dir)"]},{"cell_type":"code","execution_count":6,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"9f67ec60-2f24-411c-84eb-0dd664b44775","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"hPCC-6m7ZBrM","outputId":"c7aa2c96-5e99-440a-c148-201d79465ff9"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading env vars from: /home/inflaton/code/projects/courses/logical-reasoning/.env\n"]},{"data":{"text/plain":["True"]},"execution_count":6,"metadata":{},"output_type":"execute_result"}],"source":["from dotenv import find_dotenv, load_dotenv\n","\n","found_dotenv = find_dotenv(\".env\")\n","\n","if len(found_dotenv) == 0:\n"," found_dotenv = find_dotenv(\".env.example\")\n","print(f\"loading env vars from: {found_dotenv}\")\n","load_dotenv(found_dotenv, override=True)"]},{"cell_type":"code","execution_count":7,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"f1597656-8042-4878-9d3b-9ebfb8dd86dc","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"1M3IraVtZBrM","outputId":"29ab35f6-2970-4ade-d85d-3174acf8cda0"},"outputs":[{"name":"stdout","output_type":"stream","text":["THUDM/glm-4-9b-chat-1m None False datasets/mgtv results/mgtv-results_glm_4.csv\n"]}],"source":["import os\n","\n","model_name = os.getenv(\"MODEL_NAME\")\n","adapter_name_or_path = os.getenv(\"ADAPTER_NAME_OR_PATH\")\n","load_in_4bit = os.getenv(\"LOAD_IN_4BIT\") == \"true\"\n","data_path = os.getenv(\"LOGICAL_REASONING_DATA_PATH\")\n","results_path = os.getenv(\"LOGICAL_REASONING_RESULTS_PATH\")\n","use_english_datasets = os.getenv(\"USE_ENGLISH_DATASETS\") == \"true\"\n","\n","print(model_name, adapter_name_or_path, load_in_4bit, data_path, results_path)"]},{"cell_type":"code","execution_count":8,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"b2a43943-9324-4839-9a47-cfa72de2244b","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":564,"status":"ok","timestamp":1720679529907,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"UgMvt6dIZBrM","outputId":"ce37581c-fd26-46c2-ad87-d933d99f68f7"},"outputs":[{"name":"stdout","output_type":"stream","text":["Python 3.11.9\n","\u001b[33mWARNING: Package(s) not found: flash-attn\u001b[0m\u001b[33m\n","\u001b[0mCPU times: user 1.83 ms, sys: 10 ms, total: 11.8 ms\n","Wall time: 496 ms\n"]}],"source":["%%time\n","!python --version\n","!pip show flash-attn"]},{"cell_type":"code","execution_count":9,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":1685,"status":"ok","timestamp":1720679531591,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"ZuS_FsLyZBrN","outputId":"2cba0105-c505-4395-afbd-2f2fee6581d0"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading /home/inflaton/code/projects/courses/logical-reasoning/llm_toolkit/logical_reasoning_utils.py\n","GPU is 
available\n"]}],"source":["from llm_toolkit.llm_utils import *\n","from llm_toolkit.logical_reasoning_utils import *\n","\n","device = check_gpu()"]},{"cell_type":"code","execution_count":12,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["loading model: THUDM/glm-4-9b-chat-1m with adapter: None\n"]},{"name":"stderr","output_type":"stream","text":["Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"50ee96be81184bf48eb03654360d439f","version_major":2,"version_minor":0},"text/plain":["Downloading shards: 0%| | 0/10 [00:00<?, ?it/s]"]},"metadata":{},"output_type":"display_data"},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"e0a1a023cdde44d3b4f3d2d23d8fda5f","version_major":2,"version_minor":0},"text/plain":["model-00002-of-00010.safetensors: 23%|##2 | 419M/1.84G [00:00<?, ?B/s]"]},"metadata":{},"output_type":"display_data"},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"321b0cca9f7e403aa2a369ef734e0c53","version_major":2,"version_minor":0},"text/plain":["model-00003-of-00010.safetensors: 0%| | 0.00/1.99G [00:00<?, ?B/s]"]},"metadata":{},"output_type":"display_data"},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"126e1eb4e1f740e4b6e0933a3a1d16ee","version_major":2,"version_minor":0},"text/plain":["model-00004-of-00010.safetensors: 0%| | 0.00/1.95G [00:00<?, ?B/s]"]},"metadata":{},"output_type":"display_data"},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"ffeaa6e50f644cb7adade4cf35b37d58","version_major":2,"version_minor":0},"text/plain":["model-00005-of-00010.safetensors: 0%| | 0.00/1.84G [00:00<?, ?B/s]"]},"metadata":{},"output_type":"display_data"},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"970d606d5f4f41959561aca77a1c0b48","version_major":2,"version_minor":0},"text/plain":["model-00006-of-00010.safetensors: 0%| | 0.00/1.99G [00:00<?, ?B/s]"]},"metadata":{},"output_type":"display_data"}],"source":["%%time\n","\n","model, tokenizer = load_model(model_name, adapter_name_or_path=adapter_name_or_path, using_llama_factory=False)"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[{"ename":"NameError","evalue":"name 'tokenizer' is not defined","output_type":"error","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)","Cell \u001b[0;32mIn[11], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m datasets \u001b[38;5;241m=\u001b[39m load_logical_reasoning_dataset(\n\u001b[1;32m 2\u001b[0m data_path,\n\u001b[0;32m----> 3\u001b[0m tokenizer\u001b[38;5;241m=\u001b[39m\u001b[43mtokenizer\u001b[49m,\n\u001b[1;32m 4\u001b[0m chinese_prompt\u001b[38;5;241m=\u001b[39m\u001b[38;5;129;01mnot\u001b[39;00m use_english_datasets,\n\u001b[1;32m 5\u001b[0m using_p1\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m 6\u001b[0m )\n","\u001b[0;31mNameError\u001b[0m: name 'tokenizer' is not defined"]}],"source":["datasets = load_logical_reasoning_dataset(\n"," data_path,\n"," tokenizer=tokenizer,\n"," chinese_prompt=not use_english_datasets,\n"," using_p1=False,\n",")"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["def evaluate_model(model, tokenizer, model_name, dataset):\n"," print(f\"Evaluating model: {model_name} on {device}\")\n"," predictions = eval_model(model, 
tokenizer, dataset, device=device)\n","\n"," save_results(\n"," model_name,\n"," results_path,\n"," dataset,\n"," predictions,\n"," debug=False,\n"," )\n","\n"," metrics = calc_metrics(dataset[\"label\"], predictions, debug=False)\n"," print(metrics)"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Evaluating model: internlm/internlm2_5-7b-chat-1m_llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full/checkpoint-88 on mps\n"]},{"name":"stderr","output_type":"stream","text":[" 0%| | 1/3000 [00:06<5:23:35, 6.47s/it]"]},{"name":"stdout","output_type":"stream","text":["--------\n","step 1: 不是</s>\n","--------\n","step 2: 不是\n","--------\n","step 3: 不是\n","--------\n","step 4: 不是\n","--------\n","step 5: 不是\n"]},{"name":"stderr","output_type":"stream","text":[" 22%|██▏ | 657/3000 [40:38<2:18:00, 3.53s/it]"]}],"source":["%%time\n","\n","evaluate_model(model, tokenizer, f\"{model_name}{'_' + adapter_name_or_path if adapter_name_or_path else ''}\", datasets[\"test\"])"]}],"metadata":{"accelerator":"GPU","application/vnd.databricks.v1+notebook":{"dashboards":[],"environmentMetadata":null,"language":"python","notebookMetadata":{"mostRecentlyExecutedCommandWithImplicitDF":{"commandId":-1,"dataframes":["_sqldf"]},"pythonIndentUnit":4},"notebookName":"10_eval-lf-medium-py3.11","widgets":{}},"colab":{"gpuType":"L4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.9"}},"nbformat":4,"nbformat_minor":0}
+
{"cells":[{"cell_type":"code","execution_count":1,"metadata":{"executionInfo":{"elapsed":476,"status":"ok","timestamp":1720679526275,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"uWKRSV6eZsCn"},"outputs":[],"source":["%load_ext autoreload\n","%autoreload 2"]},{"cell_type":"code","execution_count":2,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"eb33b19f-1206-41ee-84e2-e6258a12eef7","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":2534,"status":"ok","timestamp":1720679529344,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"xwFh14uiZBrI","outputId":"d767799c-34c2-46a5-f052-378146a55321"},"outputs":[],"source":["from pathlib import Path\n","\n","try:\n"," from google.colab import drive\n","\n"," drive.mount(\"/content/drive\")\n"," workding_dir = \"/content/drive/MyDrive/logical-reasoning/\"\n","except ModuleNotFoundError:\n"," workding_dir = str(Path.cwd().parent)"]},{"cell_type":"code","execution_count":3,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"6d394937-6c99-4a7c-9d32-7600a280032f","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":5,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"G5pNu3zgZBrL","outputId":"160a554f-fb08-4aa0-bc00-0422fb7c1fac"},"outputs":[{"name":"stdout","output_type":"stream","text":["workding dir: /home/inflaton/code/projects/courses/logical-reasoning\n"]}],"source":["import os\n","import sys\n","from pathlib import Path\n","\n","os.chdir(workding_dir)\n","sys.path.append(workding_dir)\n","print(\"workding dir:\", workding_dir)"]},{"cell_type":"code","execution_count":4,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"ac667aba-076e-4de6-9984-8f6a67cb09cd","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"0dVRAabNZBrL","outputId":"b977e116-df16-47cd-9160-a24f611da687"},"outputs":[{"data":{"text/plain":["False"]},"execution_count":4,"metadata":{},"output_type":"execute_result"}],"source":["need_to_setup_env = False\n","need_to_setup_env"]},{"cell_type":"code","execution_count":5,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"72f9cf79-7b0d-4d9e-90a0-1fa5251b947f","showTitle":false,"title":""},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"hKUOfP2HZBrL"},"outputs":[],"source":["if need_to_setup_env:\n"," %pip install -r requirements.txt\n"," %cd /content/\n"," %rm -rf LLaMA-Factory\n"," !git clone https://github.com/hiyouga/LLaMA-Factory.git\n"," %cd LLaMA-Factory\n"," %ls\n"," %pip install -e .[torch,bitsandbytes]\n"," \n"," os.chdir(workding_dir)\n"," sys.path.append(workding_dir)\n"," print(\"workding dir:\", 
workding_dir)"]},{"cell_type":"code","execution_count":6,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"9f67ec60-2f24-411c-84eb-0dd664b44775","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"hPCC-6m7ZBrM","outputId":"c7aa2c96-5e99-440a-c148-201d79465ff9"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading env vars from: /home/inflaton/code/projects/courses/logical-reasoning/.env\n"]},{"data":{"text/plain":["True"]},"execution_count":6,"metadata":{},"output_type":"execute_result"}],"source":["from dotenv import find_dotenv, load_dotenv\n","\n","found_dotenv = find_dotenv(\".env\")\n","\n","if len(found_dotenv) == 0:\n"," found_dotenv = find_dotenv(\".env.example\")\n","print(f\"loading env vars from: {found_dotenv}\")\n","load_dotenv(found_dotenv, override=True)"]},{"cell_type":"code","execution_count":7,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"f1597656-8042-4878-9d3b-9ebfb8dd86dc","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"1M3IraVtZBrM","outputId":"29ab35f6-2970-4ade-d85d-3174acf8cda0"},"outputs":[{"name":"stdout","output_type":"stream","text":["internlm/internlm2_5-7b-chat-1m llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full/checkpoint-88 True datasets/mgtv results/mgtv-results_internlm_best.csv\n"]}],"source":["import os\n","\n","model_name = os.getenv(\"MODEL_NAME\")\n","adapter_name_or_path = os.getenv(\"ADAPTER_NAME_OR_PATH\")\n","load_in_4bit = os.getenv(\"LOAD_IN_4BIT\") == \"true\"\n","data_path = os.getenv(\"LOGICAL_REASONING_DATA_PATH\")\n","results_path = os.getenv(\"LOGICAL_REASONING_RESULTS_PATH\")\n","use_english_datasets = os.getenv(\"USE_ENGLISH_DATASETS\") == \"true\"\n","\n","print(model_name, adapter_name_or_path, load_in_4bit, data_path, results_path)"]},{"cell_type":"code","execution_count":8,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"b2a43943-9324-4839-9a47-cfa72de2244b","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":564,"status":"ok","timestamp":1720679529907,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"UgMvt6dIZBrM","outputId":"ce37581c-fd26-46c2-ad87-d933d99f68f7"},"outputs":[{"name":"stdout","output_type":"stream","text":["Python 3.11.9\n","\u001b[33mWARNING: Package(s) not found: flash-attn\u001b[0m\u001b[33m\n","\u001b[0mCPU times: user 239 ms, sys: 103 ms, total: 342 ms\n","Wall time: 4.12 s\n"]}],"source":["%%time\n","!python --version\n","!pip show flash-attn"]},{"cell_type":"code","execution_count":9,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":1685,"status":"ok","timestamp":1720679531591,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"ZuS_FsLyZBrN","outputId":"2cba0105-c505-4395-afbd-2f2fee6581d0"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading 
/home/inflaton/code/projects/courses/logical-reasoning/llm_toolkit/logical_reasoning_utils.py\n","GPU is available\n"]}],"source":["from llm_toolkit.llm_utils import *\n","from llm_toolkit.logical_reasoning_utils import *\n","\n","device = check_gpu()"]},{"cell_type":"code","execution_count":10,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["loading model: internlm/internlm2_5-7b-chat-1m with adapter: llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full/checkpoint-88\n"]},{"name":"stderr","output_type":"stream","text":["[INFO|tokenization_utils_base.py:2161] 2024-07-20 13:26:56,525 >> loading file ./tokenizer.model from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/./tokenizer.model\n","[INFO|tokenization_utils_base.py:2161] 2024-07-20 13:26:56,530 >> loading file added_tokens.json from cache at None\n","[INFO|tokenization_utils_base.py:2161] 2024-07-20 13:26:56,534 >> loading file special_tokens_map.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/special_tokens_map.json\n","[INFO|tokenization_utils_base.py:2161] 2024-07-20 13:26:56,541 >> loading file tokenizer_config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/tokenizer_config.json\n","[INFO|tokenization_utils_base.py:2161] 2024-07-20 13:26:56,545 >> loading file tokenizer.json from cache at None\n"]},{"name":"stdout","output_type":"stream","text":["07/20/2024 13:27:06 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>\n","07/20/2024 13:27:06 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.\n"]},{"name":"stderr","output_type":"stream","text":["[INFO|configuration_utils.py:733] 2024-07-20 13:27:07,295 >> loading configuration file config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/config.json\n","[INFO|configuration_utils.py:733] 2024-07-20 13:27:09,367 >> loading configuration file config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/config.json\n","[INFO|configuration_utils.py:800] 2024-07-20 13:27:09,376 >> Model config InternLM2Config {\n"," \"_name_or_path\": \"internlm/internlm2_5-7b-chat-1m\",\n"," \"architectures\": [\n"," \"InternLM2ForCausalLM\"\n"," ],\n"," \"attn_implementation\": \"eager\",\n"," \"auto_map\": {\n"," \"AutoConfig\": \"internlm/internlm2_5-7b-chat-1m--configuration_internlm2.InternLM2Config\",\n"," \"AutoModel\": \"internlm/internlm2_5-7b-chat-1m--modeling_internlm2.InternLM2ForCausalLM\",\n"," \"AutoModelForCausalLM\": \"internlm/internlm2_5-7b-chat-1m--modeling_internlm2.InternLM2ForCausalLM\"\n"," },\n"," \"bias\": false,\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": 2,\n"," \"hidden_act\": \"silu\",\n"," \"hidden_size\": 4096,\n"," \"initializer_range\": 0.02,\n"," \"intermediate_size\": 14336,\n"," \"max_position_embeddings\": 262144,\n"," \"model_type\": \"internlm2\",\n"," \"num_attention_heads\": 32,\n"," \"num_hidden_layers\": 32,\n"," \"num_key_value_heads\": 8,\n"," \"pad_token_id\": 2,\n"," \"pretraining_tp\": 1,\n"," \"rms_norm_eps\": 1e-05,\n"," \"rope_scaling\": {\n"," \"factor\": 2.5,\n"," \"type\": \"dynamic\"\n"," },\n"," 
\"rope_theta\": 50000000,\n"," \"tie_word_embeddings\": false,\n"," \"torch_dtype\": \"bfloat16\",\n"," \"transformers_version\": \"4.43.0.dev0\",\n"," \"use_cache\": true,\n"," \"vocab_size\": 92544\n","}\n","\n"]},{"name":"stdout","output_type":"stream","text":["07/20/2024 13:27:09 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.\n","07/20/2024 13:27:09 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.\n"]},{"name":"stderr","output_type":"stream","text":["[INFO|modeling_utils.py:3603] 2024-07-20 13:27:11,102 >> loading weights file model.safetensors from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/model.safetensors.index.json\n","[INFO|modeling_utils.py:1565] 2024-07-20 13:27:12,118 >> Instantiating InternLM2ForCausalLM model under default dtype torch.bfloat16.\n","[INFO|configuration_utils.py:1038] 2024-07-20 13:27:12,137 >> Generate config GenerationConfig {\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": 2,\n"," \"pad_token_id\": 2\n","}\n","\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"9a2d94f457d64bdd9158219ca791840c","version_major":2,"version_minor":0},"text/plain":["Loading checkpoint shards: 0%| | 0/8 [00:00<?, ?it/s]"]},"metadata":{},"output_type":"display_data"},{"name":"stderr","output_type":"stream","text":["[INFO|modeling_utils.py:4425] 2024-07-20 14:48:42,924 >> All model checkpoint weights were used when initializing InternLM2ForCausalLM.\n","\n","[INFO|modeling_utils.py:4433] 2024-07-20 14:48:42,929 >> All the weights of InternLM2ForCausalLM were initialized from the model checkpoint at internlm/internlm2_5-7b-chat-1m.\n","If your task is similar to the task the model of the checkpoint was trained on, you can already use InternLM2ForCausalLM for predictions without further training.\n","[INFO|configuration_utils.py:993] 2024-07-20 14:48:43,569 >> loading configuration file generation_config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/generation_config.json\n","[INFO|configuration_utils.py:1038] 2024-07-20 14:48:43,575 >> Generate config GenerationConfig {\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": [\n"," 2,\n"," 92542\n"," ],\n"," \"pad_token_id\": 2\n","}\n","\n"]},{"name":"stdout","output_type":"stream","text":["07/20/2024 14:48:45 - INFO - llamafactory.model.model_utils.attention - Using vanilla attention implementation.\n","07/20/2024 14:48:48 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full/checkpoint-88\n","07/20/2024 14:48:48 - INFO - llamafactory.model.loader - all params: 7,756,582,912\n","CPU times: user 3min 46s, sys: 11min 59s, total: 15min 45s\n","Wall time: 1h 22min 24s\n"]}],"source":["%%time\n","\n","model, tokenizer = load_model(model_name, adapter_name_or_path=adapter_name_or_path, using_llama_factory=True, load_in_4bit=load_in_4bit)"]},{"cell_type":"code","execution_count":11,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["loading train/test data files\n","DatasetDict({\n"," train: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 25000\n"," })\n"," test: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 3000\n"," 
})\n","})\n"]}],"source":["datasets = load_logical_reasoning_dataset(\n"," data_path,\n"," tokenizer=tokenizer,\n"," chinese_prompt=not use_english_datasets,\n"," using_p1=False,\n",")"]},{"cell_type":"code","execution_count":12,"metadata":{},"outputs":[],"source":["def evaluate_model(model, tokenizer, model_name, dataset, batch_size=1):\n"," print(f\"Evaluating model: {model_name} on {device}\")\n"," predictions = eval_model(\n"," model, tokenizer, dataset, device=device, batch_size=batch_size\n"," )\n","\n"," save_results(\n"," model_name,\n"," results_path,\n"," dataset,\n"," predictions,\n"," debug=False,\n"," )\n","\n"," metrics = calc_metrics(dataset[\"label\"], predictions, debug=False)\n"," print(metrics)"]},{"cell_type":"code","execution_count":13,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Evaluating model: internlm/internlm2_5-7b-chat-1m_llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full/checkpoint-88_lf_4bit on cuda\n"]},{"name":"stderr","output_type":"stream","text":[" 0%| | 0/375 [00:00<?, ?it/s]"]},{"name":"stderr","output_type":"stream","text":[" 0%| | 1/375 [24:23<152:05:06, 1463.92s/it]"]},{"name":"stdout","output_type":"stream","text":["--------\n","step 1: 不是\n","--------\n","step 2: 不是\n","--------\n","step 3: 不是\n","--------\n","step 4: 不是\n","--------\n","step 5: 不是\n"]}],"source":["%%time\n","\n","evaluate_model(model, tokenizer, f\"{model_name}_{adapter_name_or_path}_lf_4bit\", datasets[\"test\"], batch_size=8)"]}],"metadata":{"accelerator":"GPU","application/vnd.databricks.v1+notebook":{"dashboards":[],"environmentMetadata":null,"language":"python","notebookMetadata":{"mostRecentlyExecutedCommandWithImplicitDF":{"commandId":-1,"dataframes":["_sqldf"]},"pythonIndentUnit":4},"notebookName":"10_eval-lf-medium-py3.11","widgets":{}},"colab":{"gpuType":"L4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.9"}},"nbformat":4,"nbformat_minor":0}
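For context, the substantive change in this renamed notebook is swapping the bf16 GLM-4 evaluation for InternLM2.5-7B loaded in 4 bit with the best LoRA checkpoint attached, driven through the repo's load_model(model_name, adapter_name_or_path=..., using_llama_factory=True, load_in_4bit=load_in_4bit) helper. Below is a minimal sketch of an equivalent load using stock transformers/peft/bitsandbytes APIs; the bfloat16 compute dtype is an assumption (it matches the "torch_dtype": "bfloat16" in the config dump above), and the repo's own helper is not reproduced here:

# Sketch only: approximates what load_model(..., load_in_4bit=True) delegates
# to LLaMA-Factory, using stock Hugging Face APIs. Compute dtype is assumed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

model_name = "internlm/internlm2_5-7b-chat-1m"
adapter_path = "llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full/checkpoint-88"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # quantize weights to 4 bit at load time
    bnb_4bit_compute_dtype=torch.bfloat16,  # matches the checkpoint's bf16 dtype
)

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,  # InternLM2 ships custom modeling code
)
model = PeftModel.from_pretrained(model, adapter_path)  # attach the LoRA adapter

The notebook's own logs show the cost of this path on the NV4080: loading took roughly 1 h 22 min of wall time, and the first evaluation step over a batch of 8 took about 24 minutes.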
competition/12b_InternLM_Push_LoRA_to_hub.ipynb
ADDED
@@ -0,0 +1 @@
+
{"cells":[{"cell_type":"code","execution_count":1,"metadata":{"executionInfo":{"elapsed":476,"status":"ok","timestamp":1720679526275,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"uWKRSV6eZsCn"},"outputs":[],"source":["%load_ext autoreload\n","%autoreload 2"]},{"cell_type":"code","execution_count":2,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"eb33b19f-1206-41ee-84e2-e6258a12eef7","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":2534,"status":"ok","timestamp":1720679529344,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"xwFh14uiZBrI","outputId":"d767799c-34c2-46a5-f052-378146a55321"},"outputs":[],"source":["from pathlib import Path\n","\n","try:\n"," from google.colab import drive\n","\n"," drive.mount(\"/content/drive\")\n"," workding_dir = \"/content/drive/MyDrive/logical-reasoning/\"\n","except ModuleNotFoundError:\n"," workding_dir = str(Path.cwd().parent)"]},{"cell_type":"code","execution_count":3,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"6d394937-6c99-4a7c-9d32-7600a280032f","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":5,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"G5pNu3zgZBrL","outputId":"160a554f-fb08-4aa0-bc00-0422fb7c1fac"},"outputs":[{"name":"stdout","output_type":"stream","text":["workding dir: /home/inflaton/code/projects/courses/logical-reasoning\n"]}],"source":["import os\n","import sys\n","from pathlib import Path\n","\n","os.chdir(workding_dir)\n","sys.path.append(workding_dir)\n","print(\"workding dir:\", workding_dir)"]},{"cell_type":"code","execution_count":4,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"9f67ec60-2f24-411c-84eb-0dd664b44775","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"hPCC-6m7ZBrM","outputId":"c7aa2c96-5e99-440a-c148-201d79465ff9"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading env vars from: /home/inflaton/code/projects/courses/logical-reasoning/.env\n"]},{"data":{"text/plain":["True"]},"execution_count":4,"metadata":{},"output_type":"execute_result"}],"source":["from dotenv import find_dotenv, load_dotenv\n","\n","found_dotenv = find_dotenv(\".env\")\n","\n","if len(found_dotenv) == 0:\n"," found_dotenv = find_dotenv(\".env.example\")\n","print(f\"loading env vars from: {found_dotenv}\")\n","load_dotenv(found_dotenv, override=True)"]},{"cell_type":"code","execution_count":5,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"f1597656-8042-4878-9d3b-9ebfb8dd86dc","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO 
_","userId":"00977795705617022768"},"user_tz":-480},"id":"1M3IraVtZBrM","outputId":"29ab35f6-2970-4ade-d85d-3174acf8cda0"},"outputs":[{"name":"stdout","output_type":"stream","text":["internlm/internlm2_5-7b-chat-1m llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full/checkpoint-88 True datasets/mgtv results/mgtv-results_internlm_best.csv\n"]}],"source":["import os\n","\n","model_name = os.getenv(\"MODEL_NAME\")\n","adapter_name_or_path = os.getenv(\"ADAPTER_NAME_OR_PATH\")\n","load_in_4bit = os.getenv(\"LOAD_IN_4BIT\") == \"true\"\n","data_path = os.getenv(\"LOGICAL_REASONING_DATA_PATH\")\n","results_path = os.getenv(\"LOGICAL_REASONING_RESULTS_PATH\")\n","use_english_datasets = os.getenv(\"USE_ENGLISH_DATASETS\") == \"true\"\n","\n","print(model_name, adapter_name_or_path, load_in_4bit, data_path, results_path)"]},{"cell_type":"code","execution_count":6,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"b2a43943-9324-4839-9a47-cfa72de2244b","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":564,"status":"ok","timestamp":1720679529907,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"UgMvt6dIZBrM","outputId":"ce37581c-fd26-46c2-ad87-d933d99f68f7"},"outputs":[{"name":"stdout","output_type":"stream","text":["Python 3.11.9\n","\u001b[33mWARNING: Package(s) not found: flash-attn\u001b[0m\u001b[33m\n","\u001b[0mCPU times: user 284 ms, sys: 88 ms, total: 372 ms\n","Wall time: 4.21 s\n"]}],"source":["%%time\n","!python --version\n","!pip show flash-attn"]},{"cell_type":"code","execution_count":7,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":1685,"status":"ok","timestamp":1720679531591,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"ZuS_FsLyZBrN","outputId":"2cba0105-c505-4395-afbd-2f2fee6581d0"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading /home/inflaton/code/projects/courses/logical-reasoning/llm_toolkit/logical_reasoning_utils.py\n","GPU is available\n"]}],"source":["from llm_toolkit.llm_utils import *\n","from llm_toolkit.logical_reasoning_utils import *\n","\n","device = check_gpu()"]},{"cell_type":"code","execution_count":8,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["loading model: llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full/checkpoint-88 with adapter: None\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"f59b2bb4da404f3f8aba87b5a732222b","version_major":2,"version_minor":0},"text/plain":["Loading checkpoint shards: 0%| | 0/8 [00:00<?, ?it/s]"]},"metadata":{},"output_type":"display_data"}],"source":["%%time\n","\n","# model, tokenizer = load_model(model_name, adapter_name_or_path=adapter_name_or_path, using_llama_factory=False)\n","model, tokenizer = load_model(adapter_name_or_path, using_llama_factory=False)"]},{"cell_type":"code","execution_count":11,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["loading train/test data files\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"fc9a5f2556b24b42b3fb425da63fb8fc","version_major":2,"version_minor":0},"text/plain":["Map: 0%| | 0/25000 [00:00<?, ? 
examples/s]"]},"metadata":{},"output_type":"display_data"},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"268ca08da54c4f30be9e04477a42e40d","version_major":2,"version_minor":0},"text/plain":["Map: 0%| | 0/3000 [00:00<?, ? examples/s]"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["DatasetDict({\n"," train: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 25000\n"," })\n"," test: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 3000\n"," })\n","})\n"]}],"source":["datasets = load_logical_reasoning_dataset(\n"," data_path,\n"," tokenizer=tokenizer,\n"," chinese_prompt=not use_english_datasets,\n"," using_p1=False,\n",")"]},{"cell_type":"code","execution_count":12,"metadata":{},"outputs":[],"source":["def evaluate_model(model, tokenizer, model_name, dataset):\n"," print(f\"Evaluating model: {model_name} on {device}\")\n"," predictions = eval_model(model, tokenizer, dataset, device=device)\n","\n"," save_results(\n"," model_name,\n"," results_path,\n"," dataset,\n"," predictions,\n"," debug=False,\n"," )\n","\n"," metrics = calc_metrics(dataset[\"label\"], predictions, debug=False)\n"," print(metrics)"]},{"cell_type":"code","execution_count":13,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Evaluating model: THUDM/glm-4-9b-chat-1m_NV4080 on cuda\n"]},{"name":"stderr","output_type":"stream","text":[" 0%| | 1/3000 [1:24:27<4221:21:42, 5067.32s/it]"]},{"name":"stdout","output_type":"stream","text":["--------\n","step 1: \n","不是\n","--------\n","step 2: \n","不是\n","--------\n","step 3: \n","不是\n","--------\n","step 4: 不是\n","--------\n","step 5: 不是\n"]},{"name":"stderr","output_type":"stream","text":[" 13%|█▎ | 392/3000 [5:21:45<35:40:38, 49.25s/it] \n"]},{"ename":"KeyboardInterrupt","evalue":"","output_type":"error","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)","File \u001b[0;32m<timed eval>:1\u001b[0m\n","Cell \u001b[0;32mIn[12], line 3\u001b[0m, in \u001b[0;36mevaluate_model\u001b[0;34m(model, tokenizer, model_name, dataset)\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mevaluate_model\u001b[39m(model, tokenizer, model_name, dataset):\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEvaluating model: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m on \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdevice\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m----> 3\u001b[0m predictions \u001b[38;5;241m=\u001b[39m \u001b[43meval_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtokenizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdataset\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 5\u001b[0m save_results(\n\u001b[1;32m 6\u001b[0m model_name,\n\u001b[1;32m 7\u001b[0m results_path,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 10\u001b[0m debug\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m 11\u001b[0m 
)\n\u001b[1;32m 13\u001b[0m metrics \u001b[38;5;241m=\u001b[39m calc_metrics(dataset[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlabel\u001b[39m\u001b[38;5;124m\"\u001b[39m], predictions, debug\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n","File \u001b[0;32m~/code/projects/courses/logical-reasoning/llm_toolkit/llm_utils.py:164\u001b[0m, in \u001b[0;36meval_model\u001b[0;34m(model, tokenizer, eval_dataset, device, max_new_tokens, repetition_penalty, batch_size)\u001b[0m\n\u001b[1;32m 157\u001b[0m batch_prompts \u001b[38;5;241m=\u001b[39m eval_dataset[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mprompt\u001b[39m\u001b[38;5;124m\"\u001b[39m][i:batch_end]\n\u001b[1;32m 158\u001b[0m inputs \u001b[38;5;241m=\u001b[39m tokenizer(\n\u001b[1;32m 159\u001b[0m batch_prompts,\n\u001b[1;32m 160\u001b[0m return_tensors\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpt\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 161\u001b[0m padding\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m, \u001b[38;5;66;03m# Ensure all inputs in the batch have the same length\u001b[39;00m\n\u001b[1;32m 162\u001b[0m )\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[0;32m--> 164\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 165\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 166\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_new_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmax_new_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 167\u001b[0m \u001b[43m \u001b[49m\u001b[43mrepetition_penalty\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrepetition_penalty\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 168\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 169\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 170\u001b[0m outputs \u001b[38;5;241m=\u001b[39m outputs[:, inputs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124minput_ids\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m1\u001b[39m] :]\n\u001b[1;32m 171\u001b[0m decoded_output \u001b[38;5;241m=\u001b[39m tokenizer\u001b[38;5;241m.\u001b[39mbatch_decode(\n\u001b[1;32m 172\u001b[0m outputs, skip_special_tokens\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 173\u001b[0m ) \u001b[38;5;66;03m# Skip special tokens for clean output\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/utils/_contextlib.py:115\u001b[0m, in \u001b[0;36mcontext_decorator.<locals>.decorate_context\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(func)\n\u001b[1;32m 113\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdecorate_context\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m 114\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m ctx_factory():\n\u001b[0;32m--> 115\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/transformers/generation/utils.py:1969\u001b[0m, in \u001b[0;36mGenerationMixin.generate\u001b[0;34m(self, inputs, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, assistant_model, streamer, negative_prompt_ids, negative_prompt_attention_mask, **kwargs)\u001b[0m\n\u001b[1;32m 1961\u001b[0m input_ids, model_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_expand_inputs_for_generation(\n\u001b[1;32m 1962\u001b[0m input_ids\u001b[38;5;241m=\u001b[39minput_ids,\n\u001b[1;32m 1963\u001b[0m expand_size\u001b[38;5;241m=\u001b[39mgeneration_config\u001b[38;5;241m.\u001b[39mnum_return_sequences,\n\u001b[1;32m 1964\u001b[0m is_encoder_decoder\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39mis_encoder_decoder,\n\u001b[1;32m 1965\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmodel_kwargs,\n\u001b[1;32m 1966\u001b[0m )\n\u001b[1;32m 1968\u001b[0m \u001b[38;5;66;03m# 13. run sample (it degenerates to greedy search when `generation_config.do_sample=False`)\u001b[39;00m\n\u001b[0;32m-> 1969\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_sample\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1970\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1971\u001b[0m \u001b[43m \u001b[49m\u001b[43mlogits_processor\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mprepared_logits_processor\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1972\u001b[0m \u001b[43m \u001b[49m\u001b[43mlogits_warper\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mprepared_logits_warper\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1973\u001b[0m \u001b[43m \u001b[49m\u001b[43mstopping_criteria\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mprepared_stopping_criteria\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1974\u001b[0m \u001b[43m \u001b[49m\u001b[43mgeneration_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgeneration_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1975\u001b[0m \u001b[43m \u001b[49m\u001b[43msynced_gpus\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msynced_gpus\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1976\u001b[0m \u001b[43m \u001b[49m\u001b[43mstreamer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstreamer\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1977\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1978\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1980\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m generation_mode \u001b[38;5;129;01min\u001b[39;00m (GenerationMode\u001b[38;5;241m.\u001b[39mBEAM_SAMPLE, GenerationMode\u001b[38;5;241m.\u001b[39mBEAM_SEARCH):\n\u001b[1;32m 1981\u001b[0m \u001b[38;5;66;03m# 11. 
prepare logits warper\u001b[39;00m\n\u001b[1;32m 1982\u001b[0m prepared_logits_warper \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 1983\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_get_logits_warper(generation_config, device\u001b[38;5;241m=\u001b[39minput_ids\u001b[38;5;241m.\u001b[39mdevice)\n\u001b[1;32m 1984\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m generation_config\u001b[38;5;241m.\u001b[39mdo_sample\n\u001b[1;32m 1985\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 1986\u001b[0m )\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/transformers/generation/utils.py:2912\u001b[0m, in \u001b[0;36mGenerationMixin._sample\u001b[0;34m(self, input_ids, logits_processor, stopping_criteria, generation_config, synced_gpus, streamer, logits_warper, **model_kwargs)\u001b[0m\n\u001b[1;32m 2909\u001b[0m model_inputs\u001b[38;5;241m.\u001b[39mupdate({\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124moutput_hidden_states\u001b[39m\u001b[38;5;124m\"\u001b[39m: output_hidden_states} \u001b[38;5;28;01mif\u001b[39;00m output_hidden_states \u001b[38;5;28;01melse\u001b[39;00m {})\n\u001b[1;32m 2911\u001b[0m \u001b[38;5;66;03m# forward pass to get next token\u001b[39;00m\n\u001b[0;32m-> 2912\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_inputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m 2914\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m synced_gpus \u001b[38;5;129;01mand\u001b[39;00m this_peer_finished:\n\u001b[1;32m 2915\u001b[0m \u001b[38;5;28;01mcontinue\u001b[39;00m \u001b[38;5;66;03m# don't waste resources running the code we don't need\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1532\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1530\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1531\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1532\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1541\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1536\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1537\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1538\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m 
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1539\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1540\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1541\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1543\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1544\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/hooks.py:166\u001b[0m, in \u001b[0;36madd_hook_to_module.<locals>.new_forward\u001b[0;34m(module, *args, **kwargs)\u001b[0m\n\u001b[1;32m 164\u001b[0m output \u001b[38;5;241m=\u001b[39m module\u001b[38;5;241m.\u001b[39m_old_forward(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 165\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 166\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43mmodule\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_old_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 167\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m module\u001b[38;5;241m.\u001b[39m_hf_hook\u001b[38;5;241m.\u001b[39mpost_forward(module, output)\n","File \u001b[0;32m~/.cache/huggingface/modules/transformers_modules/THUDM/glm-4-9b-chat-1m/7d23e9e0bb8e63d85c4489b37b099489c1515370/modeling_chatglm.py:1003\u001b[0m, in \u001b[0;36mChatGLMForConditionalGeneration.forward\u001b[0;34m(self, input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels, use_cache, output_attentions, output_hidden_states, return_dict, return_last_logit)\u001b[0m\n\u001b[1;32m 1000\u001b[0m use_cache \u001b[38;5;241m=\u001b[39m use_cache \u001b[38;5;28;01mif\u001b[39;00m use_cache \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39muse_cache\n\u001b[1;32m 1001\u001b[0m return_dict \u001b[38;5;241m=\u001b[39m return_dict \u001b[38;5;28;01mif\u001b[39;00m return_dict \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39muse_return_dict\n\u001b[0;32m-> 1003\u001b[0m transformer_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtransformer\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1004\u001b[0m \u001b[43m 
input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_hidden_states=output_hidden_states, return_dict=return_dict)
(ANSI escape sequences stripped from here on; the repeated torch Module._wrapped_call_impl/_call_impl dispatch frames at module.py:1532/1541 are omitted)
File ~/.cache/huggingface/modules/transformers_modules/THUDM/glm-4-9b-chat-1m/7d23e9e0bb8e63d85c4489b37b099489c1515370/modeling_chatglm.py:898, in ChatGLMModel.forward → hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb, kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states)
File modeling_chatglm.py:727, in GLMTransformer.forward → layer_ret = layer(hidden_states, attention_mask, rotary_pos_emb, kv_cache=kv_caches[index], use_cache=use_cache)
File ~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/hooks.py:166, in add_hook_to_module.<locals>.new_forward → output = module._old_forward(*args, **kwargs)
File modeling_chatglm.py:651, in GLMBlock.forward → mlp_output = self.mlp(layernorm_output)
File modeling_chatglm.py:584, in MLP.forward → intermediate_parallel = self.dense_h_to_4h(hidden_states)
File ~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/hooks.py:161, in add_hook_to_module.<locals>.new_forward → args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
File ~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/hooks.py:347, in AlignDevicesHook.pre_forward → set_module_tensor_to_device(module, name, self.execution_device, value=value, fp16_statistics=fp16_statistics, tied_params_map=self.tied_params_map)
File ~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/utils/modeling.py:400, in set_module_tensor_to_device → new_value = value.to(device)
KeyboardInterrupt

Source of the interrupted cell:
%%time

evaluate_model(model, tokenizer, f"{model_name}{'_' + adapter_name_or_path if adapter_name_or_path else ''}_NV4080", datasets["test"])

(Notebook metadata: Python 3.11.9, kernelspec "Python 3", Colab GPU type L4, notebook name "10_eval-lf-medium-py3.11".)
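The interrupt lands inside accelerate's AlignDevicesHook: set_module_tensor_to_device is copying weights onto the GPU in the middle of the forward pass, which is what happens when device_map offloads layers because the model does not fit in VRAM (the ~18 GB of bf16 weights for a 9B model likely exceed the NV4080's 16 GB), making every forward pass I/O-bound. The 4-bit notebooks in this commit sidestep that by quantizing the weights so they fit on the GPU. A minimal sketch of such loading, assuming transformers and bitsandbytes are installed; this is illustrative, not the repo's load_model helper:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "THUDM/glm-4-9b-chat-1m"

# NF4 4-bit quantization: ~0.5 bytes/param, so a 9B model fits in well under 16 GB
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    trust_remote_code=True,  # GLM-4 ships custom modeling code (modeling_chatglm.py)
    device_map="auto",       # with 4-bit weights the whole model should land on one GPU
)
```

With all weights resident on the GPU, no AlignDevicesHook transfers occur during generation, so each forward pass runs at compute speed rather than PCIe speed.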
competition/12rp1.15_InternLM_Lora_NV4080_4bit.ipynb
ADDED
@@ -0,0 +1 @@
+
{"cells":[{"cell_type":"code","execution_count":1,"metadata":{"executionInfo":{"elapsed":476,"status":"ok","timestamp":1720679526275,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"uWKRSV6eZsCn"},"outputs":[],"source":["%load_ext autoreload\n","%autoreload 2"]},{"cell_type":"code","execution_count":2,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"eb33b19f-1206-41ee-84e2-e6258a12eef7","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":2534,"status":"ok","timestamp":1720679529344,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"xwFh14uiZBrI","outputId":"d767799c-34c2-46a5-f052-378146a55321"},"outputs":[],"source":["from pathlib import Path\n","\n","try:\n"," from google.colab import drive\n","\n"," drive.mount(\"/content/drive\")\n"," workding_dir = \"/content/drive/MyDrive/logical-reasoning/\"\n","except ModuleNotFoundError:\n"," workding_dir = str(Path.cwd().parent)"]},{"cell_type":"code","execution_count":3,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"6d394937-6c99-4a7c-9d32-7600a280032f","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":5,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"G5pNu3zgZBrL","outputId":"160a554f-fb08-4aa0-bc00-0422fb7c1fac"},"outputs":[{"name":"stdout","output_type":"stream","text":["workding dir: /home/inflaton/code/projects/courses/logical-reasoning\n"]}],"source":["import os\n","import sys\n","from pathlib import Path\n","\n","os.chdir(workding_dir)\n","sys.path.append(workding_dir)\n","print(\"workding dir:\", workding_dir)"]},{"cell_type":"code","execution_count":4,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"9f67ec60-2f24-411c-84eb-0dd664b44775","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"hPCC-6m7ZBrM","outputId":"c7aa2c96-5e99-440a-c148-201d79465ff9"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading env vars from: /home/inflaton/code/projects/courses/logical-reasoning/.env\n"]},{"data":{"text/plain":["True"]},"execution_count":4,"metadata":{},"output_type":"execute_result"}],"source":["from dotenv import find_dotenv, load_dotenv\n","\n","found_dotenv = find_dotenv(\".env\")\n","\n","if len(found_dotenv) == 0:\n"," found_dotenv = find_dotenv(\".env.example\")\n","print(f\"loading env vars from: {found_dotenv}\")\n","load_dotenv(found_dotenv, override=True)"]},{"cell_type":"code","execution_count":5,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"f1597656-8042-4878-9d3b-9ebfb8dd86dc","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO 
_","userId":"00977795705617022768"},"user_tz":-480},"id":"1M3IraVtZBrM","outputId":"29ab35f6-2970-4ade-d85d-3174acf8cda0"},"outputs":[{"name":"stdout","output_type":"stream","text":["internlm/internlm2_5-7b-chat-1m llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full/checkpoint-88 True datasets/mgtv results/mgtv-results_internlm_best.csv\n"]}],"source":["import os\n","\n","model_name = os.getenv(\"MODEL_NAME\")\n","adapter_name_or_path = os.getenv(\"ADAPTER_NAME_OR_PATH\")\n","load_in_4bit = os.getenv(\"LOAD_IN_4BIT\") == \"true\"\n","data_path = os.getenv(\"LOGICAL_REASONING_DATA_PATH\")\n","results_path = os.getenv(\"LOGICAL_REASONING_RESULTS_PATH\")\n","use_english_datasets = os.getenv(\"USE_ENGLISH_DATASETS\") == \"true\"\n","\n","print(model_name, adapter_name_or_path, load_in_4bit, data_path, results_path)"]},{"cell_type":"code","execution_count":6,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"b2a43943-9324-4839-9a47-cfa72de2244b","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":564,"status":"ok","timestamp":1720679529907,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"UgMvt6dIZBrM","outputId":"ce37581c-fd26-46c2-ad87-d933d99f68f7"},"outputs":[{"name":"stdout","output_type":"stream","text":["Python 3.11.9\n","\u001b[33mWARNING: Package(s) not found: flash-attn\u001b[0m\u001b[33m\n","\u001b[0mCPU times: user 284 ms, sys: 88 ms, total: 372 ms\n","Wall time: 4.21 s\n"]}],"source":["%%time\n","!python --version\n","!pip show flash-attn"]},{"cell_type":"code","execution_count":7,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":1685,"status":"ok","timestamp":1720679531591,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"ZuS_FsLyZBrN","outputId":"2cba0105-c505-4395-afbd-2f2fee6581d0"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading /home/inflaton/code/projects/courses/logical-reasoning/llm_toolkit/logical_reasoning_utils.py\n","GPU is available\n"]}],"source":["from llm_toolkit.llm_utils import *\n","from llm_toolkit.logical_reasoning_utils import *\n","\n","device = check_gpu()"]},{"cell_type":"code","execution_count":8,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["loading model: llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full/checkpoint-88 with adapter: None\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"f59b2bb4da404f3f8aba87b5a732222b","version_major":2,"version_minor":0},"text/plain":["Loading checkpoint shards: 0%| | 0/8 [00:00<?, ?it/s]"]},"metadata":{},"output_type":"display_data"}],"source":["%%time\n","\n","# model, tokenizer = load_model(model_name, adapter_name_or_path=adapter_name_or_path, using_llama_factory=False)\n","model, tokenizer = load_model(adapter_name_or_path, using_llama_factory=False)"]},{"cell_type":"code","execution_count":11,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["loading train/test data files\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"fc9a5f2556b24b42b3fb425da63fb8fc","version_major":2,"version_minor":0},"text/plain":["Map: 0%| | 0/25000 [00:00<?, ? 
examples/s]"]},"metadata":{},"output_type":"display_data"},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"268ca08da54c4f30be9e04477a42e40d","version_major":2,"version_minor":0},"text/plain":["Map: 0%| | 0/3000 [00:00<?, ? examples/s]"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["DatasetDict({\n"," train: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 25000\n"," })\n"," test: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 3000\n"," })\n","})\n"]}],"source":["datasets = load_logical_reasoning_dataset(\n"," data_path,\n"," tokenizer=tokenizer,\n"," chinese_prompt=not use_english_datasets,\n"," using_p1=False,\n",")"]},{"cell_type":"code","execution_count":12,"metadata":{},"outputs":[],"source":["def evaluate_model(model, tokenizer, model_name, dataset):\n"," print(f\"Evaluating model: {model_name} on {device}\")\n"," predictions = eval_model(model, tokenizer, dataset, device=device)\n","\n"," save_results(\n"," model_name,\n"," results_path,\n"," dataset,\n"," predictions,\n"," debug=False,\n"," )\n","\n"," metrics = calc_metrics(dataset[\"label\"], predictions, debug=False)\n"," print(metrics)"]},{"cell_type":"code","execution_count":13,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Evaluating model: THUDM/glm-4-9b-chat-1m_NV4080 on cuda\n"]},{"name":"stderr","output_type":"stream","text":[" 0%| | 1/3000 [1:24:27<4221:21:42, 5067.32s/it]"]},{"name":"stdout","output_type":"stream","text":["--------\n","step 1: \n","不是\n","--------\n","step 2: \n","不是\n","--------\n","step 3: \n","不是\n","--------\n","step 4: 不是\n","--------\n","step 5: 不是\n"]},{"name":"stderr","output_type":"stream","text":[" 13%|█▎ | 392/3000 [5:21:45<35:40:38, 49.25s/it] \n"]},{"ename":"KeyboardInterrupt","evalue":"","output_type":"error","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)","File \u001b[0;32m<timed eval>:1\u001b[0m\n","Cell \u001b[0;32mIn[12], line 3\u001b[0m, in \u001b[0;36mevaluate_model\u001b[0;34m(model, tokenizer, model_name, dataset)\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mevaluate_model\u001b[39m(model, tokenizer, model_name, dataset):\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEvaluating model: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m on \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdevice\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m----> 3\u001b[0m predictions \u001b[38;5;241m=\u001b[39m \u001b[43meval_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtokenizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdataset\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 5\u001b[0m save_results(\n\u001b[1;32m 6\u001b[0m model_name,\n\u001b[1;32m 7\u001b[0m results_path,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 10\u001b[0m debug\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m 11\u001b[0m 
)\n\u001b[1;32m 13\u001b[0m metrics \u001b[38;5;241m=\u001b[39m calc_metrics(dataset[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlabel\u001b[39m\u001b[38;5;124m\"\u001b[39m], predictions, debug\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n","File \u001b[0;32m~/code/projects/courses/logical-reasoning/llm_toolkit/llm_utils.py:164\u001b[0m, in \u001b[0;36meval_model\u001b[0;34m(model, tokenizer, eval_dataset, device, max_new_tokens, repetition_penalty, batch_size)\u001b[0m\n\u001b[1;32m 157\u001b[0m batch_prompts \u001b[38;5;241m=\u001b[39m eval_dataset[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mprompt\u001b[39m\u001b[38;5;124m\"\u001b[39m][i:batch_end]\n\u001b[1;32m 158\u001b[0m inputs \u001b[38;5;241m=\u001b[39m tokenizer(\n\u001b[1;32m 159\u001b[0m batch_prompts,\n\u001b[1;32m 160\u001b[0m return_tensors\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpt\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 161\u001b[0m padding\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m, \u001b[38;5;66;03m# Ensure all inputs in the batch have the same length\u001b[39;00m\n\u001b[1;32m 162\u001b[0m )\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[0;32m--> 164\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 165\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 166\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_new_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmax_new_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 167\u001b[0m \u001b[43m \u001b[49m\u001b[43mrepetition_penalty\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrepetition_penalty\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 168\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 169\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 170\u001b[0m outputs \u001b[38;5;241m=\u001b[39m outputs[:, inputs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124minput_ids\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m1\u001b[39m] :]\n\u001b[1;32m 171\u001b[0m decoded_output \u001b[38;5;241m=\u001b[39m tokenizer\u001b[38;5;241m.\u001b[39mbatch_decode(\n\u001b[1;32m 172\u001b[0m outputs, skip_special_tokens\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 173\u001b[0m ) \u001b[38;5;66;03m# Skip special tokens for clean output\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/utils/_contextlib.py:115\u001b[0m, in \u001b[0;36mcontext_decorator.<locals>.decorate_context\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(func)\n\u001b[1;32m 113\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdecorate_context\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m 114\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m ctx_factory():\n\u001b[0;32m--> 115\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/transformers/generation/utils.py:1969\u001b[0m, in \u001b[0;36mGenerationMixin.generate\u001b[0;34m(self, inputs, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, assistant_model, streamer, negative_prompt_ids, negative_prompt_attention_mask, **kwargs)\u001b[0m\n\u001b[1;32m 1961\u001b[0m input_ids, model_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_expand_inputs_for_generation(\n\u001b[1;32m 1962\u001b[0m input_ids\u001b[38;5;241m=\u001b[39minput_ids,\n\u001b[1;32m 1963\u001b[0m expand_size\u001b[38;5;241m=\u001b[39mgeneration_config\u001b[38;5;241m.\u001b[39mnum_return_sequences,\n\u001b[1;32m 1964\u001b[0m is_encoder_decoder\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39mis_encoder_decoder,\n\u001b[1;32m 1965\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmodel_kwargs,\n\u001b[1;32m 1966\u001b[0m )\n\u001b[1;32m 1968\u001b[0m \u001b[38;5;66;03m# 13. run sample (it degenerates to greedy search when `generation_config.do_sample=False`)\u001b[39;00m\n\u001b[0;32m-> 1969\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_sample\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1970\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1971\u001b[0m \u001b[43m \u001b[49m\u001b[43mlogits_processor\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mprepared_logits_processor\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1972\u001b[0m \u001b[43m \u001b[49m\u001b[43mlogits_warper\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mprepared_logits_warper\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1973\u001b[0m \u001b[43m \u001b[49m\u001b[43mstopping_criteria\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mprepared_stopping_criteria\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1974\u001b[0m \u001b[43m \u001b[49m\u001b[43mgeneration_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgeneration_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1975\u001b[0m \u001b[43m \u001b[49m\u001b[43msynced_gpus\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msynced_gpus\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1976\u001b[0m \u001b[43m \u001b[49m\u001b[43mstreamer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstreamer\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1977\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1978\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1980\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m generation_mode \u001b[38;5;129;01min\u001b[39;00m (GenerationMode\u001b[38;5;241m.\u001b[39mBEAM_SAMPLE, GenerationMode\u001b[38;5;241m.\u001b[39mBEAM_SEARCH):\n\u001b[1;32m 1981\u001b[0m \u001b[38;5;66;03m# 11. 
prepare logits warper\u001b[39;00m\n\u001b[1;32m 1982\u001b[0m prepared_logits_warper \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 1983\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_get_logits_warper(generation_config, device\u001b[38;5;241m=\u001b[39minput_ids\u001b[38;5;241m.\u001b[39mdevice)\n\u001b[1;32m 1984\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m generation_config\u001b[38;5;241m.\u001b[39mdo_sample\n\u001b[1;32m 1985\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 1986\u001b[0m )\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/transformers/generation/utils.py:2912\u001b[0m, in \u001b[0;36mGenerationMixin._sample\u001b[0;34m(self, input_ids, logits_processor, stopping_criteria, generation_config, synced_gpus, streamer, logits_warper, **model_kwargs)\u001b[0m\n\u001b[1;32m 2909\u001b[0m model_inputs\u001b[38;5;241m.\u001b[39mupdate({\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124moutput_hidden_states\u001b[39m\u001b[38;5;124m\"\u001b[39m: output_hidden_states} \u001b[38;5;28;01mif\u001b[39;00m output_hidden_states \u001b[38;5;28;01melse\u001b[39;00m {})\n\u001b[1;32m 2911\u001b[0m \u001b[38;5;66;03m# forward pass to get next token\u001b[39;00m\n\u001b[0;32m-> 2912\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_inputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m 2914\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m synced_gpus \u001b[38;5;129;01mand\u001b[39;00m this_peer_finished:\n\u001b[1;32m 2915\u001b[0m \u001b[38;5;28;01mcontinue\u001b[39;00m \u001b[38;5;66;03m# don't waste resources running the code we don't need\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1532\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1530\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1531\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1532\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1541\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1536\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1537\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1538\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m 
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1539\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1540\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1541\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1543\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1544\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/hooks.py:166\u001b[0m, in \u001b[0;36madd_hook_to_module.<locals>.new_forward\u001b[0;34m(module, *args, **kwargs)\u001b[0m\n\u001b[1;32m 164\u001b[0m output \u001b[38;5;241m=\u001b[39m module\u001b[38;5;241m.\u001b[39m_old_forward(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 165\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 166\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43mmodule\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_old_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 167\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m module\u001b[38;5;241m.\u001b[39m_hf_hook\u001b[38;5;241m.\u001b[39mpost_forward(module, output)\n","File \u001b[0;32m~/.cache/huggingface/modules/transformers_modules/THUDM/glm-4-9b-chat-1m/7d23e9e0bb8e63d85c4489b37b099489c1515370/modeling_chatglm.py:1003\u001b[0m, in \u001b[0;36mChatGLMForConditionalGeneration.forward\u001b[0;34m(self, input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels, use_cache, output_attentions, output_hidden_states, return_dict, return_last_logit)\u001b[0m\n\u001b[1;32m 1000\u001b[0m use_cache \u001b[38;5;241m=\u001b[39m use_cache \u001b[38;5;28;01mif\u001b[39;00m use_cache \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39muse_cache\n\u001b[1;32m 1001\u001b[0m return_dict \u001b[38;5;241m=\u001b[39m return_dict \u001b[38;5;28;01mif\u001b[39;00m return_dict \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39muse_return_dict\n\u001b[0;32m-> 1003\u001b[0m transformer_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtransformer\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1004\u001b[0m \u001b[43m 
\u001b[49m\u001b[43minput_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minput_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1005\u001b[0m \u001b[43m \u001b[49m\u001b[43mposition_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mposition_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1006\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1007\u001b[0m \u001b[43m \u001b[49m\u001b[43mpast_key_values\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpast_key_values\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1008\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs_embeds\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs_embeds\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1009\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1010\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1011\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1012\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1014\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m transformer_outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 1015\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m return_last_logit:\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1532\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1530\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1531\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1532\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1541\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1536\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1537\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1538\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1539\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m 
_global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1540\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1541\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1543\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1544\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n","File \u001b[0;32m~/.cache/huggingface/modules/transformers_modules/THUDM/glm-4-9b-chat-1m/7d23e9e0bb8e63d85c4489b37b099489c1515370/modeling_chatglm.py:898\u001b[0m, in \u001b[0;36mChatGLMModel.forward\u001b[0;34m(self, input_ids, position_ids, attention_mask, full_attention_mask, past_key_values, inputs_embeds, use_cache, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 895\u001b[0m rotary_pos_emb \u001b[38;5;241m=\u001b[39m rotary_pos_emb[\u001b[38;5;28;01mNone\u001b[39;00m, :seq_length]\n\u001b[1;32m 897\u001b[0m \u001b[38;5;66;03m# Run encoder.\u001b[39;00m\n\u001b[0;32m--> 898\u001b[0m hidden_states, presents, all_hidden_states, all_self_attentions \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mencoder\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 899\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs_embeds\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfull_attention_mask\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrotary_pos_emb\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrotary_pos_emb\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 900\u001b[0m \u001b[43m \u001b[49m\u001b[43mkv_caches\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpast_key_values\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\n\u001b[1;32m 901\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 902\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m presents \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mtype\u001b[39m(presents) \u001b[38;5;129;01mis\u001b[39;00m torch\u001b[38;5;241m.\u001b[39mTensor:\n\u001b[1;32m 903\u001b[0m presents \u001b[38;5;241m=\u001b[39m presents\u001b[38;5;241m.\u001b[39msplit(\u001b[38;5;241m1\u001b[39m, dim\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1532\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1530\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1531\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1532\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1541\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1536\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1537\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1538\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1539\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1540\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1541\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1543\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1544\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n","File \u001b[0;32m~/.cache/huggingface/modules/transformers_modules/THUDM/glm-4-9b-chat-1m/7d23e9e0bb8e63d85c4489b37b099489c1515370/modeling_chatglm.py:727\u001b[0m, in \u001b[0;36mGLMTransformer.forward\u001b[0;34m(self, hidden_states, attention_mask, rotary_pos_emb, kv_caches, use_cache, output_hidden_states)\u001b[0m\n\u001b[1;32m 717\u001b[0m layer_ret \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mutils\u001b[38;5;241m.\u001b[39mcheckpoint\u001b[38;5;241m.\u001b[39mcheckpoint(\n\u001b[1;32m 718\u001b[0m layer,\n\u001b[1;32m 719\u001b[0m hidden_states,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 724\u001b[0m use_reentrant\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[1;32m 725\u001b[0m )\n\u001b[1;32m 726\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 727\u001b[0m layer_ret \u001b[38;5;241m=\u001b[39m \u001b[43mlayer\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 728\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 729\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 730\u001b[0m \u001b[43m \u001b[49m\u001b[43mrotary_pos_emb\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 731\u001b[0m \u001b[43m \u001b[49m\u001b[43mkv_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mkv_caches\u001b[49m\u001b[43m[\u001b[49m\u001b[43mindex\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 732\u001b[0m \u001b[43m 
\u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\n\u001b[1;32m 733\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 734\u001b[0m hidden_states, kv_cache \u001b[38;5;241m=\u001b[39m layer_ret\n\u001b[1;32m 735\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m use_cache:\n\u001b[1;32m 736\u001b[0m \u001b[38;5;66;03m# token by token decoding, use tuple format\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1532\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1530\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1531\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1532\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1541\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1536\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1537\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1538\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1539\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1540\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1541\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1543\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1544\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/hooks.py:166\u001b[0m, in \u001b[0;36madd_hook_to_module.<locals>.new_forward\u001b[0;34m(module, *args, **kwargs)\u001b[0m\n\u001b[1;32m 164\u001b[0m output \u001b[38;5;241m=\u001b[39m module\u001b[38;5;241m.\u001b[39m_old_forward(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 
165\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 166\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43mmodule\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_old_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 167\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m module\u001b[38;5;241m.\u001b[39m_hf_hook\u001b[38;5;241m.\u001b[39mpost_forward(module, output)\n","File \u001b[0;32m~/.cache/huggingface/modules/transformers_modules/THUDM/glm-4-9b-chat-1m/7d23e9e0bb8e63d85c4489b37b099489c1515370/modeling_chatglm.py:651\u001b[0m, in \u001b[0;36mGLMBlock.forward\u001b[0;34m(self, hidden_states, attention_mask, rotary_pos_emb, kv_cache, use_cache)\u001b[0m\n\u001b[1;32m 648\u001b[0m layernorm_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpost_attention_layernorm(layernorm_input)\n\u001b[1;32m 650\u001b[0m \u001b[38;5;66;03m# MLP.\u001b[39;00m\n\u001b[0;32m--> 651\u001b[0m mlp_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmlp\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlayernorm_output\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 653\u001b[0m \u001b[38;5;66;03m# Second residual connection.\u001b[39;00m\n\u001b[1;32m 654\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mapply_residual_connection_post_layernorm:\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1532\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1530\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1531\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1532\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1541\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1536\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1537\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1538\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1539\u001b[0m 
\u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1540\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1541\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1543\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1544\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/hooks.py:166\u001b[0m, in \u001b[0;36madd_hook_to_module.<locals>.new_forward\u001b[0;34m(module, *args, **kwargs)\u001b[0m\n\u001b[1;32m 164\u001b[0m output \u001b[38;5;241m=\u001b[39m module\u001b[38;5;241m.\u001b[39m_old_forward(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 165\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 166\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43mmodule\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_old_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 167\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m module\u001b[38;5;241m.\u001b[39m_hf_hook\u001b[38;5;241m.\u001b[39mpost_forward(module, output)\n","File \u001b[0;32m~/.cache/huggingface/modules/transformers_modules/THUDM/glm-4-9b-chat-1m/7d23e9e0bb8e63d85c4489b37b099489c1515370/modeling_chatglm.py:584\u001b[0m, in \u001b[0;36mMLP.forward\u001b[0;34m(self, hidden_states)\u001b[0m\n\u001b[1;32m 582\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, hidden_states):\n\u001b[1;32m 583\u001b[0m \u001b[38;5;66;03m# [s, b, 4hp]\u001b[39;00m\n\u001b[0;32m--> 584\u001b[0m intermediate_parallel \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdense_h_to_4h\u001b[49m\u001b[43m(\u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 585\u001b[0m intermediate_parallel \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mactivation_func(intermediate_parallel)\n\u001b[1;32m 586\u001b[0m \u001b[38;5;66;03m# [s, b, h]\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1532\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1530\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1531\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1532\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/torch/nn/modules/module.py:1541\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1536\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1537\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1538\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1539\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1540\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1541\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1543\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1544\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/hooks.py:161\u001b[0m, in \u001b[0;36madd_hook_to_module.<locals>.new_forward\u001b[0;34m(module, *args, **kwargs)\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mnew_forward\u001b[39m(module, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 161\u001b[0m args, kwargs \u001b[38;5;241m=\u001b[39m \u001b[43mmodule\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_hf_hook\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpre_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodule\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 162\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m module\u001b[38;5;241m.\u001b[39m_hf_hook\u001b[38;5;241m.\u001b[39mno_grad:\n\u001b[1;32m 163\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m torch\u001b[38;5;241m.\u001b[39mno_grad():\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/hooks.py:347\u001b[0m, in \u001b[0;36mAlignDevicesHook.pre_forward\u001b[0;34m(self, module, *args, **kwargs)\u001b[0m\n\u001b[1;32m 339\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\n\u001b[1;32m 340\u001b[0m value 
\u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 341\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtied_params_map \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 342\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m value\u001b[38;5;241m.\u001b[39mdata_ptr() \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtied_params_map\n\u001b[1;32m 343\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexecution_device \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtied_params_map[value\u001b[38;5;241m.\u001b[39mdata_ptr()]\n\u001b[1;32m 344\u001b[0m ):\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtied_pointers_to_remove\u001b[38;5;241m.\u001b[39madd((value\u001b[38;5;241m.\u001b[39mdata_ptr(), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexecution_device))\n\u001b[0;32m--> 347\u001b[0m \u001b[43mset_module_tensor_to_device\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 348\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodule\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 349\u001b[0m \u001b[43m \u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 350\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexecution_device\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 351\u001b[0m \u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mvalue\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 352\u001b[0m \u001b[43m \u001b[49m\u001b[43mfp16_statistics\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfp16_statistics\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 353\u001b[0m \u001b[43m \u001b[49m\u001b[43mtied_params_map\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtied_params_map\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 354\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 356\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m send_to_device(args, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexecution_device), send_to_device(\n\u001b[1;32m 357\u001b[0m kwargs, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexecution_device, skip_keys\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mskip_keys\n\u001b[1;32m 358\u001b[0m )\n","File \u001b[0;32m~/miniconda3/envs/llama-factory/lib/python3.11/site-packages/accelerate/utils/modeling.py:400\u001b[0m, in \u001b[0;36mset_module_tensor_to_device\u001b[0;34m(module, tensor_name, device, value, dtype, fp16_statistics, tied_params_map)\u001b[0m\n\u001b[1;32m 398\u001b[0m module\u001b[38;5;241m.\u001b[39m_parameters[tensor_name] \u001b[38;5;241m=\u001b[39m param_cls(new_value, requires_grad\u001b[38;5;241m=\u001b[39mold_value\u001b[38;5;241m.\u001b[39mrequires_grad)\n\u001b[1;32m 399\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(value, torch\u001b[38;5;241m.\u001b[39mTensor):\n\u001b[0;32m--> 400\u001b[0m new_value \u001b[38;5;241m=\u001b[39m 
\u001b[43mvalue\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mto\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 401\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 402\u001b[0m new_value \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mtensor(value, device\u001b[38;5;241m=\u001b[39mdevice)\n","\u001b[0;31mKeyboardInterrupt\u001b[0m: "]}],"source":["%%time\n","\n","evaluate_model(model, tokenizer, f\"{model_name}{'_' + adapter_name_or_path if adapter_name_or_path else ''}_NV4080\", datasets[\"test\"])"]}],"metadata":{"accelerator":"GPU","application/vnd.databricks.v1+notebook":{"dashboards":[],"environmentMetadata":null,"language":"python","notebookMetadata":{"mostRecentlyExecutedCommandWithImplicitDF":{"commandId":-1,"dataframes":["_sqldf"]},"pythonIndentUnit":4},"notebookName":"10_eval-lf-medium-py3.11","widgets":{}},"colab":{"gpuType":"L4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.9"}},"nbformat":4,"nbformat_minor":0}
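The run above was interrupted by hand (KeyboardInterrupt) while accelerate's AlignDevicesHook was still copying a layer's weights onto the execution device (set_module_tensor_to_device -> value.to(device)). That frame sequence is the signature of a model partially offloaded to CPU: every forward pass streams the offloaded parameters back to the GPU, which makes per-question inference far too slow to finish the test set. A minimal sketch, assuming the slowdown did come from offloading, is to quantize the model to 4-bit so it fits on the card outright; the notebook's actual loading cell is not shown here, so treat the values below as illustrative.

# Sketch: load glm-4-9b-chat-1m entirely on one GPU in 4-bit, so accelerate
# never needs to stream offloaded weights during evaluate_model().
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "THUDM/glm-4-9b-chat-1m"
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map={"": 0},  # pin every module to GPU 0 instead of CPU offload
    trust_remote_code=True,
)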
logs/glm-4-9b-chat-1m_epoch_2.txt
CHANGED
@@ -1,10 +1,10 @@
 loading env vars from: /common2/dh.huang.2023/code/logical-reasoning/.env
 Adding /common2/dh.huang.2023/code/logical-reasoning to sys.path
 loading /common2/dh.huang.2023/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
-THUDM/glm-4-9b-chat-1m llama-factory/saves/glm-4-9b/lora/
+THUDM/glm-4-9b-chat-1m llama-factory/saves/glm-4-9b/lora/sft_bf16_p2_full/checkpoint-350 False datasets/mgtv results/glm-4-9b_lora_sft_bf16-p2.csv
 (1) GPU = NVIDIA L40. Max memory = 44.309 GB.
 0.0 GB of memory reserved.
-loading model: THUDM/glm-4-9b-chat-1m with adapter: llama-factory/saves/glm-4-9b/lora/
+loading model: THUDM/glm-4-9b-chat-1m with adapter: llama-factory/saves/glm-4-9b/lora/sft_bf16_p2_full/checkpoint-350
 (2) GPU = NVIDIA L40. Max memory = 44.309 GB.
 17.828 GB of memory reserved.
 loading train/test data files
@@ -33,40 +33,48 @@ truth: 甄加索是一位热爱自然的画家,他每年都会来到这个海
 --------------------------------------------------
 train_text: [gMASK]<sop><|system|>
 You are an expert in logical reasoning.<|user|>
-
-
-1.
-2.
-3.
-4.
-
+你是一个情景猜谜游戏的主持人。游戏规则如下:
+
+1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
+2. 主持人知道谜底,谜底是谜面的答案。
+3. 参与者可以询问任何封闭式问题来找寻事件的真相。
+4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
+- 若谜面和谜底能找到问题的答案,回答:是或者不是
+- 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
+- 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
+- 若参与者提问基本还原了谜底真相,回答:回答正确
+5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。

 请严格按照这些规则回答参与者提出的问题。

-
+**谜面:** 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?

-
+**谜底:** 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。

-
+**参与者提出的问题:** 甄加索是自杀吗
 <|assistant|>不是<|endoftext|>
 --------------------------------------------------
 prompt: [gMASK]<sop><|system|>
 You are an expert in logical reasoning.<|user|>
-
-
-1.
-2.
-3.
-4.
-
+你是一个情景猜谜游戏的主持人。游戏规则如下:
+
+1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
+2. 主持人知道谜底,谜底是谜面的答案。
+3. 参与者可以询问任何封闭式问题来找寻事件的真相。
+4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
+- 若谜面和谜底能找到问题的答案,回答:是或者不是
+- 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
+- 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
+- 若参与者提问基本还原了谜底真相,回答:回答正确
+5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。

 请严格按照这些规则回答参与者提出的问题。

-
+**谜面:** 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?

-
+**谜底:** 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。

-
+**参与者提出的问题:** 甄加索是自杀吗
 <|assistant|>
 --------------------------------------------------
 text: 死者受伤了吗
@@ -83,40 +91,48 @@ truth: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷
 --------------------------------------------------
 train_text: [gMASK]<sop><|system|>
 You are an expert in logical reasoning.<|user|>
-
-
-1.
-2.
-3.
-4.
-
+你是一个情景猜谜游戏的主持人。游戏规则如下:
+
+1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
+2. 主持人知道谜底,谜底是谜面的答案。
+3. 参与者可以询问任何封闭式问题来找寻事件的真相。
+4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
+- 若谜面和谜底能找到问题的答案,回答:是或者不是
+- 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
+- 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
+- 若参与者提问基本还原了谜底真相,回答:回答正确
+5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。

 请严格按照这些规则回答参与者提出的问题。

-
+**谜面:** 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。

-
+**谜底:** 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。

-
+**参与者提出的问题:** 死者受伤了吗
 <|assistant|>不是<|endoftext|>
 --------------------------------------------------
 prompt: [gMASK]<sop><|system|>
 You are an expert in logical reasoning.<|user|>
-
-
-1.
-2.
-3.
-4.
-
+你是一个情景猜谜游戏的主持人。游戏规则如下:
+
+1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
+2. 主持人知道谜底,谜底是谜面的答案。
+3. 参与者可以询问任何封闭式问题来找寻事件的真相。
+4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
+- 若谜面和谜底能找到问题的答案,回答:是或者不是
+- 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
+- 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
+- 若参与者提问基本还原了谜底真相,回答:回答正确
+5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。

 请严格按照这些规则回答参与者提出的问题。

-
+**谜面:** 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。

-
+**谜底:** 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。

-
+**参与者提出的问题:** 死者受伤了吗
 <|assistant|>
 Evaluating model: THUDM/glm-4-9b-chat-1m
 --------
@@ -133,9 +149,9 @@ step 4: 不是
 --------
 step 5: 不是
 (3) GPU = NVIDIA L40. Max memory = 44.309 GB.
-18.
+18.473 GB of memory reserved.
 text ... THUDM/glm-4-9b-chat-1m_checkpoint-350
 0 甄加索是自杀吗 ... 不是

 [1 rows x 8 columns]
-{'accuracy': 0.
+
{'accuracy': 0.549, 'incorrect_ids': [4, 7, 9, 10, 12, 17, 24, 27, 28, 31, 34, 43, 52, 55, 56, 58, 59, 60, 61, 65, 66, 67, 70, 71, 76, 77, 78, 83, 84, 99, 101, 104, 105, 107, 108, 112, 115, 117, 118, 120, 121, 123, 124, 127, 131, 135, 136, 138, 139, 143, 150, 152, 153, 155, 160, 164, 165, 169, 173, 174, 175, 178, 179, 180, 184, 185, 187, 188, 189, 190, 193, 200, 202, 203, 204, 209, 214, 215, 216, 218, 222, 223, 224, 226, 227, 228, 232, 233, 236, 237, 240, 243, 245, 248, 250, 251, 252, 253, 257, 258, 261, 262, 263, 268, 269, 271, 273, 275, 278, 284, 286, 289, 293, 294, 299, 303, 304, 305, 308, 312, 314, 315, 317, 321, 328, 330, 333, 334, 335, 338, 339, 340, 341, 344, 347, 348, 349, 350, 351, 352, 354, 355, 356, 359, 362, 364, 367, 368, 371, 372, 373, 374, 376, 378, 381, 386, 387, 389, 396, 402, 414, 416, 419, 423, 425, 428, 429, 430, 431, 436, 438, 440, 442, 445, 447, 450, 452, 454, 456, 458, 459, 460, 461, 463, 464, 471, 472, 473, 476, 479, 480, 481, 483, 488, 490, 493, 494, 496, 498, 500, 501, 502, 503, 506, 507, 508, 509, 511, 513, 514, 516, 517, 518, 531, 536, 538, 543, 552, 553, 560, 566, 568, 571, 576, 579, 581, 585, 589, 591, 592, 593, 594, 596, 598, 601, 612, 613, 614, 621, 625, 628, 629, 632, 636, 643, 644, 646, 647, 655, 663, 665, 666, 667, 668, 669, 671, 673, 674, 675, 676, 678, 680, 682, 683, 689, 690, 691, 694, 695, 701, 702, 705, 706, 708, 709, 711, 712, 717, 718, 719, 720, 721, 722, 729, 730, 733, 734, 735, 740, 741, 743, 747, 752, 754, 755, 757, 758, 759, 760, 763, 765, 766, 769, 770, 771, 772, 773, 774, 775, 777, 778, 782, 784, 785, 788, 790, 791, 794, 795, 798, 799, 801, 802, 805, 812, 813, 814, 817, 818, 820, 821, 822, 823, 824, 826, 827, 828, 831, 832, 833, 834, 837, 840, 841, 843, 844, 845, 847, 848, 850, 856, 857, 861, 863, 866, 869, 870, 871, 873, 876, 878, 882, 884, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 899, 901, 904, 906, 907, 908, 910, 914, 917, 920, 926, 927, 929, 930, 932, 933, 937, 939, 940, 941, 942, 944, 945, 946, 949, 953, 954, 956, 959, 961, 962, 964, 969, 970, 971, 972, 973, 980, 982, 983, 984, 986, 988, 991, 993, 995, 998, 1000, 1001, 1003, 1005, 1007, 1009, 1011, 1012, 1014, 1015, 1020, 1022, 1023, 1025, 1028, 1031, 1036, 1038, 1039, 1043, 1044, 1045, 1046, 1051, 1053, 1054, 1056, 1058, 1060, 1061, 1063, 1066, 1069, 1071, 1072, 1073, 1074, 1077, 1078, 1080, 1081, 1083, 1084, 1086, 1087, 1089, 1091, 1093, 1096, 1098, 1099, 1102, 1106, 1107, 1110, 1111, 1113, 1114, 1115, 1116, 1117, 1121, 1123, 1126, 1128, 1129, 1134, 1135, 1136, 1140, 1141, 1143, 1144, 1151, 1153, 1155, 1156, 1159, 1163, 1166, 1168, 1170, 1171, 1172, 1173, 1174, 1177, 1178, 1179, 1182, 1183, 1185, 1186, 1192, 1196, 1200, 1203, 1205, 1206, 1209, 1212, 1213, 1216, 1219, 1220, 1224, 1225, 1227, 1228, 1230, 1231, 1235, 1237, 1238, 1239, 1240, 1241, 1242, 1244, 1245, 1249, 1251, 1252, 1256, 1259, 1263, 1264, 1265, 1266, 1267, 1269, 1270, 1271, 1272, 1274, 1278, 1281, 1285, 1286, 1289, 1292, 1293, 1299, 1300, 1301, 1304, 1305, 1308, 1312, 1313, 1314, 1317, 1318, 1319, 1320, 1321, 1322, 1324, 1326, 1329, 1331, 1334, 1336, 1337, 1342, 1344, 1346, 1348, 1349, 1352, 1353, 1357, 1358, 1359, 1360, 1362, 1363, 1364, 1365, 1367, 1368, 1371, 1372, 1374, 1378, 1379, 1380, 1384, 1385, 1386, 1387, 1389, 1390, 1391, 1392, 1394, 1395, 1400, 1405, 1406, 1407, 1409, 1413, 1416, 1419, 1420, 1422, 1424, 1425, 1428, 1430, 1432, 1437, 1438, 1439, 1440, 1444, 1445, 1448, 1450, 1451, 1452, 1453, 1454, 1455, 1457, 1459, 1462, 1466, 1467, 1469, 1473, 1475, 1476, 1477, 1478, 1482, 1487, 1488, 1489, 1490, 
1492, 1493, 1495, 1496, 1499, 1500, 1502, 1504, 1506, 1507, 1508, 1510, 1512, 1515, 1516, 1517, 1518, 1519, 1520, 1524, 1525, 1526, 1528, 1532, 1533, 1536, 1537, 1539, 1540, 1542, 1544, 1545, 1546, 1547, 1549, 1554, 1556, 1558, 1559, 1560, 1561, 1562, 1563, 1567, 1568, 1574, 1577, 1578, 1579, 1580, 1581, 1584, 1585, 1589, 1590, 1591, 1593, 1594, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1611, 1616, 1617, 1618, 1619, 1620, 1625, 1627, 1631, 1632, 1633, 1636, 1637, 1638, 1639, 1641, 1645, 1648, 1654, 1655, 1657, 1658, 1659, 1668, 1669, 1671, 1672, 1673, 1674, 1678, 1682, 1684, 1686, 1688, 1690, 1691, 1694, 1695, 1697, 1698, 1700, 1701, 1708, 1710, 1712, 1716, 1718, 1721, 1722, 1726, 1727, 1729, 1730, 1734, 1736, 1737, 1738, 1739, 1740, 1743, 1745, 1747, 1751, 1753, 1754, 1755, 1756, 1759, 1761, 1763, 1764, 1765, 1766, 1767, 1768, 1770, 1771, 1776, 1778, 1779, 1780, 1785, 1786, 1788, 1796, 1798, 1799, 1803, 1804, 1805, 1809, 1810, 1812, 1818, 1824, 1827, 1834, 1835, 1836, 1837, 1839, 1841, 1843, 1845, 1846, 1848, 1849, 1851, 1857, 1859, 1860, 1861, 1862, 1863, 1864, 1866, 1867, 1870, 1877, 1879, 1882, 1886, 1888, 1889, 1890, 1892, 1896, 1897, 1899, 1901, 1907, 1909, 1915, 1919, 1920, 1925, 1926, 1928, 1930, 1931, 1932, 1933, 1935, 1936, 1940, 1941, 1944, 1947, 1948, 1949, 1950, 1953, 1955, 1958, 1959, 1961, 1963, 1964, 1971, 1973, 1974, 1978, 1979, 1981, 1983, 1984, 1988, 1989, 1990, 1991, 1992, 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2006, 2013, 2015, 2017, 2020, 2025, 2029, 2034, 2035, 2036, 2038, 2039, 2040, 2041, 2042, 2044, 2046, 2047, 2048, 2049, 2052, 2054, 2058, 2059, 2062, 2063, 2064, 2070, 2071, 2072, 2073, 2074, 2077, 2081, 2085, 2087, 2088, 2099, 2100, 2102, 2106, 2107, 2109, 2110, 2114, 2118, 2119, 2121, 2123, 2124, 2126, 2129, 2130, 2131, 2132, 2133, 2134, 2138, 2139, 2140, 2141, 2142, 2146, 2147, 2149, 2152, 2156, 2161, 2162, 2163, 2164, 2167, 2169, 2171, 2173, 2176, 2177, 2178, 2180, 2181, 2182, 2184, 2186, 2188, 2189, 2190, 2194, 2195, 2196, 2197, 2198, 2199, 2204, 2205, 2208, 2212, 2215, 2219, 2221, 2222, 2223, 2226, 2230, 2233, 2237, 2240, 2241, 2242, 2243, 2245, 2246, 2247, 2248, 2250, 2251, 2256, 2257, 2260, 2261, 2262, 2265, 2267, 2274, 2275, 2278, 2279, 2280, 2283, 2285, 2287, 2289, 2292, 2297, 2298, 2301, 2306, 2307, 2311, 2312, 2313, 2320, 2321, 2322, 2324, 2325, 2326, 2330, 2333, 2335, 2337, 2338, 2339, 2340, 2343, 2344, 2350, 2351, 2359, 2360, 2361, 2362, 2364, 2365, 2366, 2370, 2372, 2378, 2379, 2388, 2395, 2400, 2404, 2406, 2408, 2410, 2412, 2420, 2422, 2423, 2424, 2425, 2426, 2429, 2437, 2440, 2442, 2444, 2445, 2446, 2448, 2450, 2451, 2453, 2454, 2457, 2459, 2461, 2463, 2464, 2465, 2466, 2469, 2470, 2472, 2475, 2477, 2479, 2480, 2481, 2484, 2488, 2491, 2494, 2495, 2496, 2497, 2500, 2501, 2508, 2509, 2511, 2512, 2513, 2515, 2516, 2517, 2520, 2522, 2523, 2529, 2530, 2532, 2535, 2537, 2538, 2539, 2540, 2544, 2547, 2548, 2549, 2550, 2553, 2554, 2555, 2556, 2558, 2559, 2560, 2562, 2566, 2570, 2571, 2574, 2575, 2576, 2577, 2586, 2587, 2589, 2590, 2592, 2594, 2602, 2604, 2605, 2606, 2607, 2609, 2610, 2619, 2622, 2623, 2624, 2625, 2629, 2631, 2632, 2633, 2635, 2636, 2637, 2638, 2641, 2644, 2647, 2649, 2651, 2652, 2655, 2657, 2660, 2663, 2665, 2667, 2675, 2676, 2678, 2680, 2681, 2684, 2685, 2686, 2687, 2688, 2691, 2694, 2695, 2697, 2702, 2703, 2707, 2708, 2711, 2713, 2714, 2716, 2717, 2718, 2722, 2726, 2727, 2729, 2730, 2731, 2732, 2733, 2735, 2738, 2740, 2742, 2744, 2746, 2749, 2754, 2757, 2760, 2762, 2764, 2766, 2767, 2768, 2769, 2771, 2774, 2776, 2777, 
2778, 2783, 2786, 2787, 2788, 2789, 2791, 2793, 2798, 2801, 2802, 2803, 2810, 2811, 2814, 2816, 2817, 2820, 2821, 2824, 2829, 2831, 2836, 2837, 2841, 2843, 2845, 2846, 2849, 2850, 2851, 2857, 2858, 2860, 2863, 2865, 2867, 2868, 2870, 2873, 2874, 2877, 2878, 2879, 2880, 2881, 2882, 2884, 2887, 2888, 2892, 2896, 2897, 2899, 2902, 2904, 2905, 2912, 2916, 2920, 2921, 2922, 2925, 2927, 2930, 2932, 2933, 2937, 2939, 2940, 2942, 2944, 2946, 2949, 2950, 2951, 2953, 2954, 2955, 2956, 2958, 2960, 2964, 2967, 2968, 2971, 2972, 2973, 2975, 2976, 2978, 2980, 2981, 2982, 2983, 2985, 2994, 2996, 2998, 2999]}
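Each evaluation run appends one prediction column per checkpoint to the shared results CSV (hence "[1 rows x 8 columns]" after checkpoint-350 above and "[1 rows x 9 columns]" after checkpoint-525 below), and the final log line reports the overall accuracy together with the row ids the checkpoint answered incorrectly. A sketch of how that summary can be recomputed from the CSV follows; the prediction column name is taken from the log, while the reference-answer column name "label" is an assumption.

# Sketch: recompute the accuracy/incorrect_ids summary from the results CSV
# (the "label" column name is assumed, not confirmed by the logs).
import pandas as pd

df = pd.read_csv("results/glm-4-9b_lora_sft_bf16-p2.csv")
pred_col = "THUDM/glm-4-9b-chat-1m_checkpoint-350"  # one column per evaluated checkpoint

correct = df[pred_col] == df["label"]
summary = {
    "accuracy": correct.mean(),
    "incorrect_ids": df.index[~correct].tolist(),
}
print(summary)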
logs/glm-4-9b-chat-1m_epoch_3.txt
CHANGED
@@ -1,10 +1,10 @@
 loading env vars from: /common2/dh.huang.2023/code/logical-reasoning/.env
 Adding /common2/dh.huang.2023/code/logical-reasoning to sys.path
 loading /common2/dh.huang.2023/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
-THUDM/glm-4-9b-chat-1m llama-factory/saves/glm-4-9b/lora/
+THUDM/glm-4-9b-chat-1m llama-factory/saves/glm-4-9b/lora/sft_bf16_p2_full/checkpoint-525 False datasets/mgtv results/glm-4-9b_lora_sft_bf16-p2.csv
 (1) GPU = NVIDIA L40. Max memory = 44.309 GB.
 0.0 GB of memory reserved.
-loading model: THUDM/glm-4-9b-chat-1m with adapter: llama-factory/saves/glm-4-9b/lora/
+loading model: THUDM/glm-4-9b-chat-1m with adapter: llama-factory/saves/glm-4-9b/lora/sft_bf16_p2_full/checkpoint-525
 (2) GPU = NVIDIA L40. Max memory = 44.309 GB.
 17.828 GB of memory reserved.
 loading train/test data files
@@ -33,40 +33,48 @@ truth: 甄加索是一位热爱自然的画家,他每年都会来到这个海
 --------------------------------------------------
 train_text: [gMASK]<sop><|system|>
 You are an expert in logical reasoning.<|user|>
-
-
-1.
-2.
-3.
-4.
-
+你是一个情景猜谜游戏的主持人。游戏规则如下:
+
+1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
+2. 主持人知道谜底,谜底是谜面的答案。
+3. 参与者可以询问任何封闭式问题来找寻事件的真相。
+4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
+- 若谜面和谜底能找到问题的答案,回答:是或者不是
+- 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
+- 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
+- 若参与者提问基本还原了谜底真相,回答:回答正确
+5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。

 请严格按照这些规则回答参与者提出的问题。

-
+**谜面:** 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?

-
+**谜底:** 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。

-
+**参与者提出的问题:** 甄加索是自杀吗
 <|assistant|>不是<|endoftext|>
 --------------------------------------------------
 prompt: [gMASK]<sop><|system|>
 You are an expert in logical reasoning.<|user|>
-
-
-1.
-2.
-3.
-4.
-
+你是一个情景猜谜游戏的主持人。游戏规则如下:
+
+1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
+2. 主持人知道谜底,谜底是谜面的答案。
+3. 参与者可以询问任何封闭式问题来找寻事件的真相。
+4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
+- 若谜面和谜底能找到问题的答案,回答:是或者不是
+- 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
+- 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
+- 若参与者提问基本还原了谜底真相,回答:回答正确
+5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。

 请严格按照这些规则回答参与者提出的问题。

-
+**谜面:** 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?

-
+**谜底:** 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。

-
+**参与者提出的问题:** 甄加索是自杀吗
 <|assistant|>
 --------------------------------------------------
 text: 死者受伤了吗
@@ -83,40 +91,48 @@ truth: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷
 --------------------------------------------------
 train_text: [gMASK]<sop><|system|>
 You are an expert in logical reasoning.<|user|>
-
-
-1.
-2.
-3.
-4.
-
+你是一个情景猜谜游戏的主持人。游戏规则如下:
+
+1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
+2. 主持人知道谜底,谜底是谜面的答案。
+3. 参与者可以询问任何封闭式问题来找寻事件的真相。
+4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
+- 若谜面和谜底能找到问题的答案,回答:是或者不是
+- 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
+- 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
+- 若参与者提问基本还原了谜底真相,回答:回答正确
+5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。

 请严格按照这些规则回答参与者提出的问题。

-
+**谜面:** 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。

-
+**谜底:** 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。

-
+**参与者提出的问题:** 死者受伤了吗
 <|assistant|>不是<|endoftext|>
 --------------------------------------------------
 prompt: [gMASK]<sop><|system|>
 You are an expert in logical reasoning.<|user|>
-
-
-1.
-2.
-3.
-4.
-
+你是一个情景猜谜游戏的主持人。游戏规则如下:
+
+1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
+2. 主持人知道谜底,谜底是谜面的答案。
+3. 参与者可以询问任何封闭式问题来找寻事件的真相。
+4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
+- 若谜面和谜底能找到问题的答案,回答:是或者不是
+- 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
+- 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
+- 若参与者提问基本还原了谜底真相,回答:回答正确
+5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。

 请严格按照这些规则回答参与者提出的问题。

-
+**谜面:** 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。

-
+**谜底:** 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。

-
+**参与者提出的问题:** 死者受伤了吗
 <|assistant|>
 Evaluating model: THUDM/glm-4-9b-chat-1m
 --------
@@ -133,9 +149,9 @@ step 4: 不是
 --------
 step 5: 不是
 (3) GPU = NVIDIA L40. Max memory = 44.309 GB.
-18.
+18.473 GB of memory reserved.
 text ... THUDM/glm-4-9b-chat-1m_checkpoint-525
 0 甄加索是自杀吗 ... 不是

 [1 rows x 9 columns]
-{'accuracy': 0.
+
{'accuracy': 0.5986666666666667, 'incorrect_ids': [7, 9, 12, 13, 14, 17, 24, 25, 26, 27, 28, 31, 33, 35, 36, 38, 52, 55, 58, 59, 65, 67, 71, 72, 76, 77, 78, 81, 83, 84, 88, 93, 96, 99, 101, 104, 105, 108, 109, 112, 113, 115, 117, 118, 121, 123, 124, 127, 131, 135, 136, 137, 138, 139, 143, 144, 150, 153, 154, 155, 160, 161, 164, 171, 172, 174, 175, 178, 179, 180, 181, 187, 188, 190, 191, 192, 193, 197, 199, 200, 202, 204, 207, 214, 215, 218, 220, 223, 224, 225, 226, 227, 228, 230, 233, 234, 236, 240, 243, 245, 248, 250, 252, 253, 256, 257, 260, 262, 263, 265, 269, 271, 272, 273, 275, 276, 283, 284, 286, 289, 301, 303, 304, 305, 307, 308, 313, 314, 315, 316, 317, 320, 321, 330, 334, 335, 338, 339, 340, 341, 344, 348, 349, 350, 353, 354, 355, 356, 358, 360, 367, 368, 371, 373, 374, 376, 378, 386, 389, 395, 396, 397, 402, 403, 404, 410, 414, 422, 423, 428, 429, 430, 431, 436, 438, 445, 447, 450, 452, 454, 456, 457, 459, 460, 461, 464, 471, 472, 475, 476, 480, 482, 483, 488, 490, 492, 494, 496, 498, 500, 501, 502, 503, 506, 507, 508, 511, 513, 514, 518, 519, 520, 536, 538, 553, 560, 566, 570, 571, 576, 579, 581, 585, 589, 591, 592, 593, 597, 601, 612, 613, 614, 621, 625, 627, 628, 632, 636, 637, 643, 644, 646, 647, 649, 665, 666, 671, 673, 674, 675, 676, 682, 683, 686, 689, 690, 691, 694, 695, 697, 701, 702, 703, 706, 709, 717, 718, 721, 726, 729, 730, 732, 734, 739, 740, 742, 747, 752, 754, 757, 758, 766, 770, 771, 772, 773, 774, 778, 779, 782, 784, 785, 788, 791, 795, 798, 799, 800, 801, 805, 809, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 826, 827, 828, 833, 834, 841, 843, 845, 847, 848, 850, 851, 853, 856, 857, 860, 861, 866, 869, 870, 871, 873, 876, 878, 884, 886, 888, 889, 890, 891, 893, 895, 896, 897, 899, 901, 904, 907, 910, 913, 914, 920, 926, 927, 932, 933, 935, 936, 937, 939, 940, 942, 945, 948, 949, 952, 953, 954, 956, 959, 961, 962, 966, 969, 970, 973, 974, 980, 981, 983, 984, 987, 988, 991, 994, 998, 1001, 1003, 1006, 1007, 1009, 1011, 1012, 1014, 1015, 1022, 1023, 1025, 1026, 1028, 1036, 1038, 1039, 1043, 1044, 1045, 1046, 1048, 1051, 1054, 1056, 1058, 1063, 1069, 1071, 1073, 1074, 1077, 1080, 1081, 1084, 1087, 1089, 1091, 1093, 1096, 1098, 1102, 1106, 1107, 1108, 1110, 1111, 1114, 1116, 1120, 1121, 1124, 1125, 1126, 1128, 1129, 1134, 1135, 1136, 1140, 1143, 1156, 1158, 1160, 1161, 1162, 1163, 1164, 1166, 1167, 1168, 1171, 1172, 1177, 1178, 1180, 1181, 1185, 1192, 1196, 1202, 1203, 1206, 1209, 1212, 1216, 1219, 1220, 1224, 1227, 1228, 1229, 1230, 1231, 1235, 1237, 1238, 1239, 1241, 1242, 1245, 1251, 1252, 1256, 1259, 1264, 1266, 1267, 1270, 1271, 1272, 1273, 1274, 1281, 1282, 1286, 1289, 1290, 1292, 1293, 1299, 1300, 1301, 1304, 1305, 1308, 1312, 1313, 1316, 1317, 1318, 1322, 1324, 1329, 1331, 1342, 1344, 1346, 1348, 1349, 1353, 1359, 1360, 1363, 1365, 1370, 1371, 1372, 1373, 1378, 1379, 1384, 1385, 1386, 1387, 1389, 1392, 1393, 1395, 1400, 1402, 1406, 1407, 1409, 1413, 1418, 1419, 1420, 1423, 1424, 1425, 1426, 1430, 1431, 1432, 1438, 1440, 1441, 1443, 1444, 1450, 1452, 1453, 1454, 1455, 1457, 1459, 1462, 1469, 1473, 1477, 1478, 1481, 1482, 1484, 1487, 1488, 1489, 1490, 1491, 1495, 1496, 1502, 1503, 1504, 1507, 1508, 1510, 1512, 1516, 1517, 1518, 1525, 1528, 1533, 1535, 1536, 1542, 1544, 1545, 1546, 1547, 1549, 1551, 1555, 1558, 1560, 1561, 1562, 1568, 1574, 1580, 1581, 1585, 1587, 1590, 1591, 1593, 1594, 1600, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1611, 1612, 1613, 1619, 1622, 1627, 1631, 1632, 1633, 1636, 1637, 1639, 1641, 1647, 1648, 1650, 1654, 1655, 
1657, 1659, 1660, 1662, 1665, 1671, 1672, 1674, 1677, 1678, 1679, 1680, 1682, 1683, 1684, 1686, 1691, 1694, 1695, 1697, 1698, 1704, 1710, 1711, 1713, 1716, 1718, 1720, 1721, 1722, 1726, 1729, 1733, 1734, 1735, 1736, 1737, 1738, 1740, 1743, 1745, 1751, 1753, 1754, 1755, 1756, 1758, 1759, 1765, 1766, 1767, 1769, 1770, 1771, 1778, 1779, 1780, 1783, 1785, 1786, 1788, 1790, 1791, 1798, 1799, 1800, 1805, 1809, 1810, 1812, 1816, 1818, 1824, 1827, 1834, 1835, 1839, 1848, 1853, 1854, 1855, 1856, 1857, 1859, 1860, 1862, 1866, 1867, 1869, 1873, 1877, 1879, 1880, 1882, 1888, 1890, 1891, 1892, 1897, 1900, 1913, 1914, 1915, 1919, 1920, 1923, 1928, 1931, 1933, 1934, 1935, 1936, 1938, 1940, 1941, 1946, 1948, 1949, 1951, 1953, 1955, 1958, 1959, 1961, 1962, 1963, 1964, 1970, 1971, 1973, 1974, 1978, 1981, 1984, 1988, 1989, 1990, 1992, 1994, 1995, 1998, 2000, 2001, 2008, 2013, 2014, 2015, 2017, 2019, 2020, 2022, 2028, 2029, 2034, 2035, 2036, 2038, 2041, 2042, 2044, 2046, 2048, 2049, 2052, 2053, 2054, 2056, 2058, 2062, 2063, 2064, 2070, 2071, 2077, 2081, 2085, 2087, 2088, 2092, 2099, 2100, 2106, 2109, 2110, 2112, 2114, 2118, 2119, 2121, 2122, 2124, 2126, 2131, 2132, 2133, 2139, 2141, 2142, 2145, 2146, 2147, 2152, 2156, 2159, 2160, 2161, 2162, 2167, 2169, 2172, 2173, 2177, 2180, 2181, 2184, 2185, 2186, 2187, 2188, 2190, 2192, 2193, 2194, 2196, 2197, 2198, 2199, 2200, 2203, 2212, 2213, 2214, 2215, 2219, 2222, 2223, 2226, 2230, 2231, 2233, 2240, 2241, 2242, 2243, 2245, 2246, 2247, 2248, 2250, 2251, 2261, 2262, 2263, 2265, 2267, 2274, 2278, 2279, 2280, 2287, 2289, 2290, 2294, 2295, 2297, 2298, 2301, 2306, 2311, 2312, 2313, 2314, 2320, 2324, 2331, 2333, 2335, 2337, 2338, 2339, 2343, 2344, 2348, 2351, 2352, 2353, 2360, 2364, 2366, 2367, 2376, 2378, 2379, 2388, 2389, 2390, 2395, 2396, 2398, 2400, 2404, 2405, 2406, 2408, 2410, 2412, 2420, 2421, 2423, 2425, 2426, 2437, 2440, 2442, 2444, 2445, 2448, 2450, 2453, 2454, 2463, 2464, 2465, 2466, 2470, 2472, 2473, 2475, 2476, 2479, 2484, 2488, 2491, 2493, 2495, 2496, 2497, 2499, 2500, 2501, 2502, 2503, 2507, 2509, 2510, 2511, 2513, 2515, 2517, 2518, 2520, 2522, 2525, 2526, 2529, 2530, 2535, 2537, 2538, 2539, 2540, 2544, 2547, 2548, 2549, 2551, 2555, 2556, 2558, 2559, 2560, 2562, 2563, 2570, 2571, 2574, 2575, 2576, 2577, 2586, 2589, 2590, 2592, 2593, 2594, 2602, 2605, 2606, 2607, 2609, 2620, 2622, 2624, 2625, 2628, 2629, 2630, 2631, 2633, 2638, 2641, 2642, 2644, 2645, 2647, 2653, 2657, 2660, 2663, 2664, 2667, 2672, 2675, 2678, 2684, 2685, 2687, 2688, 2691, 2694, 2697, 2702, 2703, 2707, 2713, 2714, 2716, 2717, 2722, 2726, 2727, 2729, 2730, 2731, 2732, 2733, 2738, 2742, 2744, 2745, 2746, 2747, 2752, 2754, 2756, 2757, 2759, 2760, 2761, 2762, 2763, 2764, 2766, 2767, 2768, 2769, 2771, 2774, 2776, 2777, 2778, 2781, 2788, 2791, 2792, 2793, 2795, 2798, 2800, 2801, 2805, 2809, 2810, 2811, 2813, 2816, 2820, 2821, 2824, 2829, 2836, 2837, 2840, 2841, 2843, 2845, 2850, 2851, 2854, 2857, 2858, 2860, 2861, 2863, 2870, 2871, 2873, 2875, 2877, 2878, 2879, 2880, 2881, 2882, 2884, 2892, 2896, 2897, 2899, 2902, 2904, 2905, 2910, 2912, 2915, 2916, 2921, 2930, 2933, 2935, 2937, 2938, 2939, 2942, 2944, 2946, 2949, 2951, 2954, 2955, 2956, 2960, 2963, 2964, 2968, 2969, 2971, 2972, 2976, 2978, 2979, 2980, 2981, 2982, 2985, 2991, 2994, 2998, 2999]}
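Between the two epochs only the adapter checkpoint changes; both runs use the same P2 prompt shown in the diffs above. For reference, that prompt can be assembled from a dataset row roughly as follows. This is a sketch, not the actual helper in llm_toolkit/logical_reasoning_utils.py; the field name "puzzle" is an assumption, while "truth" and "text" do appear in the logs.

# Sketch of the P2 prompt template seen in the logs above.
P2_PROMPT = """你是一个情景猜谜游戏的主持人。游戏规则如下:

1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
2. 主持人知道谜底,谜底是谜面的答案。
3. 参与者可以询问任何封闭式问题来找寻事件的真相。
4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
- 若谜面和谜底能找到问题的答案,回答:是或者不是
- 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
- 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
- 若参与者提问基本还原了谜底真相,回答:回答正确
5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。

请严格按照这些规则回答参与者提出的问题。

**谜面:** {puzzle}

**谜底:** {truth}

**参与者提出的问题:** {text}"""


def build_p2_prompt(puzzle: str, truth: str, text: str) -> str:
    # Fill the template with one dataset row's puzzle, solution, and question.
    return P2_PROMPT.format(puzzle=puzzle, truth=truth, text=text)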
results/glm-4-9b_lora_sft_bf16-p2.csv
CHANGED
The diff for this file is too large to render. See raw diff.
scripts/eval-mgtv-glm-4-9b.sh
CHANGED
@@ -29,7 +29,7 @@ export USING_P1_PROMPT_TEMPLATE=true
 #echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
 #python llm_toolkit/eval_logical_reasoning_all_epochs.py

-export START_EPOCH=
+export START_EPOCH=4
 export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX-p2.csv
 export ADAPTER_PATH_BASE=llama-factory/saves/glm-4-9b/lora/sft_bf16_p2_full
 export USING_P1_PROMPT_TEMPLATE=false
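The script drives the Python evaluator entirely through environment variables, so resuming after the first three epochs is just a matter of setting START_EPOCH=4. A sketch of how the consuming side might read these variables follows; this is assumed logic, not the actual eval_logical_reasoning_all_epochs.py, and the 175-steps-per-epoch figure is inferred from checkpoint-350 being epoch 2 and checkpoint-525 being epoch 3.

# Sketch: assumed reader-side logic for the variables exported above.
import os

start_epoch = int(os.getenv("START_EPOCH", "1"))
results_path = os.environ["LOGICAL_REASONING_RESULTS_PATH"]
adapter_base = os.environ["ADAPTER_PATH_BASE"]
use_p1_template = os.getenv("USING_P1_PROMPT_TEMPLATE", "false").lower() == "true"

STEPS_PER_EPOCH = 175  # inferred: checkpoint-350 = epoch 2, checkpoint-525 = epoch 3
TOTAL_EPOCHS = 6       # assumption for illustration

for epoch in range(start_epoch, TOTAL_EPOCHS + 1):
    adapter = f"{adapter_base}/checkpoint-{STEPS_PER_EPOCH * epoch}"
    # Each iteration would evaluate one adapter checkpoint and append a new
    # prediction column to the shared results CSV.
    print(f"evaluating {adapter} -> {results_path} (P1 template: {use_p1_template})")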