`prepare_model_for_int8_training` has been deprecated for quite some time, and with PEFT v0.10.0 it has been removed. Please use `prepare_model_for_kbit_training` instead.
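
For reference, a minimal sketch of calling the new function directly; the checkpoint name and LoRA settings below are placeholders, not the ones used in the notebook:

```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# Load an 8-bit quantized base model (placeholder checkpoint).
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloomz-560m",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)

# prepare_model_for_kbit_training handles both 8-bit and 4-bit quantized models
# and replaces the removed prepare_model_for_int8_training.
model = prepare_model_for_kbit_training(model)

# Wrap with LoRA adapters as before.
model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM"))
```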

#4
lora_clm_with_additional_tokens.ipynb CHANGED
```diff
@@ -41,8 +41,8 @@
 "    PeftConfig,\n",
 "    PeftModel,\n",
 "    get_peft_model,\n",
-"    prepare_model_for_int8_training,\n",
 ")\n",
+"from peft import prepare_model_for_kbit_training as prepare_model_for_int8_training\n",
 "from transformers import (\n",
 "    AutoModelForCausalLM,\n",
 "    AutoTokenizer,\n",
```