What can I do?

#4 opened by Plasma6861

C:\Users\finnl>python 666.py
Traceback (most recent call last):
File "C:\Users\finnl\666.py", line 9, in
pipe = pipeline("text-generation", model="mlx-community/Llama-3.3-70B-Instruct-4bit")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\pipelines_init_.py", line 940, in pipeline
framework, model = infer_framework_load_model(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\pipelines\base.py", line 302, in infer_framework_load_model
raise ValueError(
ValueError: Could not load model mlx-community/Llama-3.3-70B-Instruct-4bit with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCausalLM'>, <class 'transformers.models.llama.modeling_llama.LlamaForCausalLM'>). See the original errors:

while loading with AutoModelForCausalLM, an error is thrown:
Traceback (most recent call last):
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\pipelines\base.py", line 289, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\models\auto\auto_factory.py", line 564, in from_pretrained
return model_class.from_pretrained(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\modeling_utils.py", line 3659, in from_pretrained
config.quantization_config = AutoHfQuantizer.merge_quantization_configs(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\quantizers\auto.py", line 173, in merge_quantization_configs
quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\quantizers\auto.py", line 92, in from_dict
raise ValueError(
ValueError: The model's quantization config from the arguments has no quant_method attribute. Make sure that the model has been correctly quantized

while loading with LlamaForCausalLM, an error is thrown:
Traceback (most recent call last):
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\pipelines\base.py", line 289, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\modeling_utils.py", line 3659, in from_pretrained
config.quantization_config = AutoHfQuantizer.merge_quantization_configs(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\quantizers\auto.py", line 173, in merge_quantization_configs
quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\finnl\AppData\Local\Programs\Python\Python312\Lib\site-packages\transformers\quantizers\auto.py", line 92, in from_dict
raise ValueError(
ValueError: The model's quantization config from the arguments has no quant_method attribute. Make sure that the model has been correctly quantized
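This error means `transformers` cannot parse the checkpoint's quantization config: repos under `mlx-community` are quantized for Apple's MLX framework, and their `config.json` carries an MLX-style quantization block without the `quant_method` field that `transformers` expects. MLX itself only runs on Apple Silicon Macs, so this particular checkpoint cannot be loaded on a Windows machine at all. On an Apple Silicon Mac, the intended loading path is `mlx-lm` rather than the `transformers` pipeline. A minimal sketch, assuming macOS on Apple Silicon with `pip install mlx-lm`:

```python
# Runs only on Apple Silicon; MLX is not available on Windows.
from mlx_lm import load, generate

# load() downloads the MLX-quantized weights and tokenizer from the Hub
# (first run fetches roughly 40 GB for this 4-bit 70B model).
model, tokenizer = load("mlx-community/Llama-3.3-70B-Instruct-4bit")

# generate() takes a plain prompt string and returns the generated text.
text = generate(model, tokenizer, prompt="Hello, how are you?", max_tokens=100)
print(text)
```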

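If you need to stay on Windows, pick a quantization format `transformers` understands instead of the MLX export, for example loading the original weights in 4-bit via bitsandbytes. A hedged sketch, assuming a CUDA GPU with enough memory (a 70B model still needs on the order of 40 GB even at 4-bit, so a smaller variant is more realistic on consumer hardware) and access to the gated `meta-llama/Llama-3.3-70B-Instruct` repo:

```python
# Windows/Linux + CUDA path: 4-bit quantization handled by bitsandbytes,
# which transformers understands (unlike MLX quantization).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline

model_id = "meta-llama/Llama-3.3-70B-Instruct"  # gated repo; needs an accepted license and HF token

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute in bf16 while weights stay 4-bit
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",  # spread layers across available GPUs/CPU
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
print(pipe("Hello, how are you?", max_new_tokens=50)[0]["generated_text"])
```

Alternatively, GGUF exports of the same model run through llama.cpp, which is another common route on Windows hardware.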