inference error
Traceback (most recent call last):
File "demo.py", line 35, in <module>
outputs = pipe(messages, max_new_tokens=256)
File "/opt/conda/lib/python3.10/site-packages/transformers/pipelines/text_generation.py", line 257, in __call__
return super().__call__(Chat(text_inputs), **kwargs)
File "/opt/conda/lib/python3.10/site-packages/transformers/pipelines/base.py", line 1254, in __call__
return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
File "/opt/conda/lib/python3.10/site-packages/transformers/pipelines/base.py", line 1261, in run_single
model_outputs = self.forward(model_inputs, **forward_params)
File "/opt/conda/lib/python3.10/site-packages/transformers/pipelines/base.py", line 1161, in forward
model_outputs = self._forward(model_inputs, **forward_params)
File "/opt/conda/lib/python3.10/site-packages/transformers/pipelines/text_generation.py", line 349, in _forward
generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
File "/opt/conda/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/transformers/generation/utils.py", line 1744, in generate
model_kwargs["past_key_values"] = self._get_cache(
File "/opt/conda/lib/python3.10/site-packages/transformers/generation/utils.py", line 1435, in _get_cache
self._cache = cache_cls(
File "/opt/conda/lib/python3.10/site-packages/transformers/cache_utils.py", line 1012, in __init__
torch._dynamo.mark_static_address(new_layer_key_cache)
AttributeError: module 'torch._dynamo' has no attribute 'mark_static_address'
Hi @ccbbdd ,
Could you please upgrade the Torch library by running the following command:
pip install --upgrade torch torchvision torchaudio
Additionally, ensure that your Torch version is 2.3.0 or higher: recent Transformers releases call torch._dynamo.mark_static_address in cache_utils.py, and that function does not exist in older Torch versions, which is why you see this AttributeError.
Thank you.