error OverflowError: cannot fit 'int' into an index-sized integer

#19
by huiye5 - opened

This is my test code:
```python
from diffusers import DiffusionPipeline
import torch

pipe = DiffusionPipeline.from_pretrained(
"./",
torch_dtype=torch.float16,
variant="fp16",
).to("cuda")

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt=prompt, num_inference_steps=50, guidance_scale=3).images[0]

```

```
Traceback (most recent call last):
  File "D:****\demo.py", line **, in <module>
    image = pipe(prompt=prompt, num_inference_steps=50, guidance_scale=3).images[0]
  File "d:\anaconda3\envs\pytorch2\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "d:\anaconda3\envs\pytorch2\lib\site-packages\diffusers\pipelines\stable_diffusion_xl\pipeline_stable_diffusion_xl.py", line 1054, in __call__
    ) = self.encode_prompt(
  File "d:\anaconda3\envs\pytorch2\lib\site-packages\diffusers\pipelines\stable_diffusion_xl\pipeline_stable_diffusion_xl.py", line 363, in encode_prompt
    text_inputs = tokenizer(
  File "d:\anaconda3\envs\pytorch2\lib\site-packages\transformers\tokenization_utils_base.py", line 2803, in __call__
    encodings = self._call_one(text=text, text_pair=text_pair, **all_kwargs)
  File "d:\anaconda3\envs\pytorch2\lib\site-packages\transformers\tokenization_utils_base.py", line 2889, in _call_one
    return self.batch_encode_plus(
  File "d:\anaconda3\envs\pytorch2\lib\site-packages\transformers\tokenization_utils_base.py", line 3080, in batch_encode_plus
    return self._batch_encode_plus(
  File "d:\anaconda3\envs\pytorch2\lib\site-packages\transformers\tokenization_utils.py", line 807, in _batch_encode_plus
    batch_outputs = self._batch_prepare_for_model(
  File "d:\anaconda3\envs\pytorch2\lib\site-packages\transformers\tokenization_utils.py", line 879, in _batch_prepare_for_model
    batch_outputs = self.pad(
  File "d:\anaconda3\envs\pytorch2\lib\site-packages\transformers\tokenization_utils_base.py", line 3287, in pad
    outputs = self._pad(
  File "d:\anaconda3\envs\pytorch2\lib\site-packages\transformers\tokenization_utils_base.py", line 3656, in _pad
    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
OverflowError: cannot fit 'int' into an index-sized integer
```

Hi, do you have any updates on this? I'm facing the same issue as well. I think it's an issue with the fp16 variant. What is your model?

Nope, I don't know what to do now. This is my test code:

```python
from diffusers import DiffusionPipeline
import torch

pipe = DiffusionPipeline.from_pretrained(
    "./",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt=prompt, num_inference_steps=50, guidance_scale=3).images[0]
```

Playground org

What's your Python version?

Python 3.10.13
torch 2.1.2+cu121

I solved it. Check whether your encoder's token limit is overflowing; if so, you can manually set the token limit.
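
For anyone hitting the same thing, a quick way to check is a sketch like the one below, assuming the pipeline is loaded into `pipe` as in the test script above. If `model_max_length` was never read from the checkpoint's `tokenizer_config.json`, transformers falls back to an enormous default, and padding to `'max_length'` then tries to build a list of that size, which is what triggers the OverflowError.

```python
# Diagnostic sketch (assumes `pipe` is the SDXL pipeline loaded above).
# A sane value is 77; an astronomically large number (~1e30) means the
# limit was never set and 'max_length' padding will overflow.
print(pipe.tokenizer.model_max_length)
print(pipe.tokenizer_2.model_max_length)  # SDXL pipelines carry two tokenizers
```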

Could you please share more details about which params I should debug and which config file should be used?
I'm still confused. Thanks a lot.

Can you share the resolution steps? I am getting this error as well.

Manually set the maximum length, like this:

```python
# Cap the tokenizer at CLIP's 77-token limit so that padding to
# 'max_length' no longer overflows:
self.tokenizer.model_max_length = 77

# ...then tokenize as before:
sample['key'] = f'{i:05}'
prompt = self.prompts[i % self.num_prompts]
sample['text_ids'] = self.tokenizer(
    prompt, padding='max_length', max_length=self.tokenizer.model_max_length,
    truncation=True, return_tensors='pt').input_ids[0]
```
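
Applied to the test script at the top of the thread, the same fix would look roughly like this. This is only a sketch, assuming the root cause is a missing `model_max_length` in the local checkpoint's tokenizer configs; 77 is the usual CLIP limit:

```python
from diffusers import DiffusionPipeline
import torch

pipe = DiffusionPipeline.from_pretrained(
    "./",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# Cap both SDXL tokenizers so padding to 'max_length' no longer overflows.
pipe.tokenizer.model_max_length = 77
pipe.tokenizer_2.model_max_length = 77

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt=prompt, num_inference_steps=50, guidance_scale=3).images[0]
```

Adding `"model_max_length": 77` to the checkpoint's `tokenizer/tokenizer_config.json` and `tokenizer_2/tokenizer_config.json` should also fix it persistently, without patching the script.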
