The Dataset Viewer preview shows 100 rows (IMAGE_ID 00000000–00000099) with the following columns:

Column | Type | Notes
---|---|---
IMAGE_ID | string | 8-character zero-padded identifier
CAPTION | string | caption text (a single distinct value in the preview)
IMG | image | widths in the preview range from 293 px to 8.01k px
The images can be captioned with PaliGemma 2. First, load the dataset:
```python
from datasets import load_dataset
from tqdm import tqdm

dataset = load_dataset("WeiChow/splash", split="train")  # default split assumed to be "train"
for item in tqdm(dataset):
    ...  # each item exposes the IMAGE_ID, CAPTION, and IMG fields
```
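As a quick sanity check before running any captioning, the schema and a single example can be inspected. A minimal sketch, assuming the split is named `train` and `IMG` is stored as a `datasets` Image feature (so it decodes to a `PIL.Image`):

```python
from datasets import load_dataset

dataset = load_dataset("WeiChow/splash", split="train")

print(dataset.features)                       # expected columns: IMAGE_ID, CAPTION, IMG
example = dataset[0]
print(example["IMAGE_ID"], example["CAPTION"])
example["IMG"].save("sample.png")             # IMG decodes to a PIL.Image
```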
To generate captions:
```python
from transformers import PaliGemmaProcessor, PaliGemmaForConditionalGeneration
import torch
from datasets import load_dataset
from tqdm import tqdm
from termcolor import cprint

dataset = load_dataset("WeiChow/splash", split="train")  # default split assumed to be "train"

model_id = "google/paligemma2-3b-ft-docci-448"
model = PaliGemmaForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="cuda"
).eval()
processor = PaliGemmaProcessor.from_pretrained(model_id)

for item in tqdm(dataset):
    model_inputs = (
        processor(text="caption en", images=item["IMG"], return_tensors="pt")
        .to(torch.bfloat16)
        .to(model.device)
    )
    input_len = model_inputs["input_ids"].shape[-1]
    with torch.inference_mode():
        generation = model.generate(**model_inputs, max_new_tokens=30, do_sample=False)
    generation = generation[0][input_len:]
    decoded = processor.decode(generation, skip_special_tokens=True)
    print(item["IMAGE_ID"])
    cprint(decoded, "cyan")
```
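If the generated captions need to be persisted, they can be collected into a JSONL file. A minimal sketch that reuses `dataset`, `processor`, and `model` from the snippet above; the output path and record layout are illustrative, not part of the dataset:

```python
import json

# Hypothetical output file; adjust the path as needed.
with open("splash_captions.jsonl", "w") as f:
    for item in tqdm(dataset):
        model_inputs = (
            processor(text="caption en", images=item["IMG"], return_tensors="pt")
            .to(torch.bfloat16)
            .to(model.device)
        )
        input_len = model_inputs["input_ids"].shape[-1]
        with torch.inference_mode():
            generation = model.generate(**model_inputs, max_new_tokens=30, do_sample=False)
        decoded = processor.decode(generation[0][input_len:], skip_special_tokens=True)
        # One JSON record per image, keyed by the dataset's IMAGE_ID.
        f.write(json.dumps({"IMAGE_ID": item["IMAGE_ID"], "caption": decoded}) + "\n")
```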