pscotti committed on
Commit d7b858a
1 Parent(s): 899fca2

Upload utils.py with huggingface_hub

Files changed (1)
  1. utils.py +565 -0
utils.py ADDED
@@ -0,0 +1,565 @@
import numpy as np
from torchvision import transforms
import torch
import torch.nn as nn
import torch.nn.functional as F
import PIL
import random
import os
import matplotlib.pyplot as plt
import pandas as pd
import math
import webdataset as wds
import tempfile
from torchvision.utils import make_grid

import json
from torchmetrics.image.fid import FrechetInceptionDistance
from PIL import Image
import requests
import io
import time

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def is_interactive():
    import __main__ as main
    return not hasattr(main, '__file__')

def seed_everything(seed=0, cudnn_deterministic=True):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if cudnn_deterministic:
        torch.backends.cudnn.deterministic = True
    else:
        ## needs to be False to use conv3D
        print('Note: not using cudnn.deterministic')

def np_to_Image(x):
    if x.ndim==4:
        x=x[0]
    return PIL.Image.fromarray((x.transpose(1, 2, 0)*127.5+128).clip(0,255).astype('uint8'))

def torch_to_Image(x):
    if x.ndim==4:
        x=x[0]
    return transforms.ToPILImage()(x)

def Image_to_torch(x):
    try:
        x = (transforms.ToTensor()(x)[:3].unsqueeze(0)-.5)/.5
    except:
        x = (transforms.ToTensor()(x[0])[:3].unsqueeze(0)-.5)/.5
    return x

def torch_to_matplotlib(x, device=device):
    if torch.mean(x)>10:
        x = (x.permute(0, 2, 3, 1)).clamp(0, 255).to(torch.uint8)
    else:
        x = (x.permute(0, 2, 3, 1) * 255).clamp(0, 255).to(torch.uint8)
    if device=='cpu':
        return x[0]
    else:
        return x.cpu().numpy()[0]

def pairwise_cosine_similarity(A, B, dim=1, eps=1e-8):
    # https://stackoverflow.com/questions/67199317/pytorch-cosine-similarity-nxn-elements
    numerator = A @ B.T
    A_l2 = torch.mul(A, A).sum(axis=dim)
    B_l2 = torch.mul(B, B).sum(axis=dim)
    denominator = torch.max(torch.sqrt(torch.outer(A_l2, B_l2)), torch.tensor(eps))
    return torch.div(numerator, denominator)

def batchwise_pearson_correlation(Z, B):
    # Calculate means
    Z_mean = torch.mean(Z, dim=1, keepdim=True)
    B_mean = torch.mean(B, dim=1, keepdim=True)

    # Subtract means
    Z_centered = Z - Z_mean
    B_centered = B - B_mean

    # Calculate Pearson correlation coefficient
    numerator = Z_centered @ B_centered.T
    Z_centered_norm = torch.linalg.norm(Z_centered, dim=1, keepdim=True)
    B_centered_norm = torch.linalg.norm(B_centered, dim=1, keepdim=True)
    denominator = Z_centered_norm @ B_centered_norm.T

    pearson_correlation = (numerator / denominator)
    return pearson_correlation

def batchwise_cosine_similarity(Z, B):
    Z = Z.flatten(1)
    B = B.flatten(1).T
    Z_norm = torch.linalg.norm(Z, dim=1, keepdim=True)  # Size (n, 1).
    B_norm = torch.linalg.norm(B, dim=0, keepdim=True)  # Size (1, b).
    cosine_similarity = ((Z @ B) / (Z_norm @ B_norm)).T
    return cosine_similarity

def prenormed_batchwise_cosine_similarity(Z, B):
    return (Z @ B.T).T

def cosine_similarity(Z, B, l=0):
    Z = nn.functional.normalize(Z, p=2, dim=1)
    B = nn.functional.normalize(B, p=2, dim=1)
    # if l>0, use distribution normalization
    # https://twitter.com/YifeiZhou02/status/1716513495087472880
    Z = Z - l * torch.mean(Z, dim=0)
    B = B - l * torch.mean(B, dim=0)
    cosine_similarity = (Z @ B.T).T
    return cosine_similarity

def topk(similarities, labels, k=5):
    if k > similarities.shape[0]:
        k = similarities.shape[0]
    topsum = 0
    for i in range(k):
        topsum += torch.sum(torch.argsort(similarities, axis=1)[:, -(i+1)] == labels) / len(labels)
    return topsum

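# Illustrative usage of the retrieval helpers above (a hedged sketch, not part of the
# original file; `preds` and `targs` are hypothetical (batch, dim) embedding tensors):
#   sims = batchwise_cosine_similarity(preds, targs)   # (batch, batch) similarity matrix
#   labels = torch.arange(len(preds)).to(sims.device)  # paired sample sits on the diagonal
#   top1 = topk(sims, labels, k=1)                     # fraction retrieved correctly at rank 1
#   top5 = topk(sims, labels, k=5)
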
def get_non_diagonals(a):
    a = torch.triu(a, diagonal=1) + torch.tril(a, diagonal=-1)
    # make diagonals -1
    a = a.fill_diagonal_(-1)
    return a

def gather_features(image_features, voxel_features, accelerator):
    all_image_features = accelerator.gather(image_features.contiguous())
    if voxel_features is not None:
        all_voxel_features = accelerator.gather(voxel_features.contiguous())
        return all_image_features, all_voxel_features
    return all_image_features

def soft_clip_loss(preds, targs, temp=0.125): #, distributed=False, accelerator=None):
    # if not distributed:
    clip_clip = (targs @ targs.T)/temp
    brain_clip = (preds @ targs.T)/temp
    # else:
    #     all_targs = gather_features(targs, None, accelerator)
    #     clip_clip = (targs @ all_targs.T)/temp
    #     brain_clip = (preds @ all_targs.T)/temp

    loss1 = -(brain_clip.log_softmax(-1) * clip_clip.softmax(-1)).sum(-1).mean()
    loss2 = -(brain_clip.T.log_softmax(-1) * clip_clip.softmax(-1)).sum(-1).mean()

    loss = (loss1 + loss2)/2
    return loss

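# Minimal sketch of calling soft_clip_loss (illustrative only; the tensors below are random
# placeholders, not real model outputs). Inputs are expected to be L2-normalized embeddings
# so that the target-target similarity matrix acts as soft labels:
#   preds = nn.functional.normalize(torch.randn(8, 768), dim=-1)
#   targs = nn.functional.normalize(torch.randn(8, 768), dim=-1)
#   loss = soft_clip_loss(preds, targs, temp=0.125)
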
def soft_siglip_loss(preds, targs, temp, bias):
    temp = torch.exp(temp)

    logits = (preds @ targs.T) * temp + bias
    # diagonals (aka paired samples) should be >0 and off-diagonals <0
    labels = (targs @ targs.T) - 1 + (torch.eye(len(targs)).to(targs.dtype).to(targs.device))

    loss1 = -torch.sum(nn.functional.logsigmoid(logits * labels[:len(preds)])) / len(preds)
    loss2 = -torch.sum(nn.functional.logsigmoid(logits.T * labels[:,:len(preds)])) / len(preds)
    loss = (loss1 + loss2)/2
    return loss

def mixco_hard_siglip_loss(preds, targs, temp, bias, perm, betas):
    temp = torch.exp(temp)

    probs = torch.diag(betas)
    probs[torch.arange(preds.shape[0]).to(preds.device), perm] = 1 - betas

    logits = (preds @ targs.T) * temp + bias
    labels = probs * 2 - 1
    #labels = torch.eye(len(targs)).to(targs.dtype).to(targs.device) * 2 - 1

    loss1 = -torch.sum(nn.functional.logsigmoid(logits * labels)) / len(preds)
    loss2 = -torch.sum(nn.functional.logsigmoid(logits.T * labels)) / len(preds)
    loss = (loss1 + loss2)/2
    return loss

def mixco(voxels, beta=0.15, s_thresh=0.5, perm=None, betas=None, select=None):
    if perm is None:
        perm = torch.randperm(voxels.shape[0])
    voxels_shuffle = voxels[perm].to(voxels.device, dtype=voxels.dtype)
    if betas is None:
        betas = torch.distributions.Beta(beta, beta).sample([voxels.shape[0]]).to(voxels.device, dtype=voxels.dtype)
    if select is None:
        select = (torch.rand(voxels.shape[0]) <= s_thresh).to(voxels.device)
    betas_shape = [-1] + [1]*(len(voxels.shape)-1)
    voxels[select] = voxels[select] * betas[select].reshape(*betas_shape) + \
        voxels_shuffle[select] * (1 - betas[select]).reshape(*betas_shape)
    betas[~select] = 1
    return voxels, perm, betas, select

def mixco_clip_target(clip_target, perm, select, betas):
    clip_target_shuffle = clip_target[perm]
    clip_target[select] = clip_target[select] * betas[select].reshape(-1, 1) + \
        clip_target_shuffle[select] * (1 - betas[select]).reshape(-1, 1)
    return clip_target

def mixco_nce(preds, targs, temp=0.1, perm=None, betas=None, select=None, distributed=False,
              accelerator=None, local_rank=None, bidirectional=True):
    brain_clip = (preds @ targs.T)/temp

    if perm is not None and betas is not None and select is not None:
        probs = torch.diag(betas)
        probs[torch.arange(preds.shape[0]).to(preds.device), perm] = 1 - betas

        loss = -(brain_clip.log_softmax(-1) * probs).sum(-1).mean()
        if bidirectional:
            loss2 = -(brain_clip.T.log_softmax(-1) * probs.T).sum(-1).mean()
            loss = (loss + loss2)/2
        return loss
    else:
        loss = F.cross_entropy(brain_clip, torch.arange(brain_clip.shape[0]).to(brain_clip.device))
        if bidirectional:
            loss2 = F.cross_entropy(brain_clip.T, torch.arange(brain_clip.shape[0]).to(brain_clip.device))
            loss = (loss + loss2)/2
        return loss

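# Hedged sketch of how the MixCo helpers above can fit together in a training step
# (`model` and `clip_target` are hypothetical stand-ins, not defined in this file):
#   voxels, perm, betas, select = mixco(voxels)        # mix voxels with a shuffled copy of the batch
#   preds = nn.functional.normalize(model(voxels).flatten(1), dim=-1)
#   targs = nn.functional.normalize(clip_target.flatten(1), dim=-1)
#   loss = mixco_nce(preds, targs, perm=perm, betas=betas, select=select)
#   # (mixco_clip_target can similarly mix the CLIP targets if desired)
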
def count_params(model):
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('param counts:\n{:,} total\n{:,} trainable'.format(total, trainable))
    return trainable

def image_grid(imgs, rows, cols):
    w, h = imgs[0].size
    grid = PIL.Image.new('RGB', size=(cols*w, rows*h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i%cols*w, i//cols*h))
    return grid

def check_loss(loss):
    if loss.isnan().any():
        raise ValueError('NaN loss')

def cosine_anneal(start, end, steps):
    return end + (start - end)/2 * (1 + torch.cos(torch.pi*torch.arange(steps)/(steps-1)))

def resize(img, img_size=128):
    if img.ndim == 3: img = img[None]
    return nn.functional.interpolate(img, size=(img_size, img_size), mode='nearest')

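# Example of using cosine_anneal to pre-compute a per-epoch schedule (illustrative values only):
#   lrs = cosine_anneal(1e-4, 1e-6, steps=120)  # tensor of length 120, decaying from 1e-4 to 1e-6
#   lr_for_epoch_10 = lrs[10]
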
import braceexpand
def get_dataloaders(
    batch_size,
    image_var='images',
    num_devices=None,
    num_workers=None,
    train_url=None,
    val_url=None,
    meta_url=None,
    num_train=None,
    num_val=None,
    cache_dir="/scratch/tmp/wds-cache",
    seed=0,
    voxels_key="nsdgeneral.npy",
    val_batch_size=None,
    to_tuple=["voxels", "images", "trial"],
    local_rank=0,
    world_size=1,
):
    print("Getting dataloaders...")
    assert image_var == 'images'

    def my_split_by_node(urls):
        return urls

    train_url = list(braceexpand.braceexpand(train_url))
    val_url = list(braceexpand.braceexpand(val_url))

    if num_devices is None:
        num_devices = torch.cuda.device_count()

    if num_workers is None:
        num_workers = num_devices

    if num_train is None:
        metadata = json.load(open(meta_url))
        num_train = metadata['totals']['train']
    if num_val is None:
        metadata = json.load(open(meta_url))
        num_val = metadata['totals']['val']

    if val_batch_size is None:
        val_batch_size = batch_size

    global_batch_size = batch_size * num_devices
    num_batches = math.floor(num_train / global_batch_size)
    num_worker_batches = math.floor(num_batches / num_workers)
    if num_worker_batches == 0: num_worker_batches = 1

    print("\nnum_train", num_train)
    print("global_batch_size", global_batch_size)
    print("batch_size", batch_size)
    print("num_workers", num_workers)
    print("num_batches", num_batches)
    print("num_worker_batches", num_worker_batches)

    # train_url = train_url[local_rank:world_size]
    train_data = wds.WebDataset(train_url, resampled=False, cache_dir=cache_dir, nodesplitter=my_split_by_node)\
        .shuffle(500, initial=500, rng=random.Random(42))\
        .decode("torch")\
        .rename(images="jpg;png", voxels=voxels_key, trial="trial.npy", coco="coco73k.npy", reps="num_uniques.npy")\
        .to_tuple(*to_tuple)#\
        # .batched(batch_size, partial=True)#\
        # .with_epoch(num_worker_batches)

    # BATCH SIZE SHOULD BE NONE!!! FOR TRAIN AND VAL | resampled=True for train | .batched(val_batch_size, partial=False)
    train_dl = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=1, shuffle=False)

    # Validation
    print("val_batch_size", val_batch_size)
    val_data = wds.WebDataset(val_url, resampled=False, cache_dir=cache_dir, nodesplitter=my_split_by_node)\
        .shuffle(500, initial=500, rng=random.Random(42))\
        .decode("torch")\
        .rename(images="jpg;png", voxels=voxels_key, trial="trial.npy", coco="coco73k.npy", reps="num_uniques.npy")\
        .to_tuple(*to_tuple)#\
        # .batched(val_batch_size, partial=True)
    val_dl = torch.utils.data.DataLoader(val_data, batch_size=val_batch_size, num_workers=1, shuffle=False, drop_last=True)

    return train_dl, val_dl, num_train, num_val

pixcorr_preprocess = transforms.Compose([
    transforms.Resize(425, interpolation=transforms.InterpolationMode.BILINEAR),
])
def pixcorr(images, brains, nan=True):
    all_images_flattened = pixcorr_preprocess(images).reshape(len(images), -1)
    all_brain_recons_flattened = pixcorr_preprocess(brains).view(len(brains), -1)
    if nan:
        corrmean = torch.nanmean(torch.diag(batchwise_pearson_correlation(all_images_flattened, all_brain_recons_flattened)))
    else:
        corrmean = torch.mean(torch.diag(batchwise_pearson_correlation(all_images_flattened, all_brain_recons_flattened)))
    return corrmean

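# Illustrative check of pixcorr (hedged; random tensors stand in for real ground-truth images
# and reconstructions, both given as (batch, 3, H, W)):
#   gt     = torch.rand(4, 3, 425, 425)
#   recons = torch.rand(4, 3, 256, 256)   # resized to 425 px internally before comparison
#   score  = pixcorr(gt, recons)          # mean pixelwise Pearson r over the batch
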
def select_annotations(annots, random=True):
    """
    There are 5 annotations per image. Select one of them for each image.
    """
    for i, b in enumerate(annots):
        t = ''
        if random:
            # select random non-empty annotation
            while t == '':
                rand = torch.randint(5, (1,1))[0][0]
                t = b[rand]
        else:
            # select first non-empty annotation
            for j in range(5):
                if b[j] != '':
                    t = b[j]
                    break
        if i == 0:
            txt = np.array(t)
        else:
            txt = np.vstack((txt, t))
    txt = txt.flatten()
    return txt

def add_saturation(image, alpha=2):
    gray_image = 0.2989 * image[:, 0, :, :] + 0.5870 * image[:, 1, :, :] + 0.1140 * image[:, 2, :, :]
    gray_image = gray_image.unsqueeze(1).expand_as(image)
    saturated_image = alpha * image + (1 - alpha) * gray_image
    return torch.clamp(saturated_image, 0, 1)

def find_prompt_by_image_number(image_number, data):
    target_image_filename = f"img_t{image_number}.jpg"
    for entry in data:
        if 'target' in entry and entry['target'].endswith(target_image_filename):
            return entry['prompt']
    return -1

def compute_negative_l1_losses(preds, targets):
    batch_size = preds.size(0)

    # Expand dimensions for broadcasting
    expanded_preds = preds.unsqueeze(1)      # Shape: [batch_size, 1, 100]
    expanded_targets = targets.unsqueeze(0)  # Shape: [1, batch_size, 100]

    # Compute pairwise L1 differences
    l1_diffs = torch.abs(expanded_preds - expanded_targets)  # Shape: [batch_size, batch_size, 100]

    # Mask the diagonal to exclude positive pairs
    mask = torch.eye(batch_size).bool().to(l1_diffs.device)
    l1_diffs[mask] = 0

    # Sum L1 differences for each sample against all negatives
    negative_losses = l1_diffs.sum(dim=-1).mean()

    return negative_losses


def unclip_recon(x, diffusion_engine, vector_suffix,
                 num_samples=1, offset_noise_level=0.04):
    from generative_models.sgm.util import append_dims
    assert x.ndim==3
    if x.shape[0]==1:
        x = x[[0]]
    with torch.no_grad(), torch.cuda.amp.autocast(dtype=torch.float16), diffusion_engine.ema_scope():
        z = torch.randn(num_samples,4,96,96).to(device) # starting noise, can change to VAE outputs of initial image for img2img

        # clip_img_tokenized = clip_img_embedder(image)
        # tokens = clip_img_tokenized
        token_shape = x.shape
        tokens = x
        c = {"crossattn": tokens.repeat(num_samples,1,1), "vector": vector_suffix.repeat(num_samples,1)}

        tokens = torch.randn_like(x)
        uc = {"crossattn": tokens.repeat(num_samples,1,1), "vector": vector_suffix.repeat(num_samples,1)}

        for k in c:
            c[k], uc[k] = map(lambda y: y[k][:num_samples].to(device), (c, uc))

        noise = torch.randn_like(z)
        sigmas = diffusion_engine.sampler.discretization(diffusion_engine.sampler.num_steps)
        sigma = sigmas[0].to(z.device)

        if offset_noise_level > 0.0:
            noise = noise + offset_noise_level * append_dims(
                torch.randn(z.shape[0], device=z.device), z.ndim
            )
        noised_z = z + noise * append_dims(sigma, z.ndim)
        noised_z = noised_z / torch.sqrt(
            1.0 + sigmas[0] ** 2.0
        )  # Note: hardcoded to DDPM-like scaling. need to generalize later.

        def denoiser(x, sigma, c):
            return diffusion_engine.denoiser(diffusion_engine.model, x, sigma, c)

        samples_z = diffusion_engine.sampler(denoiser, noised_z, cond=c, uc=uc)
        samples_x = diffusion_engine.decode_first_stage(samples_z)
        samples = torch.clamp((samples_x*.8+.2), min=0.0, max=1.0)
        # samples = torch.clamp((samples_x + .5) / 2.0, min=0.0, max=1.0)
        return samples

def soft_cont_loss(student_preds, teacher_preds, teacher_aug_preds, temp=0.125):
    teacher_teacher_aug = (teacher_preds @ teacher_aug_preds.T)/temp
    teacher_teacher_aug_t = (teacher_aug_preds @ teacher_preds.T)/temp
    student_teacher_aug = (student_preds @ teacher_aug_preds.T)/temp
    student_teacher_aug_t = (teacher_aug_preds @ student_preds.T)/temp

    loss1 = -(student_teacher_aug.log_softmax(-1) * teacher_teacher_aug.softmax(-1)).sum(-1).mean()
    loss2 = -(student_teacher_aug_t.log_softmax(-1) * teacher_teacher_aug_t.softmax(-1)).sum(-1).mean()

    loss = (loss1 + loss2)/2
    return loss

def iterate_range(start, length, batchsize):
    batch_count = int(length // batchsize)
    residual = int(length % batchsize)
    for i in range(batch_count):
        yield range(start+i*batchsize, start+(i+1)*batchsize), batchsize
    if (residual > 0):
        yield range(start+batch_count*batchsize, start+length), residual


# Torch fwRF
def get_value(_x):
    return np.copy(_x.data.cpu().numpy())


# subject: nsd subject index between 1-8
# mode: vision, imagery
# stimtype: all, simple, complex, concepts
# average: whether to average across trials, will produce x that is (stimuli, 1, voxels)
# nest: whether to nest the data according to stimuli, will produce x that is (stimuli, trials, voxels)
import pickle
def condition_average(x, y, cond, nest=False):
    idx, idx_count = np.unique(cond, return_counts=True)
    idx_list = [np.array(cond)==i for i in np.sort(idx)]
    if nest:
        avg_x = torch.zeros((len(idx), idx_count.max(), x.shape[1]), dtype=torch.float32)
    else:
        avg_x = torch.zeros((len(idx), 1, x.shape[1]), dtype=torch.float32)
    for i, m in enumerate(idx_list):
        if nest:
            avg_x[i] = x[m]
        else:
            avg_x[i] = torch.mean(x[m], axis=0)

    return avg_x, y, len(idx_count)

def load_nsd_mental_imagery(subject, mode, stimtype="all", average=False, nest=False):
    # This file has a bunch of information about the stimuli and cue associations that will make loading it easier
    img_stim_file = "imagery/nsd_imagery/data/nsddata_stimuli/stimuli/nsdimagery_stimuli.pkl3"
    ex_file = open(img_stim_file, 'rb')
    imagery_dict = pickle.load(ex_file)
    ex_file.close()
    # Indicates what experiments trials belong to
    exps = imagery_dict['exps']
    # Indicates the cues for different stimuli
    cues = imagery_dict['cues']
    # Maps the cues to the stimulus image information
    image_map = imagery_dict['image_map']
    # Organize the indices of the trials according to the modality and the type of stimuli
    cond_idx = {
        'visionsimple': np.arange(len(exps))[exps=='visA'],
        'visioncomplex': np.arange(len(exps))[exps=='visB'],
        'visionconcepts': np.arange(len(exps))[exps=='visC'],
        'visionall': np.arange(len(exps))[np.logical_or(np.logical_or(exps=='visA', exps=='visB'), exps=='visC')],
        'imagerysimple': np.arange(len(exps))[np.logical_or(exps=='imgA_1', exps=='imgA_2')],
        'imagerycomplex': np.arange(len(exps))[np.logical_or(exps=='imgB_1', exps=='imgB_2')],
        'imageryconcepts': np.arange(len(exps))[np.logical_or(exps=='imgC_1', exps=='imgC_2')],
        'imageryall': np.arange(len(exps))[np.logical_or(
            np.logical_or(
                np.logical_or(exps=='imgA_1', exps=='imgA_2'),
                np.logical_or(exps=='imgB_1', exps=='imgB_2')),
            np.logical_or(exps=='imgC_1', exps=='imgC_2'))]}
    # Load normalized betas
    x = torch.load("imagery/nsd_imagery/data/preprocessed_data/subject{}/nsd_imagery.pt".format(subject)).requires_grad_(False).to("cpu")
    # Find the trial indices conditioned on the type of trials we want to load
    cond_im_idx = {n: [image_map[c] for c in cues[idx]] for n, idx in cond_idx.items()}
    conditionals = cond_im_idx[mode+stimtype]
    # Stimuli file is of shape (18,3,425,425), these can be converted back into PIL images using transforms.ToPILImage()
    y = torch.load("imagery/nsd_imagery/data/nsddata_stimuli/stimuli/imagery_stimuli_18.pt").requires_grad_(False).to("cpu")
    # Prune the beta file down to specific experimental mode/stimuli type
    x = x[cond_idx[mode+stimtype]]
    # If stimtype is not all, then prune the image data down to the specific stimuli type
    if stimtype == "simple":
        y = y[:6]
    elif stimtype == "complex":
        y = y[6:12]
    elif stimtype == "concepts":
        y = y[12:]

    # Average or nest the betas across trials
    if average or nest:
        x, y, sample_count = condition_average(x, y, conditionals, nest=nest)
    else:
        x = x.reshape((x.shape[0], 1, x.shape[1]))

    # print(x.shape)
    return x, y

def bb_soft_clip_loss(preds, targs, temp=0.125):
    temp = np.exp(temp)
    clip_clip = (targs @ targs.T)/temp
    brain_brain = (preds @ preds.T)/temp

    # loss1 = -(brain_brain.log_softmax(-1) * clip_clip.softmax(-1)).sum(-1).mean()
    # loss2 = -(brain_brain.T.log_softmax(-1) * clip_clip.softmax(-1)).sum(-1).mean()
    # loss = (loss1 + loss2)/2

    loss = nn.functional.kl_div(brain_brain.log_softmax(-1), clip_clip.softmax(-1), reduction='batchmean')
    return loss #* 1e5

def bb_cossim_loss(preds, targs, temp=None):
    clip_clip = (targs @ targs.T)
    brain_brain = (preds @ preds.T)
    loss = 1 - nn.functional.cosine_similarity(brain_brain, clip_clip).mean()
    return loss

def load_images_to_numpy(folder_path):
    file_names = [f for f in os.listdir(folder_path) if (f.endswith('.png') or f.endswith('.jpg') or f.endswith('.jpeg'))]
    image_data = []
    image_names = []
    for file_name in file_names:
        image_path = os.path.join(folder_path, file_name)
        image_names.append(file_name)
        with Image.open(image_path) as img:
            img_array = np.array(img)
            if img_array.shape[1] != 224:
                img = img.resize((224, 224))
                img_array = np.array(img)
            image_data.append(img_array)
    images_np = np.stack(image_data, axis=0)
    return images_np, image_names