Upload configuration.json
configuration.json  ADDED  (+44 -0)
@@ -0,0 +1,44 @@
+{
+    "framework": "pytorch",
+    "task": "text-to-video-synthesis",
+    "model": {
+        "type": "latent-text-to-video-synthesis",
+        "model_args": {
+            "ckpt_clip": "open_clip_pytorch_model.bin",
+            "ckpt_unet": "text2video_pytorch_model.pth",
+            "ckpt_autoencoder": "../VQGAN_autoencoder.pth",
+            "max_frames": 16,
+            "tiny_gpu": 1
+        },
+        "model_cfg": {
+            "unet_in_dim": 4,
+            "unet_dim": 320,
+            "unet_y_dim": 768,
+            "unet_context_dim": 1024,
+            "unet_out_dim": 4,
+            "unet_dim_mult": [
+                1,
+                2,
+                4,
+                4
+            ],
+            "unet_num_heads": 8,
+            "unet_head_dim": 64,
+            "unet_res_blocks": 2,
+            "unet_attn_scales": [
+                1,
+                0.5,
+                0.25
+            ],
+            "unet_dropout": 0.1,
+            "temporal_attention": "True",
+            "num_timesteps": 1000,
+            "mean_type": "eps",
+            "var_type": "fixed_small",
+            "loss_type": "mse"
+        }
+    },
+    "pipeline": {
+        "type": "latent-text-to-video-synthesis"
+    }
+}
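For anyone wiring this file into a downstream script, below is a minimal, self-contained sketch of how the configuration might be read and inspected with Python's standard json module. The file name and key names come straight from the JSON above; the load_config helper and the model-directory argument are hypothetical conveniences, not part of this repository.

import json
from pathlib import Path

def load_config(model_dir: str) -> dict:
    # Hypothetical helper: read configuration.json from a downloaded model directory.
    with open(Path(model_dir) / "configuration.json", "r", encoding="utf-8") as f:
        return json.load(f)

if __name__ == "__main__":
    cfg = load_config(".")  # assumes the file sits in the current directory
    model_args = cfg["model"]["model_args"]
    model_cfg = cfg["model"]["model_cfg"]

    # Checkpoint files the synthesis pipeline expects to find relative to this config.
    print("CLIP checkpoint:    ", model_args["ckpt_clip"])
    print("UNet checkpoint:    ", model_args["ckpt_unet"])
    print("Autoencoder weights:", model_args["ckpt_autoencoder"])

    # A few of the UNet hyperparameters used to rebuild the denoising network.
    print("UNet base dim:", model_cfg["unet_dim"], "dim_mult:", model_cfg["unet_dim_mult"])
    print("Frames per clip:", model_args["max_frames"])

One detail worth noting when consuming this file: "temporal_attention" is stored as the string "True" rather than a JSON boolean, so a loader should compare against the string (or coerce it explicitly) instead of treating the value as a bool.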