End of training
- README.md +49 -0
- image_encoder/config.json +23 -0
- image_encoder/model.safetensors +3 -0
- image_processor/preprocessor_config.json +27 -0
- model_index.json +28 -0
- prior/config.json +20 -0
- prior/diffusion_pytorch_model.safetensors +3 -0
- scheduler/scheduler_config.json +10 -0
- text_encoder/config.json +25 -0
- text_encoder/model.safetensors +3 -0
- tokenizer/merges.txt +0 -0
- tokenizer/special_tokens_map.json +24 -0
- tokenizer/tokenizer_config.json +30 -0
- tokenizer/vocab.json +0 -0
- val_imgs_grid.png +0 -0
README.md
ADDED
@@ -0,0 +1,49 @@
---
license: creativeml-openrail-m
base_model: kandinsky-community/kandinsky-2-2-prior
datasets:
- lambdalabs/naruto-blip-captions
tags:
- kandinsky
- text-to-image
- diffusers
inference: true
---

# Finetuning - daehan17/prior-weightupdate

This pipeline was finetuned from **kandinsky-community/kandinsky-2-2-prior** on the **lambdalabs/naruto-blip-captions** dataset. Below are some example images generated with the finetuned pipeline from the prompt "A robot pokemon, 4k photo":

![val_imgs_grid](./val_imgs_grid.png)

## Pipeline usage

You can use the pipeline like so:

```python
from diffusers import DiffusionPipeline
import torch

# Load the finetuned prior (maps text to CLIP image embeddings) and the
# stock Kandinsky 2.2 decoder (maps image embeddings to pixels).
pipe_prior = DiffusionPipeline.from_pretrained("daehan17/prior-weightupdate", torch_dtype=torch.float16)
pipe_t2i = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)

prompt = "A robot pokemon, 4k photo"
# The prior produces CLIP image embeddings (and negative embeddings) for the prompt.
image_embeds, negative_image_embeds = pipe_prior(prompt, guidance_scale=1.0).to_tuple()
# The decoder turns those embeddings into an image.
image = pipe_t2i(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds).images[0]
image.save("my_image.png")
```
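
If you only need the finetuned prior weights, a minimal sketch (using the standard `diffusers` classes listed in `model_index.json` below) is to load the `prior` subfolder on its own and swap it into the stock prior pipeline:

```python
import torch
from diffusers import PriorTransformer, KandinskyV22PriorPipeline

# Load only the finetuned PriorTransformer weights from this repo.
prior = PriorTransformer.from_pretrained(
    "daehan17/prior-weightupdate", subfolder="prior", torch_dtype=torch.float16
)
# Reuse the stock prior pipeline, swapping in the finetuned prior.
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", prior=prior, torch_dtype=torch.float16
)
```

Passing a preloaded component to `from_pretrained` overrides the corresponding subfolder of the base repo.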
## Training info

These are the key hyperparameters used during training:

* Epochs: 7
* Learning rate: 1e-05
* Batch size: 1
* Gradient accumulation steps: 4 (effective batch size: 4)
* Image resolution: 768
* Mixed precision: None

More information on all the CLI arguments and the environment is available on the [`wandb` run page](https://wandb.ai/kakooz/text2image-fine-tune/runs/lfxt1lhp).
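
For reference, the prior is trained to denoise CLIP image embeddings conditioned on the text encoding; with `prediction_type: "sample"` (see `scheduler/scheduler_config.json` below), the model regresses the clean embedding directly. The following is a hypothetical sketch of a single training step, reconstructed from the hyperparameters above and this repo's configs, not the actual training script:

```python
# Hypothetical sketch of one prior training step; the real script may differ.
import torch
import torch.nn.functional as F
from diffusers import DDPMScheduler, PriorTransformer

prior = PriorTransformer.from_pretrained("daehan17/prior-weightupdate", subfolder="prior")
noise_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", prediction_type="sample")
optimizer = torch.optim.AdamW(prior.parameters(), lr=1e-5)

def training_step(image_embeds, prompt_embeds, text_hidden_states, text_mask, step):
    # Diffuse the target CLIP image embeddings to a random timestep.
    noise = torch.randn_like(image_embeds)
    t = torch.randint(0, noise_scheduler.config.num_train_timesteps, (image_embeds.shape[0],))
    noisy_embeds = noise_scheduler.add_noise(image_embeds, noise, t)
    pred = prior(
        noisy_embeds,
        timestep=t,
        proj_embedding=prompt_embeds,
        encoder_hidden_states=text_hidden_states,
        attention_mask=text_mask,
    ).predicted_image_embedding
    # prediction_type="sample": regress the clean embedding directly.
    loss = F.mse_loss(pred.float(), image_embeds.float())
    (loss / 4).backward()  # gradient accumulation over 4 micro-batches
    if (step + 1) % 4 == 0:
        optimizer.step()
        optimizer.zero_grad()
    return loss.detach()
```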
image_encoder/config.json
ADDED
@@ -0,0 +1,23 @@
```json
{
  "_name_or_path": "kandinsky-community/kandinsky-2-2-prior",
  "architectures": [
    "CLIPVisionModelWithProjection"
  ],
  "attention_dropout": 0.0,
  "dropout": 0.0,
  "hidden_act": "gelu",
  "hidden_size": 1664,
  "image_size": 224,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "layer_norm_eps": 1e-05,
  "model_type": "clip_vision_model",
  "num_attention_heads": 16,
  "num_channels": 3,
  "num_hidden_layers": 48,
  "patch_size": 14,
  "projection_dim": 1280,
  "torch_dtype": "float16",
  "transformers_version": "4.38.2"
}
```
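
This vision encoder (48 layers, width 1664, patch size 14) projects images into the same 1280-dimensional space the prior predicts into; the pipeline uses it for image interpolation and the zero image embedding. A minimal loading sketch:

```python
from transformers import CLIPVisionModelWithProjection

# Load the vision encoder from this repo's image_encoder/ subfolder.
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "daehan17/prior-weightupdate", subfolder="image_encoder"
)
# image_encoder(pixel_values).image_embeds yields 1280-d CLIP image embeddings.
```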
image_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:dabf125eaf7f339da6b900a7a14f8190f02913b7d46cab80e09cd0755c4ce39c
size 3689910512
```
image_processor/preprocessor_config.json
ADDED
@@ -0,0 +1,27 @@
```json
{
  "crop_size": {
    "height": 224,
    "width": 224
  },
  "do_center_crop": true,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_processor_type": "CLIPImageProcessor",
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "shortest_edge": 224
  }
}
```
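
The processor resizes the shortest edge to 224 (`resample: 3` is bicubic), center-crops to 224×224, rescales by 1/255, and normalizes with the standard CLIP mean/std. A usage sketch (assumes a local image file):

```python
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained(
    "daehan17/prior-weightupdate", subfolder="image_processor"
)
inputs = processor(images=Image.open("my_image.png"), return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])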
model_index.json
ADDED
@@ -0,0 +1,28 @@
```json
{
  "_class_name": "KandinskyV22PriorPipeline",
  "_diffusers_version": "0.27.0.dev0",
  "image_encoder": [
    "transformers",
    "CLIPVisionModelWithProjection"
  ],
  "image_processor": [
    "transformers",
    "CLIPImageProcessor"
  ],
  "prior": [
    "diffusers",
    "PriorTransformer"
  ],
  "scheduler": [
    "diffusers",
    "UnCLIPScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModelWithProjection"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ]
}
```
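
`model_index.json` is what `DiffusionPipeline.from_pretrained` reads to assemble the pipeline: each key names a subfolder, and the pair gives the library and class to instantiate it with. A small sketch:

```python
import torch
from diffusers import KandinskyV22PriorPipeline

pipe = KandinskyV22PriorPipeline.from_pretrained(
    "daehan17/prior-weightupdate", torch_dtype=torch.float16
)
# Each entry in model_index.json becomes an attribute of the pipeline.
print(type(pipe.prior).__name__)      # PriorTransformer
print(type(pipe.scheduler).__name__)  # UnCLIPScheduler
```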
prior/config.json
ADDED
@@ -0,0 +1,20 @@
```json
{
  "_class_name": "PriorTransformer",
  "_diffusers_version": "0.27.0.dev0",
  "_name_or_path": "kandinsky-community/kandinsky-2-2-prior",
  "added_emb_type": "prd",
  "additional_embeddings": 4,
  "attention_head_dim": 64,
  "clip_embed_dim": null,
  "dropout": 0.0,
  "embedding_dim": 1280,
  "embedding_proj_dim": null,
  "embedding_proj_norm_type": null,
  "encoder_hid_proj_type": "linear",
  "norm_in_type": null,
  "num_attention_heads": 32,
  "num_embeddings": 77,
  "num_layers": 20,
  "time_embed_act_fn": "silu",
  "time_embed_dim": null
}
```
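
The prior is a 20-layer transformer (32 heads × 64 dims per head) over 77 embedding slots, predicting 1280-dimensional CLIP image embeddings. A quick size sanity check (a sketch):

```python
from diffusers import PriorTransformer

prior = PriorTransformer.from_pretrained("daehan17/prior-weightupdate", subfolder="prior")
n_params = sum(p.numel() for p in prior.parameters())
# ~1.03B parameters, matching the safetensors size: 2052488952 bytes / 2 bytes per fp16 weight.
print(f"{n_params / 1e9:.2f}B parameters")
```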
prior/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:50437d9fa267163ba717075eb021c71d6f8b9a1734eb71417ed216c79a09c9f3
size 2052488952
```
scheduler/scheduler_config.json
ADDED
@@ -0,0 +1,10 @@
```json
{
  "_class_name": "UnCLIPScheduler",
  "_diffusers_version": "0.27.0.dev0",
  "beta_schedule": "squaredcos_cap_v2",
  "clip_sample": true,
  "clip_sample_range": 10.0,
  "num_train_timesteps": 1000,
  "prediction_type": "sample",
  "variance_type": "fixed_small_log"
}
```
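
The `UnCLIPScheduler` here diffuses embeddings rather than pixels; `prediction_type: "sample"` means the prior outputs the denoised embedding itself, clipped to ±10 by `clip_sample_range`. Loading it standalone (a sketch):

```python
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler.from_pretrained(
    "daehan17/prior-weightupdate", subfolder="scheduler"
)
scheduler.set_timesteps(25)  # e.g. 25 inference steps for the prior
print(scheduler.timesteps)
```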
text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
```json
{
  "_name_or_path": "kandinsky-community/kandinsky-2-2-prior",
  "architectures": [
    "CLIPTextModelWithProjection"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "dropout": 0.0,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_size": 1280,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 5120,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 77,
  "model_type": "clip_text_model",
  "num_attention_heads": 20,
  "num_hidden_layers": 32,
  "pad_token_id": 1,
  "projection_dim": 1280,
  "torch_dtype": "float16",
  "transformers_version": "4.38.2",
  "vocab_size": 49408
}
```
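
This is a CLIP text encoder (32 layers, width 1280) whose pooled projection and token-level hidden states condition the prior. A sketch of encoding a prompt with it:

```python
import torch
from transformers import CLIPTextModelWithProjection, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("daehan17/prior-weightupdate", subfolder="tokenizer")
text_encoder = CLIPTextModelWithProjection.from_pretrained(
    "daehan17/prior-weightupdate", subfolder="text_encoder"
)
tokens = tokenizer("A robot pokemon, 4k photo", padding="max_length",
                   max_length=77, truncation=True, return_tensors="pt")
with torch.no_grad():
    out = text_encoder(**tokens)
print(out.text_embeds.shape)        # torch.Size([1, 1280])
print(out.last_hidden_state.shape)  # torch.Size([1, 77, 1280])
```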
text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:ec310df2af79c318e24d20511b601a591ca8cd4f1fce1d8dff822a356bcdb1f4
size 1389382176
```
tokenizer/merges.txt
ADDED
The diff for this file is too large to render.
tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
```json
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<|endoftext|>",
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
```
tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,30 @@
```json
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "49406": {
      "content": "<|startoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49407": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|startoftext|>",
  "clean_up_tokenization_spaces": true,
  "do_lower_case": true,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 77,
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": "<|endoftext|>"
}
```
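
A standard lower-casing CLIP BPE tokenizer with a 49,408-token vocabulary and a 77-token context. A small sketch showing the special tokens declared above:

```python
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("daehan17/prior-weightupdate", subfolder="tokenizer")
ids = tokenizer("A robot pokemon, 4k photo").input_ids
print(ids[0], ids[-1])             # 49406 49407: <|startoftext|> and <|endoftext|>
print(tokenizer.model_max_length)  # 77
```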
tokenizer/vocab.json
ADDED
The diff for this file is too large to render.
val_imgs_grid.png
ADDED