anton-l (HF staff) committed
Commit a304b1a
1 Parent(s): f99eae7

Upload fp16 weights
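For context, a minimal sketch of how these half-precision weights would be loaded with diffusers. The repo id below is a placeholder and revision="fp16" is an assumption about where this commit lands; the call uses the current diffusers API rather than the 0.2.x API recorded in the configs.

import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id and assumed fp16 revision; torch_dtype keeps the weights in half precision.
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",  # placeholder, not confirmed by this commit
    revision="fp16",                  # assumed branch holding these weights
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")                # fp16 inference is intended for GPU
image = pipe("an astronaut riding a horse").images[0]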
model_index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "_class_name": "StableDiffusionPipeline",
-  "_diffusers_version": "0.2.2",
+  "_diffusers_version": "0.2.3",
   "feature_extractor": [
     "transformers",
     "CLIPFeatureExtractor"
safety_checker/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "./safety_module",
+  "_name_or_path": "./safety_checker",
   "architectures": [
     "StableDiffusionSafetyChecker"
   ],
@@ -68,6 +68,7 @@
     "sep_token_id": null,
     "task_specific_params": null,
     "temperature": 1.0,
+    "tf_legacy_loss": false,
     "tie_encoder_decoder": false,
     "tie_word_embeddings": true,
     "tokenizer_class": null,
@@ -75,7 +76,7 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.21.0.dev0",
+    "transformers_version": "4.21.1",
     "typical_p": 1.0,
     "use_bfloat16": false,
     "vocab_size": 49408
@@ -86,7 +87,7 @@
     "num_attention_heads": 12,
     "num_hidden_layers": 12
   },
-  "torch_dtype": "float32",
+  "torch_dtype": "float16",
   "transformers_version": null,
   "vision_config": {
     "_name_or_path": "",
@@ -133,6 +134,7 @@
     "num_attention_heads": 16,
     "num_beam_groups": 1,
     "num_beams": 1,
+    "num_channels": 3,
     "num_hidden_layers": 24,
     "num_return_sequences": 1,
     "output_attentions": false,
@@ -150,6 +152,7 @@
     "sep_token_id": null,
     "task_specific_params": null,
     "temperature": 1.0,
+    "tf_legacy_loss": false,
     "tie_encoder_decoder": false,
     "tie_word_embeddings": true,
     "tokenizer_class": null,
@@ -157,7 +160,7 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.21.0.dev0",
+    "transformers_version": "4.21.1",
     "typical_p": 1.0,
     "use_bfloat16": false
   },
safety_checker/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:193490b58ef62739077262e833bf091c66c29488058681ac25cf7df3d8190974
-size 1216061799
+oid sha256:1d37ca6e57ace94e4c2f03ed0f67b6dc83e1ef1160892074917aa68b28e2afc1
+size 608098599
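The LFS pointer shrinks from 1,216,061,799 to 608,098,599 bytes, which is what re-saving fp32 weights (4 bytes per parameter) as fp16 (2 bytes per parameter) should give. A quick arithmetic check using only the sizes in this diff:

# Sizes copied from the LFS pointers above.
fp32_bytes = 1_216_061_799
fp16_bytes = 608_098_599
print(fp32_bytes / fp16_bytes)  # ~2.0: each 4-byte float becomes a 2-byte float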
scheduler/scheduler_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "_class_name": "PNDMScheduler",
-  "_diffusers_version": "0.2.2",
+  "_diffusers_version": "0.2.3",
   "beta_end": 0.012,
   "beta_schedule": "scaled_linear",
   "beta_start": 0.00085,
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "openai/clip-vit-large-patch14",
+  "_name_or_path": "./text_encoder",
   "architectures": [
     "CLIPTextModel"
   ],
@@ -18,7 +18,7 @@
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
   "pad_token_id": 1,
-  "torch_dtype": "float32",
-  "transformers_version": "4.21.0.dev0",
+  "torch_dtype": "float16",
+  "transformers_version": "4.21.1",
   "vocab_size": 49408
 }
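The text encoder config now records torch_dtype float16 and a released transformers version. A sketch of loading just this component with transformers; the repo id is a placeholder and revision="fp16" is assumed, while subfolder and torch_dtype are standard from_pretrained arguments:

import torch
from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4",  # placeholder repo id
    subfolder="text_encoder",
    revision="fp16",                  # assumed
    torch_dtype=torch.float16,
)
print(text_encoder.dtype)             # torch.float16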
text_encoder/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:770a47a9ffdcfda0b05506a7888ed714d06131d60267e6cf52765d61cf59fd67
-size 492305335
+oid sha256:88bd85efb0f84e70521633f578715afb2873db4f2615fdfb1f66e99934715865
+size 246184375
tokenizer/tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
   },
   "errors": "replace",
   "model_max_length": 77,
-  "name_or_path": "openai/clip-vit-large-patch14",
+  "name_or_path": "./tokenizer",
   "pad_token": "<|endoftext|>",
   "special_tokens_map_file": "./special_tokens_map.json",
   "tokenizer_class": "CLIPTokenizer",
unet/config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "_class_name": "UNet2DConditionModel",
-  "_diffusers_version": "0.2.2",
+  "_diffusers_version": "0.2.3",
+  "_name_or_path": "./unet",
   "act_fn": "silu",
   "attention_head_dim": 8,
   "block_out_channels": [
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:62d48b4d841a3178511fa453df0dae59b22089ace64609cc9d5353d0a7f37c26
-size 3438354725
+oid sha256:d98edd280d5e040ee77f5802b8e3be3513de757335d1dedc4e495647e7c2d573
+size 1719312805
vae/config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "_class_name": "AutoencoderKL",
-  "_diffusers_version": "0.2.2",
+  "_diffusers_version": "0.2.3",
+  "_name_or_path": "./vae",
   "act_fn": "silu",
   "block_out_channels": [
     128,
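As with the UNet, the VAE config gains a _name_or_path entry and moves to diffusers 0.2.3. Loading it standalone in half precision, under the same placeholder and revision assumptions as above:

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(
    "CompVis/stable-diffusion-v1-4",  # placeholder repo id
    subfolder="vae",
    revision="fp16",                  # assumed
    torch_dtype=torch.float16,
)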
vae/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b134cded8eb78b184aefb8805b6b572f36fa77b255c483665dda931fa0130c5
-size 334707217
+oid sha256:51c8904bc921e1e6f354b5fa8e99a1c82ead2f0540114de21557b8abfbb24ad0
+size 167399505