Update README.md
Fix minor bugs in usage example
README.md CHANGED

@@ -56,7 +56,7 @@ from diffusers import UniDiffuserPipeline
 
 device = "cuda"
 model_id_or_path = "thu-ml/unidiffuser-v0"
-pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path)
+pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
 pipe.to(device)
 
 # Joint image-text generation. The generation task is automatically inferred.
@@ -89,7 +89,7 @@ init_image = init_image.resize((512, 512))
 
 sample = pipe(image=init_image, num_inference_steps=20, guidance_scale=8.0)
 i2t_text = sample.text[0]
-print(
+print(i2t_text)
 
 # Image variation can be performed with a image-to-text generation followed by a text-to-image generation:
 sample = pipe(prompt=i2t_text, num_inference_steps=20, guidance_scale=8.0)
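For context, a minimal sketch of the corrected usage example after this change, reconstructed from the two hunks above plus standard diffusers calls. The imports and the way `init_image` is loaded are assumptions, since that code sits outside the changed hunks; the `input.png` path is purely illustrative.

```python
import torch
from PIL import Image
from diffusers import UniDiffuserPipeline

device = "cuda"
model_id_or_path = "thu-ml/unidiffuser-v0"
# Fixed line: load the pipeline in fp16 to match the CUDA device.
pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
pipe.to(device)

# Assumption: the README loads init_image elsewhere; per the second hunk's
# context it is resized to 512x512 before being passed to the pipeline.
init_image = Image.open("input.png").convert("RGB").resize((512, 512))

# Image-to-text generation; the task is inferred from the inputs.
sample = pipe(image=init_image, num_inference_steps=20, guidance_scale=8.0)
i2t_text = sample.text[0]
# Fixed line: print the generated caption.
print(i2t_text)

# Image variation: image-to-text followed by text-to-image.
sample = pipe(prompt=i2t_text, num_inference_steps=20, guidance_scale=8.0)
variation_image = sample.images[0]
```

The added `torch_dtype=torch.float16` keeps the loaded weights consistent with running on a CUDA device, and the completed `print(i2t_text)` makes the image-to-text step's output visible before it is reused as the prompt for the variation step.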