Create README.md
- Tuned on https://huggingface.co/datasets/svjack/video-dataset-Lily-Bikini-organized
- Goal: test whether Mochi-1 can learn a concept (an object or a person) from a tiny dataset

# Installation
```bash
pip install git+https://github.com/huggingface/diffusers.git peft transformers torch sentencepiece opencv-python
```

# Example
## Landscape Example
```python
from diffusers import MochiPipeline
from diffusers.utils import export_to_video
import torch

# Load the base Mochi-1 pipeline and apply the LoRA tuned on the Lily dataset.
pipe = MochiPipeline.from_pretrained("genmo/mochi-1-preview", torch_dtype=torch.float16)
pipe.load_lora_weights("svjack/mochi_Lily_Bikini_early_lora")

# Memory savers: CPU offloading plus VAE slicing/tiling keep peak VRAM low.
# (Sequential offload is the slower, more aggressive option; one offload mode is usually enough.)
pipe.enable_model_cpu_offload()
pipe.enable_sequential_cpu_offload()
pipe.vae.enable_slicing()
pipe.vae.enable_tiling()

# Fixed seed so the result is reproducible.
i = 50
generator = torch.Generator("cpu").manual_seed(i)
prompt = "Lily: The video features a woman with blonde hair wearing a black one-piece swimsuit. She is standing on a sandy beach with the ocean in the background, where waves are visible crashing onto the shore. The sky is clear with a few scattered clouds, suggesting a sunny day. The woman appears to be holding a piece of driftwood or a similar object in her right hand. Her stance and expression suggest she is posing for the camera."

pipeline_args = {
    "prompt": prompt,
    "num_inference_steps": 64,
    "height": 480,
    "width": 848,
    "max_sequence_length": 1024,
    "output_type": "np",
    "num_frames": 19,
    "generator": generator,
}

# Generate the clip and write it to disk.
video = pipe(**pipeline_args).frames[0]
export_to_video(video, "Lily_Lora.mp4")

# Preview the result in a notebook.
from IPython import display
display.clear_output(wait=True)
display.Video("Lily_Lora.mp4")
```
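
For a baseline comparison against the plain base model, the LoRA can be removed and the same settings re-run. This is not part of the original snippet; it is a minimal sketch that assumes `pipe`, `pipeline_args`, and `i` from the example above are still in scope, and the output file name is illustrative:

```python
# Drop the LoRA adapter and regenerate with identical settings for comparison.
pipe.unload_lora_weights()
pipeline_args["generator"] = torch.Generator("cpu").manual_seed(i)  # reseed for a fair comparison
base_video = pipe(**pipeline_args).frames[0]
export_to_video(base_video, "Lily_base.mp4")
```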

- With LoRA

<video controls autoplay src="https://cdn-uploads.huggingface.co/production/uploads/634dffc49b777beec3bc6448/IABei-Sv13bXv1dnWkCI2.mp4"></video>

- With LoRA + Upscale (an illustrative upscaling sketch follows below)

<video controls autoplay src="https://cdn-uploads.huggingface.co/production/uploads/634dffc49b777beec3bc6448/VVafjjROHoTubGK7H2z99.mp4"></video>
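
The upscaling step behind the clip above is not included in this repo, and the actual upscaler is not specified. As a placeholder only, a simple 2x bicubic resize of the exported clip with OpenCV (already installed above) could look like the sketch below; a dedicated video upscaler would give better results, and the output file name is illustrative:

```python
# Placeholder upscale: 2x bicubic resize of the exported clip with OpenCV.
import cv2
from diffusers.utils import export_to_video

cap = cv2.VideoCapture("Lily_Lora.mp4")
frames = []
while True:
    ok, frame = cap.read()
    if not ok:
        break
    frame = cv2.resize(frame, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
    # OpenCV decodes BGR; export_to_video expects RGB frames (floats in [0, 1]).
    frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) / 255.0)
cap.release()

export_to_video(frames, "Lily_Lora_upscaled.mp4")
```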