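"""Gradio demo: CPU text-to-image generation with Stable Diffusion.

The user chooses between a tiny test pipeline (fast, low quality) and
Stable Diffusion v1.4 (slower, better quality), and the app reports how
long each generation took.
"""
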
import time

import gradio as gr
import psutil
import torch
from diffusers import DiffusionPipeline
from huggingface_hub import snapshot_download
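
# Assumed dependencies (a requirements.txt sketch; exact pins untested):
#   gradio, torch, diffusers, transformers, psutil, huggingface_hub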

# Number of physical CPU cores (excluding hyperthreads); fall back to 1
NUM_CPU_CORES = psutil.cpu_count(logical=False) or 1

# Cap the number of threads to the available physical cores
MAX_THREADS = min(8, NUM_CPU_CORES)

# Device and hardware configuration
DEVICE = "cpu"

# Model options (chosen for CPU and memory constraints)
MODEL_OPTIONS = {
    "Medium AI model (slower, better quality)": "CompVis/stable-diffusion-v1-4",
    "Small AI model (fast, lower quality)": "hf-internal-testing/tiny-stable-diffusion-pipe",
}

# Default to the fastest model and a modest resolution for limited resources
DEFAULT_MODEL_KEY = "Small AI model (fast, lower quality)"
DEFAULT_MODEL_ID = MODEL_OPTIONS[DEFAULT_MODEL_KEY]
DEFAULT_IMAGE_SIZE = 512

# Cache models to avoid reloading
PIPELINES = {}

def load_pipeline(model_id):
    """Return a cached pipeline, downloading the model on first use."""
    if model_id in PIPELINES:
        return PIPELINES[model_id]
    # snapshot_download fetches the whole repo (including LFS weight files);
    # a per-model subdirectory keeps the two models from clobbering each other.
    model_path = snapshot_download(repo_id=model_id, local_dir=f"./models/{model_id}")
    pipe = DiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float32)
    pipe.to(DEVICE)
    PIPELINES[model_id] = pipe
    return pipe
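
# Example (hypothetical): warm the model cache at startup so the first
# request does not pay the download cost.
# load_pipeline(DEFAULT_MODEL_ID)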

def generate_image(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_images, model_choice):
    if not prompt:
        raise gr.Error("Please enter a description for the image.")

    # Keep PyTorch from oversubscribing the CPU
    torch.set_num_threads(MAX_THREADS)

    pipe = load_pipeline(MODEL_OPTIONS[model_choice])

    # No-op on CPU-only hosts; frees cached GPU memory if CUDA is available
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    generator = torch.Generator(device=DEVICE)
    if not randomize_seed:
        generator = generator.manual_seed(int(seed))  # sliders may deliver floats

    start_time = time.time()
    images = pipe(
        prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=num_images,
        generator=generator,
    ).images

    end_time = time.time()
    generation_time = end_time - start_time

    return images, f"Generation time: {generation_time:.2f} seconds"

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=5):
            prompt = gr.Textbox(label="Image description")
            negative_prompt = gr.Textbox(label="Negative prompt", value="")
        with gr.Column(scale=1):
            model_choice = gr.Radio(
                choices=list(MODEL_OPTIONS.keys()),
                label="Model quality",
                value=DEFAULT_MODEL_KEY,
            )
    with gr.Row():
        seed = gr.Slider(label="Seed", minimum=0, maximum=1000000, step=1, value=42)
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

    with gr.Row():
        width = gr.Slider(label="Width", minimum=512, maximum=1024, step=64, value=DEFAULT_IMAGE_SIZE)
        height = gr.Slider(label="Height", minimum=512, maximum=1024, step=64, value=DEFAULT_IMAGE_SIZE)
    with gr.Row():
        guidance_scale = gr.Slider(label="Guidance Scale", minimum=0, maximum=20, step=0.5, value=7.5)
        num_inference_steps = gr.Slider(label="Inference steps", minimum=20, maximum=50, step=10, value=20)

    with gr.Row():
        num_images = gr.Slider(label="Number of images", minimum=1, maximum=4, step=1, value=1)

    run_button = gr.Button("Generate")
    gallery = gr.Gallery(label="Generated images")
    status_text = gr.Textbox(label="Status")

    run_button.click(
        fn=generate_image,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_images, model_choice],
        outputs=[gallery, status_text],
    )

demo.launch(share=True)
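
# Run with `python app.py`; share=True requests a temporary public
# *.gradio.live URL in addition to the local server.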