import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
import torch
import time
import psutil

# Number of physical CPU cores (excluding hyperthreads); psutil may return
# None on some platforms, so fall back to logical cores, then to 1
NUM_CPU_CORES = psutil.cpu_count(logical=False) or psutil.cpu_count(logical=True) or 1

# Cap the number of threads at the available physical cores
MAX_THREADS = min(8, NUM_CPU_CORES)
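# Note: torch.set_num_threads (used in generate_image) sets PyTorch's
# intra-op thread pool; oversubscribing beyond physical cores tends to
# slow CPU inference on small shared hosts rather than speed it up.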

# Device and hardware configuration
DEVICE = "cpu"

# Model Options (optimized for CPU and memory constraints)
MODEL_OPTIONS = {
    "Medium Quality (Faster)": "stabilityai/stable-diffusion-2-base",
    "Fastest (Draft Quality)": "hf-internal-testing/tiny-stable-diffusion-pipe",
}

# Default to fastest model and lower image size for limited resources
DEFAULT_MODEL_ID = MODEL_OPTIONS["Fastest (Draft Quality)"]
DEFAULT_IMAGE_SIZE = 512  # Lower default resolution

# Cache models to avoid reloading
PIPELINES = {}

def load_pipeline(model_id):
    """Load a pipeline once and serve subsequent requests from the cache."""
    if model_id not in PIPELINES:
        # float32 on CPU: float16 kernels are poorly supported off-GPU
        pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
        pipe.to(DEVICE)
        PIPELINES[model_id] = pipe
    return PIPELINES[model_id]
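
# Optional tweaks (assumptions, not part of the original app):
#   - Warm the cache at startup so the first request skips the load cost:
#         load_pipeline(DEFAULT_MODEL_ID)
#   - Attention slicing trades a little speed for lower peak RAM:
#         pipe.enable_attention_slicing()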

def generate_image(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_images, model_choice):
    if not prompt:
        raise gr.Error("Please enter a description for the image.")

    torch.set_num_threads(MAX_THREADS)  # Set the maximum number of threads

    pipe = load_pipeline(MODEL_OPTIONS[model_choice])

    # Freeing the CUDA cache is a no-op on this CPU-only setup; guard it so
    # the intent is explicit if the app is ever moved to a GPU host
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    # Draw a fresh seed when requested so repeated runs differ
    if randomize_seed:
        seed = random.randint(0, np.iinfo(np.int32).max)
    generator = torch.Generator(device=DEVICE).manual_seed(int(seed))

    start_time = time.time()
    images = pipe(
        prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=num_images,
        generator=generator,
    ).images

    end_time = time.time()
    generation_time = end_time - start_time

    return images, f"Generation time: {generation_time:.2f} seconds"
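
# Illustrative smoke test (hypothetical values; bypasses the UI):
#
#     images, status = generate_image(
#         "a red cube on a table", "", seed=0, randomize_seed=False,
#         width=64, height=64, guidance_scale=7.5, num_inference_steps=2,
#         num_images=1, model_choice="Fastest (Draft Quality)",
#     )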

# Gradio interface (minimal sketch: components inferred from the click-handler
# inputs; labels, ranges, and defaults are illustrative assumptions)
with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Image description")
    negative_prompt = gr.Textbox(label="Negative prompt")
    model_choice = gr.Dropdown(list(MODEL_OPTIONS.keys()), value="Fastest (Draft Quality)", label="Model")
    seed = gr.Slider(0, np.iinfo(np.int32).max, step=1, value=0, label="Seed")
    randomize_seed = gr.Checkbox(value=True, label="Randomize seed")
    width = gr.Slider(256, 768, step=64, value=DEFAULT_IMAGE_SIZE, label="Width")
    height = gr.Slider(256, 768, step=64, value=DEFAULT_IMAGE_SIZE, label="Height")
    guidance_scale = gr.Slider(1.0, 15.0, step=0.5, value=7.5, label="Guidance scale")
    num_inference_steps = gr.Slider(1, 50, step=1, value=20, label="Inference steps")
    num_images = gr.Slider(1, 4, step=1, value=1, label="Images per prompt")

    run_button = gr.Button("Generate")
    gallery = gr.Gallery(label="Generated images")
    status_text = gr.Textbox(label="Status")

    run_button.click(
        fn=generate_image,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_images, model_choice],
        outputs=[gallery, status_text],  # Output both the gallery and status text
    )

demo.launch()