RoniFinTech committed on
Commit
fb3f880
•
1 Parent(s): 88b96f1
Files changed (4)
  1. Dockerfile.hf +28 -0
  2. README.md +7 -1
  3. main.py +64 -0
  4. requirements.txt +7 -0
Dockerfile.hf ADDED
@@ -0,0 +1,28 @@
+ # Use the official Python 3.9 image
+ FROM python:3.9
+
+ # Set the working directory to /code
+ WORKDIR /code
+
+ # Copy the requirements file into the container at /code
+ COPY ./requirements.txt /code/requirements.txt
+
+ # Install the dependencies listed in requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ # Set up a new user named "user" with user ID 1000
+ RUN useradd -m -u 1000 user
+ # Switch to the "user" user
+ USER user
+ # Set home to the user's home directory
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ # Set the working directory to the user's home directory
+ WORKDIR $HOME/app
+
+ # Copy the current directory contents into the container at $HOME/app, setting the owner to the user
+ COPY --chown=user . $HOME/app
+
+ # Start the FastAPI app defined in main.py on port 7860, the default port expected by Spaces
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,10 +1,16 @@
  ---
- title: Sd Xl Refiner 1.0
+ title: FastApi Stable Diffusion Xl Refiner 1.0
+
  emoji: 📈
+
  colorFrom: pink
+
  colorTo: gray
+
  sdk: docker
+
  pinned: false
+
  license: openrail
  ---

main.py ADDED
@@ -0,0 +1,64 @@
+ from io import BytesIO
+
+ import torch
+ from diffusers import DiffusionPipeline
+ from fastapi import FastAPI
+ from fastapi.responses import StreamingResponse
+
+ # load both base & refiner
+ base = DiffusionPipeline.from_pretrained(
+     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+ )
+ base.to("cuda")
+ # base.enable_model_cpu_offload()
+ base.enable_attention_slicing()
+ refiner = DiffusionPipeline.from_pretrained(
+     "stabilityai/stable-diffusion-xl-refiner-1.0",
+     text_encoder_2=base.text_encoder_2,
+     vae=base.vae,
+     torch_dtype=torch.float16,
+     use_safetensors=True,
+     variant="fp16",
+ )
+ refiner.to("cuda")
+ # refiner.enable_model_cpu_offload()
+ refiner.enable_attention_slicing()
+
+ # Create a new FastAPI app instance
+ app = FastAPI()
+
+
+ # Define a function to handle GET requests at `/generate`.
+ # The generate() route runs the SDXL base pipeline and then the refiner,
+ # and streams the resulting image back to the caller as a PNG.
+ @app.get("/generate")
+ def generate(text: str):
+     """
+     Generate an image with the SDXL base + refiner pipelines.
+     Note: the prompt is currently hardcoded below; the `text` query parameter is accepted but not used yet.
+     """
+     # Define how many steps to run and what fraction of them runs on each expert (80/20 split)
+     n_steps = 40
+     high_noise_frac = 0.8
+     negative = "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly. bad anatomy, disfigured, poorly drawn face, mutation, mutated, extra limb, ugly, disgusting, poorly drawn hands, missing limb, floating limbs, disconnected limbs, malformed hands, blurry, mutated hands and fingers, watermark, watermarked, oversaturated, censored, distorted hands, amputation, missing hands, obese, doubled face, double hands, two women, anime style, cartoon, toon."
+     prompt = "Designs should play with different textures and layering but stick to a monochrome palette. Think leather jackets over mesh tops, or satin draped over matte cotton. in a studio. zoomed-in. single model."
+
+     # run both experts
+     image = base(
+         prompt=prompt,
+         negative_prompt=negative,
+         num_inference_steps=n_steps,
+         denoising_end=high_noise_frac,
+         output_type="latent",
+     ).images
+     final_image = refiner(
+         prompt=prompt,
+         negative_prompt=negative,
+         num_inference_steps=n_steps,
+         denoising_start=high_noise_frac,
+         image=image,
+     ).images[0]
+
+     # Serialize the PIL image to PNG bytes and stream it back to the client
+     buffer = BytesIO()
+     final_image.save(buffer, format="PNG")
+     buffer.seek(0)
+     return StreamingResponse(buffer, media_type="image/png")
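For reference, a minimal client sketch for exercising the new endpoint once the Space is running (the localhost host/port and the output filename are assumptions; the `text` query parameter must be supplied but is ignored by the current code):

import requests  # assumed to be available in the client environment

# Call the /generate route and save the streamed PNG to disk.
resp = requests.get(
    "http://localhost:7860/generate",
    params={"text": "placeholder"},  # required by the route signature, unused by the current implementation
    timeout=600,  # SDXL base + refiner inference can take a while
)
resp.raise_for_status()
with open("output.png", "wb") as f:
    f.write(resp.content)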
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ fastapi==0.100.1
+ pydantic==2.1.1
+ pylint==2.17.5
+ uvicorn>=0.23.2
+ torch==2.0.1
+ transformers==4.31.0
+ accelerate==0.21.0
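main.py also imports DiffusionPipeline from diffusers, which does not appear in this list; a hedged sketch of the additional pin that would likely be needed (the exact version is an assumption, any diffusers release with SDXL 1.0 support, roughly 0.19 or newer):

diffusers>=0.19.0  # assumption: a diffusers version that ships the SDXL base/refiner 1.0 pipelines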