upd
model.py CHANGED
@@ -28,6 +28,11 @@ from cldm.model import create_model, load_state_dict
 from ldm.models.diffusion.ddim import DDIMSampler
 from share import *
 
+from PIL import Image
+import gradio as gr
+import numpy as np
+import base64
+
 ORIGINAL_MODEL_NAMES = {
     'canny': 'control_sd15_canny.pth',
     'hough': 'control_sd15_mlsd.pth',
@@ -632,9 +637,9 @@ class Model:
 
         results = [x_samples[i] for i in range(num_samples)]
 
-        print(f"type of results ^^ - {type(results)}")
-        print(f"
-
+        tmp = """print(f"type of results ^^ - {type(results)}")
+        print(f"length of results list ^^ - {len(results)}")
+        print(f"value of results[0] ^^ - {results[0]}")
         filename = results[0] #['name']
         #def encode(img_array):
         print(f"type of input_image ^^ - {type(input_image)}")
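The hunk above does not delete the debug prints: it opens a triple-quoted string assigned to tmp, and the matching closing quotes only arrive in the last hunk of this commit, so everything in between (the remaining prints, the desc = line, and the raw HTML) becomes inert string content rather than executable code. A minimal sketch of that disable-by-string pattern, with placeholder lines standing in for the real block:

# Sketch only: the placeholder lines below stand in for the debug/HTML block
# that this commit captures inside a string so that none of it executes.
tmp = """print(f"type of results ^^ - {type(results)}")
print(f"length of results list ^^ - {len(results)}")
<h4>raw HTML can sit here too, since it is now just text</h4>
"""
print(type(tmp))  # plain str; the captured block never runs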
@@ -655,7 +660,8 @@ class Model:
         htmltag = '<img src= "data:image/jpeg;base64,' + encoded_string + '" alt="Original Image"/></div> <img src= "https://ysharma-controlnet-image-comparison.hf.space/file=' + filename + '" alt="Control Net Image"/>'
         #https://ysharma-controlnet-image-comparison.hf.space/file=/tmp/tmpg4qx22xy.png - sample
         print(f"htmltag is ^^ - {htmltag}")
-
+
+        desc = 
         <!DOCTYPE html>
         <html lang="en">
         <head>
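htmltag stitches together two images for comparison: the uploaded input inlined as a base64 data: URI via encoded_string, and the generated result served through the Space's /file= route via filename. The hunk does not show how encoded_string is produced; the sketch below is one plausible way to build it with the newly imported base64 and PIL modules. The helper name to_base64_jpeg and the dummy input array are illustrative assumptions, not code from the file.

import base64
import io

import numpy as np
from PIL import Image


def to_base64_jpeg(img_array: np.ndarray) -> str:
    """Encode an RGB uint8 array as base64 JPEG text for a data: URI."""
    buffer = io.BytesIO()
    Image.fromarray(img_array).save(buffer, format="JPEG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")


# Dummy input standing in for the app's input_image array.
encoded_string = to_base64_jpeg(np.zeros((64, 64, 3), dtype=np.uint8))
htmltag = ('<img src="data:image/jpeg;base64,' + encoded_string
           + '" alt="Original Image"/>')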
@@ -704,12 +710,13 @@ class Model:
         <body>
         <div style="margin: 3rem;
                 font-family: Roboto, sans-serif">
-        <
-        </div> <div> <div class="image-slider"> <div>
+        <h4 style="color: green"> Observe the Ingenuity of ControlNet by comparing Input and Output images</h4>
+        </div> <div> <div class="image-slider"> <div> + htmltag + "</div> </div> </body> </html> "
         #return desc
+        """
 
 
-        return [detected_map] + results, desc
+        return results[0] #[detected_map] + results, desc
 
     @torch.inference_mode()
     def process_depth(self, input_image, prompt, a_prompt, n_prompt,
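For reference, a hedged sketch of what the now-disabled block appears to be assembling: a single HTML snippet that pairs the base64-encoded input with the generated result inside the image-slider wrapper, suitable for something like a gr.HTML output. The function name build_comparison_html is an assumption made for illustration, not code from the file.

def build_comparison_html(encoded_string: str, filename: str) -> str:
    """Compose the input-vs-output markup sketched in the commented-out desc block."""
    original = ('<img src="data:image/jpeg;base64,' + encoded_string
                + '" alt="Original Image"/>')
    result = ('<img src="https://ysharma-controlnet-image-comparison.hf.space/file='
              + filename + '" alt="Control Net Image"/>')
    return ('<div style="margin: 3rem; font-family: Roboto, sans-serif">'
            '<h4 style="color: green"> Observe the Ingenuity of ControlNet'
            ' by comparing Input and Output images</h4>'
            '<div class="image-slider">' + original + result + '</div></div>')

Note that the return value also changes: the method now returns results[0] on its own instead of the earlier ([detected_map] + results, desc) pair, so any output wiring that expected a gallery plus an HTML component would need to expect a single result instead.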