ysharma HF staff committed on
Commit
2df6a62
·
1 Parent(s): 3dafdd9
Files changed (1) hide show
  1. model.py +75 -1
model.py CHANGED
@@ -631,8 +631,82 @@ class Model:
631
  127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
632
 
633
  results = [x_samples[i] for i in range(num_samples)]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
634
 
635
- return [detected_map] + results
636
 
637
  @torch.inference_mode()
638
  def process_depth(self, input_image, prompt, a_prompt, n_prompt,
 
631
  127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
632
 
633
  results = [x_samples[i] for i in range(num_samples)]
634
+
635
+ filename = results['name']
636
+ #def encode(img_array):
637
+ print(f"type of input_image ^^ - {type(input_image)}")
638
+ # Convert NumPy array to image
639
+ img = Image.fromarray(input_image)
640
+ # Save image to file
641
+ img_path = "temp_image.jpeg"
642
+ img.save(img_path)
643
+ # Encode image file using Base64
644
+ with open(img_path, "rb") as image_file:
645
+ encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
646
+ # Print the partial encoded string
647
+ print(encoded_string[:20])
648
+ #return encoded_string
649
+ #def create_imgcomp(input_image, filename):
650
+ #encoded_string = encode(input_image)
651
+ #dummyfun(result_gallery)
652
+ htmltag = '<img src= "data:image/jpeg;base64,' + encoded_string + '" alt="Original Image"/></div> <img src= "https://ysharma-controlnet-image-comparison.hf.space/file=' + filename + '" alt="Control Net Image"/>'
653
+ #https://ysharma-controlnet-image-comparison.hf.space/file=/tmp/tmpg4qx22xy.png - sample
654
+ print(f"htmltag is ^^ - {htmltag}")
655
+ desc = """
656
+ <!DOCTYPE html>
657
+ <html lang="en">
658
+ <head>
659
+ <style>
660
+ body {
661
+ background: rgb(17, 17, 17);
662
+ }
663
+
664
+ .image-slider {
665
+ margin-left: 3rem;
666
+ position: relative;
667
+ display: inline-block;
668
+ line-height: 0;
669
+ }
670
+
671
+ .image-slider img {
672
+ user-select: none;
673
+ max-width: 400px;
674
+ }
675
+
676
+ .image-slider > div {
677
+ position: absolute;
678
+ width: 25px;
679
+ max-width: 100%;
680
+ overflow: hidden;
681
+ resize: horizontal;
682
+ }
683
+
684
+ .image-slider > div:before {
685
+ content: '';
686
+ display: block;
687
+ width: 13px;
688
+ height: 13px;
689
+ overflow: hidden;
690
+ position: absolute;
691
+ resize: horizontal;
692
+ right: 3px;
693
+ bottom: 3px;
694
+ background-clip: content-box;
695
+ background: linear-gradient(-45deg, black 50%, transparent 0);
696
+ -webkit-filter: drop-shadow(0 0 2px black);
697
+ filter: drop-shadow(0 0 2px black);
698
+ }
699
+ </style>
700
+ </head>
701
+ <body>
702
+ <div style="margin: 3rem;
703
+ font-family: Roboto, sans-serif">
704
+ <h1 style="color: green"> Testing image comp</h1>
705
+ </div> <div> <div class="image-slider"> <div> """ + htmltag + "</div> </div> </body> </html> "
706
+ #return desc
707
+
708
 
709
+ return [detected_map] + results, desc
710
 
711
  @torch.inference_mode()
712
  def process_depth(self, input_image, prompt, a_prompt, n_prompt,