Spaces: Runtime error

Update app.py

app.py CHANGED
@@ -56,7 +56,7 @@ def download_image(url):
 #device = "cpu" #"cuda" if torch.cuda.is_available() else "cpu"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-print("The model will be running on :: ", device, "device")
+print("The model will be running on :: ", device, " ~device")
 # Convert model parameters and buffers to CPU or Cuda
 
 model_id_or_path = "CompVis/stable-diffusion-v1-4"
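The change above only touches the log line; device selection itself stays cuda:0 when available, else CPU. Not part of this commit, but since app.py also queries torch.backends.mps.is_available() a few lines later, a minimal sketch of folding Apple's MPS backend into the same pattern (assumes PyTorch >= 1.12):

import torch

# Sketch only: pick CUDA first, then MPS, then CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")
print("The model will be running on :: ", device)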
@@ -65,12 +65,11 @@ pipe = StableDiffusionInpaintingPipeline.from_pretrained(
     revision="fp16",
     torch_dtype=torch.double, #float16
     use_auth_token=auth_token
-)
+).to(device)
+#pipe = pipe.to(device)
 #self.register_buffer('n_', ...)
 print ("torch.backends.mps.is_available: ", torch.backends.mps.is_available())
 
-pipe = pipe.to(device)
-
 model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, complex_trans_conv=True)
 
 model = model.to(torch.device(device))
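The edit here chains .to(device) directly onto from_pretrained(...) and comments out the separate pipe = pipe.to(device) call, so the pipeline lands on the chosen device as soon as it is built. A minimal sketch of an equivalent construction, assuming auth_token is defined earlier in app.py and using the current diffusers class name StableDiffusionInpaintPipeline (the commit itself targets the older StableDiffusionInpaintingPipeline wrapper):

import torch
from diffusers import StableDiffusionInpaintPipeline

# Sketch: build the inpainting pipeline and move it to the device in one step.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    use_auth_token=auth_token,  # assumed defined earlier; newer diffusers spell this "token"
).to(device)  # same chained .to(device) pattern as in this commit

As a side note on the kept line, torch_dtype=torch.double upcasts the fp16-revision checkpoint to float64, which costs memory and speed; float16 on CUDA or float32 on CPU is the more common pairing.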
@@ -79,8 +78,8 @@ model.eval() #.half()
 
 weightsPATH = './clipseg/weights/rd64-uni.pth'
 
-state = {'model': model.state_dict()}
-torch.save(state, weightsPATH)
+#state = {'model': model.state_dict()}
+#torch.save(state, weightsPATH)
 
 model.load_state_dict(torch.load(weightsPATH, map_location=torch.device(device)), strict=False) #False
 #model.load_state_dict(torch.load(weightsPATH)['model'])
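This hunk comments out the torch.save of a freshly constructed state_dict. Saving those randomly initialized weights to weightsPATH would overwrite the pretrained rd64-uni.pth checkpoint right before the next line tries to load it, so disabling the save is what lets the pretrained clipseg weights actually be restored. A minimal sketch of the resulting load path, assuming the usual clipseg repo layout (models/clipseg.py) and the device chosen earlier in app.py:

import torch
from models.clipseg import CLIPDensePredT  # module path assumed from the clipseg repo layout

model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, complex_trans_conv=True)
model.eval()

# strict=False tolerates checkpoint keys that CLIPDensePredT itself does not define.
state = torch.load('./clipseg/weights/rd64-uni.pth', map_location=torch.device(device))
model.load_state_dict(state, strict=False)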
@@ -89,8 +88,7 @@ print ("Torch load(model) : ", model)
 print ("Weights : ")
 # print weights
 for k, v in model.named_parameters():
-    print(k, v)
-
+    print(k, v)
 
 imgRes = 256
 
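The loop above prints every parameter tensor in full, which produces a very large log. A lighter, hypothetical variant that only confirms which weights are present would print names and shapes instead:

# Sketch: list parameter names and shapes rather than dumping full tensors.
for name, param in model.named_parameters():
    print(name, tuple(param.shape))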