Commit ed764b4
Parent(s): 557a2dd
Update app.py
app.py CHANGED
@@ -27,12 +27,7 @@ def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, tok
 
   # cast to dtype of text_encoder
   dtype = text_encoder.get_input_embeddings().weight.dtype
-
-  try:
-    embeds.to(dtype)
-  except:
-    continue
-
+
   # add the token in tokenizer
   token = token if token is not None else trained_token
   num_added_tokens = tokenizer.add_tokens(token)
@@ -83,8 +78,11 @@ for model in models_list:
   file.close()
   images.append(f"{model_id}/{i}.jpeg")
   model_content["images"] = images
-
-
+  #if token cannot be loaded, skip it
+  try:
+    learned_token = load_learned_embed_in_clip(f"{model_id}/learned_embeds.bin", pipe.text_encoder, pipe.tokenizer, token_name)
+  except:
+    continue
   model_content["token"] = learned_token
   models.append(model_content)
 
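In short, the commit moves error handling out of the helper and into the model loop: the removed try/except/continue inside load_learned_embed_in_clip had no enclosing loop to continue, while at loop level a concept whose learned_embeds.bin cannot be loaded is simply skipped instead of breaking the whole Space. The snippet below is a minimal, self-contained sketch of that loop-level pattern with a stub loader and stand-in model ids; only the load_learned_embed_in_clip name, models_list, learned_token, and the try/except/continue structure come from the diff, everything else is illustrative.

    def load_learned_embed_in_clip(path, text_encoder, tokenizer, token=None):
        # Stub standing in for the real helper: fail for one path to simulate
        # a concept whose embeddings file cannot be read.
        if "broken" in path:
            raise RuntimeError("could not load learned_embeds.bin")
        return token

    models_list = ["user/concept-a", "user/broken-concept", "user/concept-c"]
    models = []

    for model_id in models_list:
        model_content = {"id": model_id}
        token_name = f"<{model_id.split('/')[-1]}>"

        # if token cannot be loaded, skip it -- same pattern as the added hunk
        try:
            learned_token = load_learned_embed_in_clip(
                f"{model_id}/learned_embeds.bin", None, None, token_name
            )
        except Exception:
            continue

        model_content["token"] = learned_token
        models.append(model_content)

    print([m["id"] for m in models])  # "user/broken-concept" is skipped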