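# Gradio demo: score an input image with a pretrained EDM score network and a
# patch-based flow model (msma's ScoreFlow), returning an anomaly score, a
# patch-level heatmap overlay, and a comparison against reference Imagenette scores.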
import json
from functools import cache
from pickle import load

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image as Image
import torch
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

from msma import ScoreFlow, build_model_from_pickle, config_presets


@cache
def load_model(modeldir, preset="edm2-img64-s-fid", device="cpu"):
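    """Load a locally trained ScoreFlow model, restoring its flow weights from ``modeldir``."""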
model = ScoreFlow(preset, num_flows=8, device=device)
model.flow.load_state_dict(torch.load(f"{modeldir}/nb8/{preset}/flow.pt"))
    return model


@cache
def load_model_from_hub(preset, device):
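    """Build the score network from a preset, then fetch the PatchFlow config and
    safetensors weights from the Hugging Face Hub and load them into a ScoreFlow model.
    """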
scorenet = build_model_from_pickle(preset)
hf_config = hf_hub_download(
repo_id="ahsanMah/localizing-edm",
subfolder=preset,
filename="config.json",
cache_dir="/tmp/",
)
with open(hf_config, "rb") as f:
model_params = json.load(f)
print("Loaded:", model_params)
hf_checkpoint = hf_hub_download(
repo_id="ahsanMah/localizing-edm",
subfolder=preset,
filename="model.safetensors",
cache_dir="/tmp/",
)
model = ScoreFlow(scorenet, device=device, **model_params['PatchFlow'])
model.load_state_dict(load_file(hf_checkpoint), strict=True)
    return model


@cache
def load_reference_scores(model_dir):
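    """Load the cached reference scores (``arr_0`` in ``refscores.npz``) for a model directory."""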
    with np.load(f"{model_dir}/refscores.npz") as f:
ref_nll = f["arr_0"]
    return ref_nll


def compute_gmm_likelihood(x_score, model_dir):
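    """Score features with the fitted GMM, returning the negative log-likelihood,
    its percentile among the reference scores, and the reference scores themselves.
    """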
with open(f"{model_dir}/gmm.pkl", "rb") as f:
clf = load(f)
nll = -clf.score(x_score)
ref_nll = load_reference_scores(model_dir)
percentile = (ref_nll < nll).mean() * 100
    return nll, percentile, ref_nll


def plot_against_reference(nll, ref_nll):
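    """Plot a histogram of the reference scores with the image's score marked as a vertical line."""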
fig, ax = plt.subplots()
ax.hist(ref_nll, label="Reference Scores")
ax.axvline(nll, label="Image Score", c="red", ls="--")
plt.legend()
fig.tight_layout()
    return fig


def plot_heatmap(img: Image.Image, heatmap: np.ndarray):
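    """Map per-patch (negative) likelihoods to a heatmap, clip it to the 80th-99.9th
    percentile range, and blend it over the input image.
    """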
cmap = plt.get_cmap("gist_heat")
h = -heatmap[0, 0].copy()
qmin, qmax = np.quantile(h, 0.8), np.quantile(h, 0.999)
h = np.clip(h, a_min=qmin, a_max=qmax)
h = (h - h.min()) / (h.max() - h.min())
h = cmap(h, bytes=True)[:, :, :3]
h = Image.fromarray(h).resize(img.size, resample=Image.Resampling.BILINEAR)
im = Image.blend(img, h, alpha=0.6)
    return im


def run_inference(input_img, preset="edm2-img64-s-fid"):
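    """Resize the input to 64x64, compute per-patch likelihoods and the global GMM
    anomaly score, and return the score text, heatmap overlay, and reference histogram.
    """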
device = "cuda" if torch.cuda.is_available() else "cpu"
# img = center_crop_imagenet(64, img)
input_img = input_img.resize(size=(64, 64), resample=Image.Resampling.LANCZOS)
with torch.inference_mode():
img = np.array(input_img)
img = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0)
img = img.float().to(device)
# model = load_model(modeldir="models", preset=preset, device=device)
model = load_model_from_hub(preset=preset, device=device)
img_likelihood = model(img).cpu().numpy()
# img_likelihood = model.scorenet(img).square().sum(1).sum(1).contiguous().float().cpu().unsqueeze(1).numpy()
# print(img_likelihood.shape, img_likelihood.dtype)
img = torch.nn.functional.interpolate(img, size=64, mode="bilinear")
x = model.scorenet(img)
x = x.square().sum(dim=(2, 3, 4)) ** 0.5
nll, pct, ref_nll = compute_gmm_likelihood(
x.cpu(), model_dir=f"models/{preset}"
)
outstr = f"Anomaly score: {nll:.3f} / {pct:.2f} percentile"
histplot = plot_against_reference(nll, ref_nll)
heatmapplot = plot_heatmap(input_img, img_likelihood)
return outstr, heatmapplot, histplot
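

# Gradio UI: upload an image and pick a score-model preset; outputs the anomaly
# score text, the patch-level heatmap overlay, and a histogram against Imagenette.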
demo = gr.Interface(
fn=run_inference,
inputs=[
gr.Image(type="pil", label="Input Image"),
        gr.Dropdown(choices=list(config_presets.keys()), value="edm2-img64-s-fid", label="Score Model"),
],
outputs=[
"text",
gr.Image(label="Anomaly Heatmap", min_width=64),
gr.Plot(label="Comparing to Imagenette"),
],
examples=[["goldfish.JPEG", "edm2-img64-s-fid"]],
)


if __name__ == "__main__":
demo.launch()