cmckinle committed
Commit 600aee6 · verified · 1 Parent(s): 5ca31bc

Update app.py

Files changed (1)
  1. app.py +57 -221
app.py CHANGED
@@ -1,257 +1,93 @@
 import gradio as gr
 import torch
-from transformers import AutoFeatureExtractor, AutoModelForImageClassification, pipeline
-#from transformers import pipeline
-import os
 from numpy import exp
-import pandas as pd
 from PIL import Image
 import urllib.request
 import uuid
-uid=uuid.uuid4()
 
-models=[
     "cmckinle/sdxl-flux-detector",
     "umm-maybe/AI-image-detector",
     "Organika/sdxl-detector",
-    #"arnolfokam/ai-generated-image-detector",
 ]
-
-pipe0 = pipeline("image-classification", f"{models[0]}")
-pipe1 = pipeline("image-classification", f"{models[1]}")
-pipe2 = pipeline("image-classification", f"{models[2]}")
-
-
-fin_sum=[]
-def image_classifier0(image):
-    labels = ["AI","Real"]
-    outputs = pipe0(image)
-    results = {}
-    result_test={}
-    for idx,result in enumerate(outputs):
-        results[labels[idx]] = outputs[idx]['score']
-    #print (result_test)
-    #for result in outputs:
-    #    results[result['label']] = result['score']
-    #print (results)
-    fin_sum.append(results)
-    return results
-def image_classifier1(image):
-    labels = ["AI","Real"]
-    outputs = pipe1(image)
-    results = {}
-    result_test={}
-    for idx,result in enumerate(outputs):
-        results[labels[idx]] = outputs[idx]['score']
-    #print (result_test)
-    #for result in outputs:
-    #    results[result['label']] = result['score']
-    #print (results)
-    fin_sum.append(results)
-    return results
-def image_classifier2(image):
-    labels = ["AI","Real"]
-    outputs = pipe2(image)
-    results = {}
-    result_test={}
-    for idx,result in enumerate(outputs):
-        results[labels[idx]] = outputs[idx]['score']
-    #print (result_test)
-    #for result in outputs:
-    #    results[result['label']] = result['score']
-    #print (results)
-    fin_sum.append(results)
-    return results
 
 def softmax(vector):
-    e = exp(vector)
-    return e / e.sum()
 
-
-
-def aiornot0(image):
-    labels = ["AI", "Real"]
-    mod=models[0]
-    feature_extractor0 = AutoFeatureExtractor.from_pretrained(mod)
-    model0 = AutoModelForImageClassification.from_pretrained(mod)
-    input = feature_extractor0(image, return_tensors="pt")
-    with torch.no_grad():
-        outputs = model0(**input)
-    logits = outputs.logits
-    probability = softmax(logits)
-    px = pd.DataFrame(probability.numpy())
-    prediction = logits.argmax(-1).item()
-    label = labels[prediction]
-    html_out = f"""
-    <h1>This image is likely: {label}</h1><br><h3>
-
-    Probabilites:<br>
-    Real: {px[1][0]}<br>
-    AI: {px[0][0]}"""
-    results = {}
-    for idx,result in enumerate(px):
-        results[labels[idx]] = px[idx][0]
-        #results[labels['label']] = result['score']
-    fin_sum.append(results)
-    return gr.HTML.update(html_out),results
-def aiornot1(image):
-    labels = ["AI", "Real"]
-    mod=models[1]
-    feature_extractor1 = AutoFeatureExtractor.from_pretrained(mod)
-    model1 = AutoModelForImageClassification.from_pretrained(mod)
-    input = feature_extractor1(image, return_tensors="pt")
-    with torch.no_grad():
-        outputs = model1(**input)
-    logits = outputs.logits
-    probability = softmax(logits)
-    px = pd.DataFrame(probability.numpy())
-    prediction = logits.argmax(-1).item()
-    label = labels[prediction]
-    html_out = f"""
-    <h1>This image is likely: {label}</h1><br><h3>
-
-    Probabilites:<br>
-    Real: {px[1][0]}<br>
-    AI: {px[0][0]}"""
-    results = {}
-    for idx,result in enumerate(px):
-        results[labels[idx]] = px[idx][0]
-        #results[labels['label']] = result['score']
-    fin_sum.append(results)
-    return gr.HTML.update(html_out),results
-def aiornot2(image):
-    labels = ["Real", "AI"]
-    mod=models[2]
-    feature_extractor2 = AutoFeatureExtractor.from_pretrained(mod)
-    #feature_extractor2 = AutoFeatureExtractor.from_pretrained("microsoft/resnet-50")
-    model2 = AutoModelForImageClassification.from_pretrained(mod)
-    input = feature_extractor2(image, return_tensors="pt")
     with torch.no_grad():
-        outputs = model2(**input)
     logits = outputs.logits
     probability = softmax(logits)
     px = pd.DataFrame(probability.numpy())
-    prediction = logits.argmax(-1).item()
-    label = labels[prediction]
     html_out = f"""
     <h1>This image is likely: {label}</h1><br><h3>
-
-    Probabilites:<br>
-    Real: {px[0][0]}<br>
-    AI: {px[1][0]}"""
-
-    results = {}
-    for idx,result in enumerate(px):
-        results[labels[idx]] = px[idx][0]
-        #results[labels['label']] = result['score']
-    fin_sum.append(results)
-
-    return gr.HTML.update(html_out),results
 
 def load_url(url):
     try:
-        urllib.request.urlretrieve(
-            f'{url}',
-            f"{uid}tmp_im.png")
         image = Image.open(f"{uid}tmp_im.png")
         mes = "Image Loaded"
     except Exception as e:
-        image=None
-        mes=f"Image not Found<br>Error: {e}"
-    return image,mes
-
-def tot_prob():
-    try:
-        fin_out = fin_sum[0]["Real"]+fin_sum[1]["Real"]+fin_sum[2]["Real"]+fin_sum[3]["Real"]+fin_sum[4]["Real"]+fin_sum[5]["Real"]
-        fin_out = fin_out/6
-        fin_sub = 1-fin_out
-        out={
-            "Real":f"{fin_out}",
-            "AI":f"{fin_sub}"
-        }
-        #fin_sum.clear()
-        #print (fin_out)
-        return out
-    except Exception as e:
-        pass
-        print (e)
-        return None
-def fin_clear():
-    fin_sum.clear()
-    return None
-
-def upd(image):
-    print (image)
-    rand_im = uuid.uuid4()
-    image.save(f"{rand_im}-vid_tmp_proc.png")
-    out = Image.open(f"{rand_im}-vid_tmp_proc.png")
 
-    #image.save(f"{rand_im}-vid_tmp_proc.png")
-    #out = os.path.abspath(f"{rand_im}-vid_tmp_proc.png")
-    #out_url = f'https://omnibus_AI_or_Not_dev.hf.space/file={out}'
-    #out_url = f"{rand_im}-vid_tmp_proc.png"
-    return out
 
-
 with gr.Blocks() as app:
-    gr.Markdown("""<center><h1>AI Image Detector<br><h4>(Test Demo - accuracy varies by model)""")
     with gr.Column():
         inp = gr.Image(type='pil')
-        in_url=gr.Textbox(label="Image URL")
         with gr.Row():
-            load_btn=gr.Button("Load URL")
             btn = gr.Button("Detect AI")
-        mes = gr.HTML("""""")
-        with gr.Group():
-            with gr.Row():
-                fin=gr.Label(label="Final Probability")
         with gr.Row():
-            with gr.Box():
-                lab0 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[0]}'>{models[0]}</a></b>""")
-                nun0 = gr.HTML("""""")
-            with gr.Box():
-                lab1 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[1]}'>{models[1]}</a></b>""")
-                nun1 = gr.HTML("""""")
-            with gr.Box():
-                lab2 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[2]}'>{models[2]}</a></b>""")
-                nun2 = gr.HTML("""""")
-
         with gr.Row():
-            with gr.Box():
-                n_out0=gr.Label(label="Output")
-                outp0 = gr.HTML("""""")
-            with gr.Box():
-                n_out1=gr.Label(label="Output")
-                outp1 = gr.HTML("""""")
-            with gr.Box():
-                n_out2=gr.Label(label="Output")
-                outp2 = gr.HTML("""""")
-        with gr.Row():
-            with gr.Box():
-                n_out3=gr.Label(label="Output")
-                outp3 = gr.HTML("""""")
-            with gr.Box():
-                n_out4=gr.Label(label="Output")
-                outp4 = gr.HTML("""""")
-            with gr.Box():
-                n_out5=gr.Label(label="Output")
-                outp5 = gr.HTML("""""")
-    hid_box=gr.Textbox(visible=False)
-    hid_im = gr.Image(type="pil",visible=False)
-    def echo(inp):
-        return inp
-
-    #inp.change(echo,inp,hid_im).then(upd,hid_im,inp)
-
-    btn.click(fin_clear,None,fin,show_progress=False)
-    load_btn.click(load_url,in_url,[inp,mes])
-
-    btn.click(aiornot0,[inp],[outp0,n_out0]).then(tot_prob,None,fin,show_progress=False)
-    btn.click(aiornot1,[inp],[outp1,n_out1]).then(tot_prob,None,fin,show_progress=False)
-    btn.click(aiornot2,[inp],[outp2,n_out2]).then(tot_prob,None,fin,show_progress=False)
-
-    btn.click(image_classifier0,[inp],[n_out3]).then(tot_prob,None,fin,show_progress=False)
-    btn.click(image_classifier1,[inp],[n_out4]).then(tot_prob,None,fin,show_progress=False)
-    btn.click(image_classifier2,[inp],[n_out5]).then(tot_prob,None,fin,show_progress=False)
 
-app.launch(show_api=False,max_threads=24)
 import gradio as gr
 import torch
+from transformers import AutoFeatureExtractor, AutoModelForImageClassification
 from numpy import exp
+import pandas as pd
 from PIL import Image
 import urllib.request
 import uuid
 
+uid = uuid.uuid4()
+models = [
     "cmckinle/sdxl-flux-detector",
     "umm-maybe/AI-image-detector",
     "Organika/sdxl-detector",
 ]
+results_store = []
 
 def softmax(vector):
+    e = exp(vector)
+    return e / e.sum()
 
+def aiornot(image, model_index):
+    mod = models[model_index]
+    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
+    model = AutoModelForImageClassification.from_pretrained(mod)
+    input = feature_extractor(image, return_tensors="pt")
     with torch.no_grad():
+        outputs = model(**input)
     logits = outputs.logits
     probability = softmax(logits)
     px = pd.DataFrame(probability.numpy())
+
+    if model_index == 2:  # Organika model
+        real_prob, ai_prob = px[0][0], px[1][0]
+        label = "Real" if real_prob > ai_prob else "AI"
+    else:
+        ai_prob, real_prob = px[0][0], px[1][0]
+        label = "AI" if ai_prob > real_prob else "Real"
+
     html_out = f"""
     <h1>This image is likely: {label}</h1><br><h3>
+    Probabilities:<br>
+    Real: {real_prob:.4f}<br>
+    AI: {ai_prob:.4f}"""
+
+    results = {"Real": real_prob, "AI": ai_prob}
+    results_store.append(results)
+    return gr.HTML.update(html_out), results
 
 def load_url(url):
     try:
+        urllib.request.urlretrieve(f'{url}', f"{uid}tmp_im.png")
         image = Image.open(f"{uid}tmp_im.png")
         mes = "Image Loaded"
     except Exception as e:
+        image = None
+        mes = f"Image not Found<br>Error: {e}"
+    return image, mes
 
+def calculate_final_prob():
+    if not results_store:
+        return {"Real": "N/A", "AI": "N/A"}
+    fin_out = sum(result["Real"] for result in results_store) / len(results_store)
+    return {
+        "Real": f"{fin_out:.4f}",
+        "AI": f"{1 - fin_out:.4f}"
+    }
 
 with gr.Blocks() as app:
+    gr.Markdown("""<center><h1>AI Image Detector<br><h4>(Test Demo - accuracy varies by model)</h4></center>""")
     with gr.Column():
         inp = gr.Image(type='pil')
+        in_url = gr.Textbox(label="Image URL")
         with gr.Row():
+            load_btn = gr.Button("Load URL")
             btn = gr.Button("Detect AI")
+        mes = gr.HTML()
+        with gr.Group():
            with gr.Row():
+                fin = gr.Label(label="Final Probability")
         with gr.Row():
+            for i, model in enumerate(models):
+                with gr.Column():
+                    gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{model}'>{model}</a></b>""")
+                    output_html = gr.HTML()
+                    output_label = gr.Label(label="Output")
+                    btn.click(aiornot, inputs=[inp, gr.Number(value=i, visible=False)], outputs=[output_html, output_label])
+                    if i == len(models) - 1:  # After the last model, calculate final probability
+                        btn.click(lambda: results_store.clear(), outputs=None)  # Clear results before new detection
+                        btn.click(calculate_final_prob, outputs=fin)
+    load_btn.click(load_url, in_url, [inp, mes])
 
+app.launch(show_api=False, max_threads=24)
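
A note on the `if model_index == 2` branch in the new `aiornot`: the three checkpoints do not all emit their two classes in the same column order, so the code swaps the meaning of the columns for the Organika model. A less brittle alternative would be to read the ordering from each checkpoint's own `config.id2label` mapping. The sketch below is only illustrative and is not part of this commit; the substrings it matches ("real", "human") are assumptions, since the actual label strings differ between these checkpoints.

# Illustrative sketch (not in this commit): infer which output column means "Real"
# from the checkpoint's id2label mapping instead of hard-coding it per model index.
from transformers import AutoModelForImageClassification

def label_order(model_name):
    """Return (real_column, ai_column) for a two-class detector checkpoint."""
    model = AutoModelForImageClassification.from_pretrained(model_name)
    id2label = model.config.id2label  # e.g. {0: "artificial", 1: "human"}
    # Assumed heuristic: a label containing one of these substrings counts as "Real".
    real_idx = next((i for i, name in id2label.items()
                     if any(k in name.lower() for k in ("real", "human"))), 1)
    return real_idx, 1 - real_idx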
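
For reference, the final score produced by `calculate_final_prob` is simply the mean of the per-model "Real" probabilities, with "AI" reported as its complement. A minimal standalone sketch of that ensemble step, using hypothetical detector outputs in the same {"Real", "AI"} shape that `aiornot` appends to `results_store`:

# Minimal sketch of the ensemble step; the three dicts are hypothetical outputs.
per_model = [
    {"Real": 0.91, "AI": 0.09},
    {"Real": 0.72, "AI": 0.28},
    {"Real": 0.85, "AI": 0.15},
]

real = sum(r["Real"] for r in per_model) / len(per_model)
print({"Real": f"{real:.4f}", "AI": f"{1 - real:.4f}"})
# -> {'Real': '0.8267', 'AI': '0.1733'}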