Commit a44aa14
Parent(s): 3d2a04d
first upload

Files changed:
- ELA_CNN_ART_V2.h5 (+3 -0)
- cnn_ela_test.py (+175 -0)
- datasets/test_set/none.txt (+0 -0)
- project_cnn_ela.py (+199 -0)
ELA_CNN_ART_V2.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9b681289e0f151aeb2f52c56dda38c8e1ea89a22ff11e1066c63ad2b3e68fd2
size 236205528
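The weights file itself is stored with Git LFS, so the commit only carries this pointer. As a minimal sketch (not part of the commit), a locally fetched copy of ELA_CNN_ART_V2.h5 can be checked against the pointer's oid and size, assuming the file sits in the repository root after `git lfs pull`:

import hashlib
import os

path = "ELA_CNN_ART_V2.h5"  # assumed local path of the LFS-fetched weights

sha = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks so the ~236 MB file is never fully in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print("size matches:", os.path.getsize(path) == 236205528)
print("oid matches:", sha.hexdigest() ==
      "c9b681289e0f151aeb2f52c56dda38c8e1ea89a22ff11e1066c63ad2b3e68fd2")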
cnn_ela_test.py
ADDED
@@ -0,0 +1,175 @@
# -*- coding: utf-8 -*-
"""
Created on Fri May 24 14:31:20 2024

@author: beni
"""

###test on art 30 epoches
###Test Loss: 0.7387489676475525
#Test Accuracy: 0.8525179624557495 ELA_CNN_ART.h5
####


#####test on objects
###ELA_CNN_OBJ.h5
#Test Loss: 1.260271430015564
#Test Accuracy: 0.5509259104728699


from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense
from project_cnn_ela import convert_to_ela_image, shuffle_and_split_data, labeling
import pandas as pd
import numpy as np
from PIL import Image
import os
from pylab import *
import re
from PIL import Image, ImageChops, ImageEnhance
import tensorflow as tf
import itertools
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers.legacy import RMSprop
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from copy import deepcopy

model = Sequential()

model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'valid',
                 activation ='relu', input_shape = (128,128,3)))
print("Input: ", model.input_shape)
print("Output: ", model.output_shape)

model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'valid',
                 activation ='relu'))
print("Input: ", model.input_shape)
print("Output: ", model.output_shape)

model.add(MaxPool2D(pool_size=(2,2)))

model.add(Dropout(0.25))
print("Input: ", model.input_shape)
print("Output: ", model.output_shape)

model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(2, activation = "softmax"))

model.summary()


# Load saved weights
model.load_weights("ELA_CNN_ART_V2.h5")

optimizer = RMSprop(lr=0.0005, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])


test_real_folder = 'datasets/test_set/real/'
test_fake_folder = 'datasets/test_set/fake/'

test_real_ela_folder = 'datasets/test_set/ela_real/'
test_fake_ela_folder = 'datasets/test_set/ela_fake/'

test_set = labeling(test_real_folder, test_fake_folder)
X_test = []
Y_test = []

# Preprocess test set
for index, row in test_set.iterrows():
    X_test.append(array(convert_to_ela_image(row[0], 90, test_real_ela_folder).resize((128, 128))).flatten() / 255.0)
    Y_test.append(row[1])

# Convert to numpy arrays
X_test = np.array(X_test)
Y_test = to_categorical(Y_test, 2)

# Reshape images
X_test = X_test.reshape(-1, 128, 128, 3)

# Evaluate the model on test set
test_loss, test_accuracy = model.evaluate(X_test, Y_test)
print()
print("~~~~~art Dataset~~~~")
print()
print("Test Loss:", test_loss)
print("Test Accuracy:", test_accuracy)

#######################################################

def calculate_acc(y_true, y_pred):

    # Calculate precision
    precision = precision_score(y_true, y_pred)

    # Calculate recall
    recall = recall_score(y_true, y_pred)

    # Calculate F1 score
    f1 = f1_score(y_true, y_pred)

    # Calculate confusion matrix
    conf_matrix = confusion_matrix(y_true, y_pred)

    print("Precision:", precision)
    print("Recall:", recall)
    print("F1 Score:", f1)
    print("Confusion Matrix:")

    # Plot confusion matrix
    plt.figure(figsize=(8, 6))
    sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', cbar=False)
    plt.xlabel('Predicted Label')
    plt.ylabel('True Label')
    plt.title('Confusion Matrix')
    plt.show()


# Get predicted probabilities
Y_pred_prob = model.predict(X_test)

# Convert predicted probabilities to class labels
Y_pred = np.argmax(Y_pred_prob, axis=1)

Y_true = np.argmax(Y_test, axis=1)

# Calculate accuracies
calculate_acc(Y_true, Y_pred)

model.summary()
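As a usage note, the same preprocessing path can score a single image. This is a minimal sketch (not from the committed file), assuming the script above has already run so that model holds the loaded weights, and using a hypothetical image path under the test folders:

# Hypothetical example image; any file under datasets/test_set/real/ or fake/ works.
img_path = 'datasets/test_set/real/example.jpg'

# Same steps as the evaluation loop: ELA at JPEG quality 90, resize to 128x128, scale to [0, 1].
ela = convert_to_ela_image(img_path, 90, test_real_ela_folder).resize((128, 128))
x = np.array(ela).flatten() / 255.0
x = x.reshape(-1, 128, 128, 3)

prob = model.predict(x)[0]
# labeling() in project_cnn_ela.py assigns label 0 to real images and 1 to fake ones.
print('fake' if np.argmax(prob) == 1 else 'real', prob)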
datasets/test_set/none.txt
ADDED
File without changes
project_cnn_ela.py
ADDED
@@ -0,0 +1,199 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 17:46:18 2024

@author: beni
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
import os
from pylab import *
import re
from PIL import Image, ImageChops, ImageEnhance
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from tensorflow.keras.utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from tensorflow.keras.optimizers.legacy import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from scipy.ndimage import gaussian_filter


def convert_to_ela_image(path, quality, output_dir, resize=(256, 256)):
    filename = path
    resaved_filename = os.path.join(output_dir, os.path.splitext(os.path.basename(filename))[0] + '.resaved.jpg')
    ELA_filename = os.path.join(output_dir, os.path.splitext(os.path.basename(filename))[0] + '.ela.png')

    # Open and resize the image
    im = Image.open(filename).convert('RGB')
    im_resized = im.resize(resize)

    # Save the resized image
    im_resized.save(resaved_filename, 'JPEG', quality=quality)
    resaved_im = Image.open(resaved_filename)

    ela_im = ImageChops.difference(im_resized, resaved_im)

    extrema = ela_im.getextrema()
    max_diff = max([ex[1] for ex in extrema])
    if max_diff == 0:
        max_diff = 1
    scale = 255.0 / max_diff

    ela_im = ImageEnhance.Brightness(ela_im).enhance(scale)

    ela_im.save(ELA_filename)

    return ela_im

def shuffle_and_split_data(dataframe, test_size=0.2, random_state=59):
    # Shuffle the DataFrame
    shuffled_df = dataframe.sample(frac=1, random_state=random_state).reset_index(drop=True)

    # Split the DataFrame into train and validation sets
    train_df, val_df = train_test_split(shuffled_df, test_size=test_size, random_state=random_state)

    return train_df, val_df

def labeling(path_real, path_fake):
    image_paths = []
    labels = []

    for filename in os.listdir(path_real):
        image_paths.append(os.path.join(path_real, filename))
        labels.append(0)

    for filename in os.listdir(path_fake):
        image_paths.append(os.path.join(path_fake, filename))
        labels.append(1)

    dataset = pd.DataFrame({'image_path': image_paths, 'label': labels})

    return dataset


if __name__ == "__main__":

    np.random.seed(22)
    tf.random.set_seed(9)

    traning_fake_folder = 'datasets/training_set/fake/'
    traning_real_folder = 'datasets/training_set/real/'

    test_real_folder = 'datasets/test_set/real/'
    test_fake_folder = 'datasets/test_set/fake/'

    traning_fake_ela_folder = 'datasets/training_set/ela_fake/'
    traning_real_ela_folder = 'datasets/training_set/ela_real/'

    test_real_ela_folder = 'datasets/test_set/ela_real/'
    test_fake_ela_folder = 'datasets/test_set/ela_fake/'

    traning_set = labeling(traning_real_folder, traning_fake_folder)

    X = []
    Y = []

    for index, row in traning_set.iterrows():
        X.append(array(convert_to_ela_image(row[0], 90,traning_real_ela_folder).resize((128, 128))).flatten() / 255.0)
        Y.append(row[1])

    X = np.array(X)
    Y = to_categorical(Y, 2)

    X = X.reshape(-1, 128, 128, 3)

    X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size = 0.2, random_state=1,shuffle=True)

    model = Sequential()

    model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'valid',
                     activation ='relu', input_shape = (128,128,3)))
    print("Input: ", model.input_shape)
    print("Output: ", model.output_shape)

    model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'valid',
                     activation ='relu'))
    print("Input: ", model.input_shape)
    print("Output: ", model.output_shape)

    model.add(MaxPool2D(pool_size=(2,2)))

    model.add(Dropout(0.25))
    print("Input: ", model.input_shape)
    print("Output: ", model.output_shape)

    model.add(Flatten())
    model.add(Dense(256, activation = "relu"))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation = "softmax"))

    model.summary()

    optimizer = RMSprop(lr=0.0005, rho=0.9, epsilon=1e-08, decay=0.0)
    model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])

    early_stopping = EarlyStopping(monitor='val_accuracy',  # with metrics=["accuracy"], Keras logs 'val_accuracy'; the original 'val_acc' key is never produced, so early stopping would not trigger
                                   min_delta=0,
                                   patience=2,
                                   verbose=0, mode='auto')

    epochs = 22
    batch_size = 100

    history = model.fit(X_train, Y_train, batch_size = batch_size, epochs = epochs,
                        validation_data = (X_val, Y_val), verbose = 2, callbacks=[early_stopping])

    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.show()

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.show()

    # every training can give different results , we got the best training score so no need to run again
    # model.save('ELA_CNN_ART_V2.h5')
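For reference, a minimal sketch (not part of this commit) of the ELA step in isolation: it converts one image with convert_to_ela_image and shows the original next to its error-level map, assuming a hypothetical sample path and an existing output directory:

import matplotlib.pyplot as plt
from PIL import Image
from project_cnn_ela import convert_to_ela_image

sample = 'datasets/training_set/real/sample.jpg'   # hypothetical input image
out_dir = 'datasets/training_set/ela_real/'        # must already exist

# Quality 90, matching the value used when building the training and test sets.
ela = convert_to_ela_image(sample, 90, out_dir)

fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].imshow(Image.open(sample)); axes[0].set_title('Original'); axes[0].axis('off')
axes[1].imshow(ela); axes[1].set_title('ELA (quality 90)'); axes[1].axis('off')
plt.show()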