-
Notifications
You must be signed in to change notification settings - Fork 0
/
autoencoder_23x.py
116 lines (89 loc) · 3.56 KB
/
autoencoder_23x.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
import numpy as np
import h5py
import pickle
import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
from scipy import misc
from os import listdir
from kevz import DebugCounter
from kevz import email
from keras.layers import Input, Conv3D, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras.callbacks import EarlyStopping
# Tally of images skipped during batch generation.
# (kevz.DebugCounter is project-local; attributes appear to be created on
# first use -- confirm against the kevz module.)
y = DebugCounter()

image_dir = "face_images/"
# Full relative paths for every file in the image directory.
all_images = [image_dir + name for name in listdir(image_dir)]
dir_size = len(all_images)

# NOTE(review): validation is the first 20k entries of training, so the
# validation loss is not an independent estimate -- confirm intentional.
training = all_images
validation = all_images[:20000]
batch_size = 32
def image_generator(files):
    """Yield ``(batch, batch)`` pairs of normalised RGB face images forever.

    Cycles endlessly through *files*, loading each image and scaling pixel
    values to [0, 1].  Only images of shape (300, 300, 3) are kept; pure
    grayscale (300, 300) images, other shapes, and unreadable files are
    skipped and tallied on the module-level ``DebugCounter`` ``y``.  The
    batch is yielded twice because an autoencoder's target is its input.

    Parameters
    ----------
    files : list[str]
        Paths of candidate image files.

    Yields
    ------
    (np.ndarray, np.ndarray)
        Two references to the same float32 array of shape
        (batch_size, 300, 300, 3).
    """
    count = 0
    while True:
        batch = []
        while len(batch) < batch_size:
            path = files[count % len(files)]
            count += 1
            try:
                # scipy.misc.imread was removed in SciPy >= 1.2; PIL +
                # numpy (already imported at module level) give the same
                # uint8 array, converted to float32 in [0, 1].
                img = np.asarray(Image.open(path)).astype("float32") / 255
            except Exception:
                # Unreadable/corrupt file: tally and move on.  The
                # original bare ``except:`` would also have swallowed
                # KeyboardInterrupt during a long training run.
                y.EXCEPTIONAL_IMAGE += 1
                continue
            if img.shape == (300, 300):
                y.GrayScale += 1
            elif img.shape != (300, 300, 3):
                y.Other += 1
            else:
                batch.append(img)
        arr = np.array(batch)
        yield arr, arr
# --- Convolutional autoencoder for 300x300 RGB images --------------------
# Encoder: three stride-2 'same' convolutions halve the spatial size each
# step: 300 -> 150 -> 75 -> 38.
input_img = Input(shape=(300, 300, 3))  # adapt this if using `channels_first` image data format
h = Conv2D(8, (3, 3), strides=2, activation='relu', padding='same')(input_img)
h = Conv2D(8, (3, 3), strides=2, activation='relu', padding='same')(h)
encoded = Conv2D(8, (3, 3), strides=2, activation='relu', padding='same')(h)

# Decoder: upsample back 38 -> 76 -> 152; the un-padded ('valid') 3x3 conv
# trims 152 -> 150 so the final upsample restores exactly 300.
h = Conv2D(8, (3, 3), activation='relu', padding="same")(encoded)
h = UpSampling2D((2, 2))(h)
h = Conv2D(8, (3, 3), activation='relu', padding="same")(h)
h = UpSampling2D((2, 2))(h)
h = Conv2D(16, (3, 3), activation='relu')(h)
h = UpSampling2D((2, 2))(h)
# Sigmoid keeps reconstructed pixels in [0, 1] to match the scaled input.
decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(h)

# Full model: maps an input to its reconstruction.
autoencoder = Model(input_img, decoded)
earlyStopping = EarlyStopping(patience=2)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# Train from the streaming generators.  steps_per_epoch / validation_steps
# cover one pass over each file list per epoch; EarlyStopping (patience=2)
# halts once validation loss stops improving.
train_steps = len(training) // batch_size
val_steps = len(validation) // batch_size
autoencoder.fit_generator(
    image_generator(training),
    steps_per_epoch=train_steps,
    epochs=16,
    verbose=1,
    callbacks=[earlyStopping],
    validation_data=image_generator(validation),
    validation_steps=val_steps,
)
# Persist the trained autoencoder (architecture + weights).
autoencoder.save('autoencoder.h5')
# This model maps an input to its encoded (bottleneck) representation;
# it shares the trained encoder layers with `autoencoder`.
encoder = Model(input_img, encoded)
encoder.save("encode.h5")

# Encode every image and store the flattened code vectors keyed by path.
img_enc_dic = {}
count = 0  # number of images that failed to load or encode
for file in tqdm(all_images):
    try:
        # scipy.misc.imread was removed in SciPy >= 1.2; PIL + numpy
        # (already imported) produce the equivalent array.
        img = np.asarray(Image.open(file)).astype("float32") / 255
        encoded_img = encoder.predict(np.array([img]))
        # Flatten (1, h, w, c) -> (h*w*c,) for compact storage.
        img_enc_dic[file] = encoded_img.reshape(-1)
    except Exception:
        # Wrong-shaped (e.g. grayscale) or unreadable images fail inside
        # predict/imread and are skipped.  The original bare ``except:``
        # would also have swallowed KeyboardInterrupt mid-run.
        count += 1

# Proportion of images that FAILED to encode.
print(count / len(all_images))

with open("encodings.pkl", "wb") as outfile:
    pickle.dump(img_enc_dic, outfile)

# Notify both authors.  Message fixed: the job encodes (not "encrypts")
# images, and "succesfully" was misspelled.
report = ("Proportion of successfully encoded images: "
          + str(1 - count / len(all_images)))
email("kevz@mit.edu", "finished encoding images", report)
email("pmantica@mit.edu", "finished encoding images", report)
# Visual sanity check: show the first image next to its reconstruction.
# scipy.misc.imread was removed in SciPy >= 1.2; use PIL + numpy instead.
test_image = np.asarray(Image.open(all_images[0])).astype("float32") / 255
plt.imshow(test_image)
plt.show()

# predict returns a batch of one; [0] drops the batch dimension.
res = autoencoder.predict(np.array([test_image]))[0]
plt.imshow(res)
plt.show()