Reputation: 37
I'm trying to create a Python autoencoder for image compression, but all I'm getting is mixed-up images.
Here's my code:
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model

path1 = 'C:\\Users\\klaud\\Desktop\\images\\'
all_images = []
subjects = os.listdir(path1)
numberOfSubject = len(subjects)
print('Number of Subjects: ', numberOfSubject)
for number1 in range(0, numberOfSubject):  # numberOfSubject
    path2 = (path1 + subjects[number1] + '/')
    sequences = os.listdir(path2)
    numberOfsequences = len(sequences)
    for number2 in range(0, numberOfsequences):
        path3 = path2 + sequences[number2]
        img = cv2.imread(path3, 0)  # load as grayscale
        img = img.reshape(512, 512, 1)
        all_images.append(img)
x_train = np.array([all_images[0], all_images[1]])
x_test = np.array(all_images[2:])
print("X TRAIN \n")
print(x_train)
print("X TEST \n")
print(x_test)
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
print(x_train.shape)
print(x_test.shape)
latent_dim = 4
class Autoencoder(Model):
    def __init__(self, latent_dim):
        super(Autoencoder, self).__init__()
        self.latent_dim = latent_dim
        self.encoder = tf.keras.Sequential([
            layers.Flatten(),
            layers.Dense(latent_dim, activation='relu'),
        ])
        self.decoder = tf.keras.Sequential([
            layers.Dense(262144, activation='sigmoid'),
            layers.Reshape((512, 512))
        ])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
autoencoder = Autoencoder(latent_dim)
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
autoencoder.fit(x_train, x_train,
                epochs=10,
                shuffle=True,
                validation_data=(x_test, x_test))
encoded_imgs = autoencoder.encoder(x_test).numpy()
decoded_imgs = autoencoder.decoder(encoded_imgs).numpy()
n = 6
plt.figure(figsize=(20, 6))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i])
    plt.title("original")
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i])
    plt.title("reconstructed")
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
I have no idea whether it's an autoencoder problem or a problem with the way matplotlib is displaying the plots. I've tried changing almost everything, and whenever the program compiles without errors, all I get is mixed-up images. I'd appreciate any helpful advice!
Upvotes: 0
Views: 61
Reputation: 346
It looks like you are only training on two images:
x_train = np.array([all_images[0], all_images[1]])
In that case, the network can reach a (local) minimum by learning to reproduce this tiny input space, and it just spits out a mixture of both images regardless of the input.
To fix this, you really need a larger training data set. It does not need to be ImageNet; something like the Stanford Dogs dataset would be a good fit. As a first step, you could at least train on all of the images you already load instead of just the first two, as shown in the sketch below.
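Here is a minimal sketch of what that could look like. The 80/20 split ratio and the random seed are my own assumptions for illustration, not something taken from your code:

    import numpy as np

    # Stack every loaded image into one array and normalize
    # (assumes all_images was filled by your loading loop).
    all_images = np.array(all_images).astype('float32') / 255.

    # Shuffle and split roughly 80% / 20% into train and test sets.
    # The seed and the ratio are arbitrary choices for this sketch.
    rng = np.random.default_rng(42)
    indices = rng.permutation(len(all_images))
    split = int(0.8 * len(all_images))
    x_train = all_images[indices[:split]]
    x_test = all_images[indices[split:]]

    autoencoder.fit(x_train, x_train,
                    epochs=10,
                    shuffle=True,
                    validation_data=(x_test, x_test))

Even with this, a few hundred images will work far better than a handful; with only a couple of samples the model will keep memorizing them instead of learning a useful compression.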
Upvotes: 1