Reputation: 43
I have two CNNs based on MobileNet. The code below is the same for the first and the second CNN:
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.layers import GlobalAveragePooling2D, Dropout, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping

img_height, img_width = 224, 224
num_classes = 30
input_shape = (img_height, img_width, 3)
epochs = 1

# MobileNet backbone pre-trained on ImageNet, without its classification head
base_model_fingerprint = MobileNet(weights='imagenet', include_top=False, input_shape=input_shape)
x = base_model_fingerprint.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.5)(x)
output_fingerprint = Dense(num_classes, activation='softmax')(x)
model_fingerprint = Model(inputs=base_model_fingerprint.input, outputs=output_fingerprint)

# Prefix the layer names so the two models can later be merged without name clashes
for layer in model_fingerprint.layers:
    layer._name = 'fingerprint_' + layer.name

# Freeze the pre-trained backbone
for layer in base_model_fingerprint.layers:
    layer.trainable = False

model_fingerprint.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

train_datagen_fingerprint = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen_fingerprint = ImageDataGenerator(rescale=1./255)

train_generator_fingerprint = train_datagen_fingerprint.flow_from_directory(
    'C:/Users/giova/Desktop/CNN_FINGER_RESIZE/TRAIN',
    target_size=(img_height, img_width),
    batch_size=32,
    class_mode='categorical')
test_generator_fingerprint = test_datagen_fingerprint.flow_from_directory(
    'C:/Users/giova/Desktop/CNN_FINGER_RESIZE/TEST',
    target_size=(img_height, img_width),
    batch_size=32,
    class_mode='categorical')

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.0001)
early_stop = EarlyStopping(monitor='val_loss', patience=10)

history = model_fingerprint.fit(train_generator_fingerprint,
                                validation_data=test_generator_fingerprint,
                                epochs=epochs,
                                callbacks=[reduce_lr, early_stop])

test_loss, test_acc = model_fingerprint.evaluate(test_generator_fingerprint, verbose=2)
print('Test accuracy:', test_acc)
How can I create a third CNN that combines the two models for better accuracy?
I have tried to use model.save so that I can work with the .h5 files, but it doesn't work. The second CNN only differs in the directory it reads its input data from.
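For reference, this is roughly how I save each trained model (a sketch; the variable name model_palmprint for the second CNN is assumed, since its code is identical to the first except for the data directory):

# Save each trained model to an .h5 file so it can be reloaded later
model_fingerprint.save('pesi_fingerprint.h5')  # first CNN (fingerprint)
model_palmprint.save('pesi_palmprint.h5')      # second CNN (palmprint); variable name assumed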
Update: I wrote this code but I can't solve the error:
import tensorflow as tf
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator

num_classes = 30

# Load the two trained CNNs
model1 = load_model('pesi_fingerprint.h5')
model2 = load_model('pesi_palmprint.h5')

# Remove the last layer of each CNN
model1.layers.pop()
model2.layers.pop()

# Freeze the layers of both CNNs and prefix their names to avoid clashes
for layer in model1.layers:
    layer.trainable = False
    layer._name = 'model1_' + layer.name
for layer in model2.layers:
    layer.trainable = False
    layer._name = 'model2_' + layer.name

# Build the new model by concatenating the feature maps
concatenated = tf.keras.layers.Concatenate()([model1.layers[-1].output, model2.layers[-1].output])
x = tf.keras.layers.Reshape((6, 5, 3))(concatenated)  # reshape the vector into a small "image"
x = tf.keras.layers.Conv2D(16, kernel_size=(3, 3), activation='relu')(x)  # 3x3 convolution kernel
x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
x = tf.keras.layers.Conv2D(8, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = tf.keras.layers.Conv2D(8, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(128, activation='relu')(x)
output = tf.keras.layers.Dense(num_classes, activation='softmax')(x)

# Create the final model
model = Model(inputs=[model1.input, model2.input], outputs=output)

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Print the model structure
model.summary()

train_datagen = ImageDataGenerator(
    rescale=1./255,          # normalize pixel values to [0, 1]
    rotation_range=20,       # random rotations
    width_shift_range=0.2,   # random horizontal shifts
    height_shift_range=0.2,  # random vertical shifts
    shear_range=0.2,         # shear transformations
    zoom_range=0.2,          # random zoom
    horizontal_flip=True,    # random horizontal flips
    fill_mode='nearest'      # fill missing pixels with the nearest value
)
test_datagen = ImageDataGenerator(rescale=1./255)

# Paths to the folders containing the training and test data
train_dir = 'C:\\Users\\giova\\Desktop\\MERGE CNN\\TRAIN'
test_dir = 'C:\\Users\\giova\\Desktop\\MERGE CNN\\TEST'

# Read the training data from the specified folder
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical'
)
# Read the test data from the specified folder
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical'
)

# Train the model
model.fit(train_generator, epochs=10, validation_data=test_generator)
ERROR: ValueError: Exception encountered when calling layer "reshape" (type Reshape).
total size of new array must be unchanged, input_shape = [60], output_shape = [6, 5, 3]
Call arguments received by layer "reshape" (type Reshape): • inputs=tf.Tensor(shape=(None, 60), dtype=float32)
Upvotes: 4
Views: 182
Reputation: 805
The output shape of your concatenated tensor is [60], but you are trying to reshape it to [6, 5, 3]. That is not possible, because a reshape must keep the total number of elements unchanged, and 6 x 5 x 3 = 90 != 60. Try reshaping it to [6, 5, 2] instead, since 6 x 5 x 2 = 60.
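A minimal sketch of that one-line change, using the variable names from your update:

# Reshape the 60-element concatenated vector into a shape whose
# element count is also 60: 6 * 5 * 2 = 60
x = tf.keras.layers.Reshape((6, 5, 2))(concatenated)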
Upvotes: 1