Reputation: 11
I am working on a model to train on images using TensorFlow and the Inception ResNet V2 architecture, but I can't train it. Every time I try, I get
AttributeError: module 'tensorflow.compat.v1' has no attribute 'fit'
import tensorflow.compat.v1 as tf
import inception_resnet_v2 as incep_v2
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import selectivesearch
import matplotlib.patches as mpatches
import pandas as pd
import random
tf.disable_eager_execution()
# ------------------------------------------------------------------------------
# Configurations
# ------------------------------------------------------------------------------
IMG_SIZE = 150
TRAIN_DIR = "./dataset/train_images"
TEST_DIR = "./dataset/test_images"
data = pd.read_csv("./dataset/train.csv")
data = data.iloc[0:100, :]
# ------------------------------------------------------------------------------
# Read Train Image
# ------------------------------------------------------------------------------
def create_train_data():
    train_data = []
    for ind in data.index:
        path = os.path.join(TRAIN_DIR, data["image_name"][ind])
        img_data = cv2.imread(path)
        img_data = cv2.resize(img_data, (IMG_SIZE, IMG_SIZE))
        train_data.append([np.array(img_data), data["label"][ind]])
        # fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
        # ax.imshow(img_data)
        # plt.show()
    random.shuffle(train_data)
    np.save('train_data.npy', train_data)
    return train_data

def create_test_data():
    test_data = []
    for img in os.listdir(TEST_DIR):
        path = os.path.join(TEST_DIR, img)
        img_data = cv2.imread(path)
        img_data = cv2.resize(img_data, (IMG_SIZE, IMG_SIZE))
        test_data.append(np.array(img_data))
        break
    random.shuffle(test_data)
    return test_data
train_data = create_train_data()
test_data = create_test_data()
# ------------------------------------------------------------------------------
# Declarations
# ------------------------------------------------------------------------------
def define_model(model, is_training):
    model.Image = tf.placeholder(tf.float32, shape=[None, IMG_SIZE, IMG_SIZE, 3])
    with incep_v2.slim.arg_scope(incep_v2.inception_resnet_v2_arg_scope()):
        model.logits, model.end_points = incep_v2.inception_resnet_v2(model.Image, is_training=is_training)

class Model_Class:
    def __init__(self, is_training):
        define_model(self, is_training=is_training)
sess = tf.Session()
# ------------------------------------------------------------------------------
# Create Model
# ------------------------------------------------------------------------------
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
with tf.device('/cpu:0'):
    model = Model_Class(True)
This is the code I use to build a TensorFlow model with the Inception ResNet V2 architecture, but I don't know how to train it on my dataset. Any help?
Upvotes: 1
Views: 3246
Reputation: 146
Actually, with TensorFlow 2 you can use Inception ResNet V2 directly from tensorflow.keras.applications. Below is a demo.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import os
import sys
from glob import glob
import cv2
import time
import datetime
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input, Dense, Conv2D, Flatten
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 as PretrainedModel, preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
Now, after this, you can prepare your data as follows (you might have already done this, I guess):
train_path = r"./train"
test_path = r"./test"
IMAGE_SIZE = [150,150]
image_files = glob(train_path + '/*/*.png')
test_image_files = glob(test_path + '/*/*.png')
folders = glob(train_path + "/*")
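Note that glob(train_path + '/*/*.png') and flow_from_directory (used below) both expect one subfolder per class. A sketch of the assumed layout (the class names dog and cat are just placeholders):
./train/dog/001.png
./train/cat/002.png
./test/dog/101.png
./test/cat/102.png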
Now, let's get the pretrained model, assuming you want to do transfer learning.
ptm = PretrainedModel(
    input_shape = IMAGE_SIZE + [3],
    weights = 'imagenet',
    include_top = False
)
ptm.trainable = False
K = len(folders)
x = Flatten()(ptm.output)
x = Dense(K, activation = 'softmax')(x)
model = Model(inputs = ptm.input , outputs = x)
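Optionally, you can inspect the assembled model and confirm that only the new Dense head is trainable:
model.summary()  # the InceptionResNetV2 base layers should show up as frozen (non-trainable)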
Now, let's get the generators which will fetch the data from our folders.
gen = ImageDataGenerator(
    rotation_range = 20,
    width_shift_range = 0.1,
    height_shift_range = 0.1,
    shear_range = 0.1,
    zoom_range = 0.2,
    horizontal_flip = True,
    preprocessing_function = preprocess_input
)
batch_size = 64
train_generator = gen.flow_from_directory(
    train_path,
    shuffle = True,
    target_size = IMAGE_SIZE,
    batch_size = batch_size
)
test_generator = gen.flow_from_directory(
    test_path,
    target_size = IMAGE_SIZE,
    batch_size = batch_size
)
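One design note: the test generator above reuses the augmenting gen, so random shifts and flips are also applied to the validation images. If you prefer to validate on unmodified images, a sketch of a separate generator that only applies preprocess_input:
val_gen = ImageDataGenerator(preprocessing_function = preprocess_input)
test_generator = val_gen.flow_from_directory(
    test_path,
    target_size = IMAGE_SIZE,
    batch_size = batch_size,
    shuffle = False  # keep a stable order for later evaluation
)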
Now, let's compile our model.
model.compile(loss = 'categorical_crossentropy' , optimizer = 'adam' , metrics = ['accuracy'])
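The fit call below also passes callbacks=[myCall], which isn't defined in the snippet above. A minimal sketch of one possible definition, assuming you want to keep the best weights seen during training (the filename is just an example):
from tensorflow.keras.callbacks import ModelCheckpoint

# save the weights that achieve the best validation accuracy so far
myCall = ModelCheckpoint("best_model.h5", monitor = "val_accuracy", save_best_only = True)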
Finally, let's fit our model to the data
r = model.fit(
    train_generator,
    validation_data = test_generator,
    epochs = 8,
    steps_per_epoch = int(np.ceil(len(image_files)/batch_size)),
    validation_steps = int(np.ceil(len(test_image_files)/batch_size)),
    callbacks = [myCall]
)
Let's get some plots as well
plt.plot(r.history['loss'] , color = 'red' , label = 'loss')
plt.plot(r.history['val_loss'] , color = 'blue' , label = 'val_loss')
plt.legend()
plt.show()
And the same for accuracy:
plt.plot(r.history['accuracy'] , color = 'red' , label = 'accuracy')
plt.plot(r.history['val_accuracy'] , color = 'blue' , label = 'val_accuracy')
plt.legend()
plt.show()
Let's save the model for future use.
model.save("model.h5")
We can generate predictions later if we want.
model = load_model("model.h5")
im = cv2.imread(r".\sample.png")
im = cv2.resize(im , (150,150))
im = preprocess_input(im.astype(np.float32))  # apply the same preprocessing used during training
np.argmax(model.predict(im.reshape(1,150,150,3)))
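The predicted index corresponds to one of the class subfolders. Assuming train_generator from above is still in scope, you can map it back to a class name:
# class_indices maps class name -> index; invert it to look up the name
idx_to_class = {v: k for k, v in train_generator.class_indices.items()}
pred_idx = int(np.argmax(model.predict(im.reshape(1,150,150,3))))
print(idx_to_class[pred_idx])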
Upvotes: 2