Jacob

Reputation: 161

Want to predict model output with some dummy inputs

I am running a Keras model and want to produce model output with some dummy input before compiling and training the model. Here I share the parts of my code that I think are important for understanding the model. For the complete code, please see this colab file; you can also view the official Keras code here.

class ShiftViTModel(keras.Model):
    """The ShiftViT Model.

    Args:
        data_augmentation (keras.Model): A data augmentation model.
        projected_dim (int): The dimension to which the patches of the image are
            projected.
        patch_size (int): The patch size of the images.
        num_shift_blocks_per_stages (list[int]): A list of the number of shift
            blocks per stage.
        epsilon (float): The epsilon constant.
        mlp_dropout_rate (float): The dropout rate used in the MLP block.
        stochastic_depth_rate (float): The maximum drop rate probability.
        num_div (int): The number of divisions of the channels of the feature
            map. Defaults to 12.
        shift_pixel (int): The number of pixels to shift. Defaults to 1.
        mlp_expand_ratio (int): The ratio by which the initial mlp dense layer
            is expanded. Defaults to 2.
    """

    def __init__(
        self,
        data_augmentation,
        projected_dim,
        patch_size,
        num_shift_blocks_per_stages,
        epsilon,
        mlp_dropout_rate,
        stochastic_depth_rate,
        num_div=12,
        shift_pixel=1,
        mlp_expand_ratio=2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.data_augmentation = data_augmentation
        self.patch_projection = layers.Conv2D(
            filters=projected_dim,
            kernel_size=patch_size,
            strides=patch_size,
            padding="same",
        )
        self.stages = list()
        for index, num_shift_blocks in enumerate(num_shift_blocks_per_stages):
            if index == len(num_shift_blocks_per_stages) - 1:
                # This is the last stage, do not use the patch merge here.
                is_merge = False
            else:
                is_merge = True
            # Build the stages.
            self.stages.append(
                StackedShiftBlocks(
                    epsilon=epsilon,
                    mlp_dropout_rate=mlp_dropout_rate,
                    num_shift_blocks=num_shift_blocks,
                    stochastic_depth_rate=stochastic_depth_rate,
                    is_merge=is_merge,
                    num_div=num_div,
                    shift_pixel=shift_pixel,
                    mlp_expand_ratio=mlp_expand_ratio,
                )
            )
        self.global_avg_pool = layers.GlobalAveragePooling2D()

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "data_augmentation": self.data_augmentation,
                "patch_projection": self.patch_projection,
                "stages": self.stages,
                "global_avg_pool": self.global_avg_pool,
            }
        )
        return config

    def _calculate_loss(self, data, training=False):
        (images, labels) = data

        # Augment the images.
        augmented_images = self.data_augmentation(images, training=training)

        # Create patches and project the patches.
        projected_patches = self.patch_projection(augmented_images)

        # Pass through the stages.
        x = projected_patches
        for stage in self.stages:
            x = stage(x, training=training)

        # Get the logits.
        logits = self.global_avg_pool(x)

        # Calculate the loss and return it.
        total_loss = self.compiled_loss(labels, logits)
        return total_loss, labels, logits

    def train_step(self, inputs):
        with tf.GradientTape() as tape:
            total_loss, labels, logits = self._calculate_loss(
                data=inputs, training=True
            )

        # Collect the trainable variables of each sub-model/layer.
        train_vars = [
            self.data_augmentation.trainable_variables,
            self.patch_projection.trainable_variables,
            self.global_avg_pool.trainable_variables,
        ]
        train_vars = train_vars + [stage.trainable_variables for stage in self.stages]

        # Flatten the nested (gradient, variable) structure and apply gradients.
        grads = tape.gradient(total_loss, train_vars)
        trainable_variable_list = []
        for grad, var in zip(grads, train_vars):
            for g, v in zip(grad, var):
                trainable_variable_list.append((g, v))
        self.optimizer.apply_gradients(trainable_variable_list)

        # Update the metrics.
        self.compiled_metrics.update_state(labels, logits)
        return {m.name: m.result() for m in self.metrics}

    def test_step(self, data):
        _, labels, logits = self._calculate_loss(data=data, training=False)

        # Update the metrics.
        self.compiled_metrics.update_state(labels, logits)
        return {m.name: m.result() for m in self.metrics}

In a second block, I instantiate the model:

model = ShiftViTModel(
    data_augmentation=get_augmentation_model(),
    projected_dim=config.projected_dim,
    patch_size=config.patch_size,
    num_shift_blocks_per_stages=config.num_shift_blocks_per_stages,
    epsilon=config.epsilon,
    mlp_dropout_rate=config.mlp_dropout_rate,
    stochastic_depth_rate=config.stochastic_depth_rate,
    num_div=config.num_div,
    shift_pixel=config.shift_pixel,
    mlp_expand_ratio=config.mlp_expand_ratio,
)

I am then trying to produce output from the model like this:

dummy_inputs = tf.ones((2, 32, 32, 3))
outputs = model(dummy_inputs, training=False)
print(outputs.shape)

but it raises the following error:

Unimplemented tf.keras.Model.call(): if you intend to create a Model with the Functional API, please provide inputs and outputs arguments. Otherwise, subclass Model with an overridden call() method.
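
From the error message, it seems that a subclassed keras.Model needs an overridden call() method before it can be called on inputs directly, but I am not sure how to implement it for this model.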

Upvotes: 1

Views: 596

Answers (1)

Innat

Reputation: 17239

The call method is not implemented, and it is required in a subclassed model like this if we want to run the model on dummy data. You can implement call in the ShiftViTModel class as follows, using the layers the model already defines (see the train_step method):

def call(self, images):
    # Augment the images.
    augmented_images = self.data_augmentation(images)
    # Create and project the patches.
    x = self.patch_projection(augmented_images)
    # Pool to get the final feature vector.
    logits = self.global_avg_pool(x)
    return logits

Now, if we do

model = ShiftViTModel( ... )
x, y = next(iter(train_ds))
print(x.shape, y.shape)
model(x).shape

(256, 32, 32, 3) (256, 1)
TensorShape([256, 96])
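
Note that the call method above skips self.stages, which is why the pooled output has projected_dim channels (96 here). If you want call to run the same full forward pass as _calculate_loss, a minimal sketch along those lines (the output width will then change, since the stages perform patch merging):

def call(self, images, training=False):
    # Full forward pass, mirroring _calculate_loss.
    augmented_images = self.data_augmentation(images, training=training)
    x = self.patch_projection(augmented_images)
    for stage in self.stages:
        x = stage(x, training=training)
    logits = self.global_avg_pool(x)
    return logits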

Upvotes: 1
