Reputation: 11362
If you have a standard UNet encoder such as resnet50, it is easy to load pretrained weights for it. For example:
import segmentation_models_pytorch as smp

ENCODER = 'resnet50'
ENCODER_WEIGHTS = 'imagenet'
CLASSES = class_names
ACTIVATION = 'sigmoid'  # could be None for logits or 'softmax2d' for multiclass segmentation

# create segmentation model with pretrained encoder
model = smp.Unet(
    encoder_name=ENCODER,
    encoder_weights=ENCODER_WEIGHTS,
    classes=len(CLASSES),
    activation=ACTIVATION,
)
preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)
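For context, a minimal inference sketch (the 256x256 random image is just a placeholder; preprocessing_fn normalizes an HWC array with the encoder's ImageNet statistics):

import numpy as np
import torch

# Hypothetical RGB image in HWC layout with values in [0, 1]
image = np.random.rand(256, 256, 3).astype('float32')
x = preprocessing_fn(image)
# Move channels first and add a batch dimension before the forward pass
x = torch.from_numpy(x).permute(2, 0, 1).unsqueeze(0).float()
with torch.no_grad():
    mask = model(x)  # shape: (1, len(CLASSES), 256, 256)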
However, suppose you have a custom-made UNet whose encoder is not necessarily based on resnet50, such as:
import torch
import torch.nn as nn

class VGGBlock(nn.Module):
    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_channels, middle_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(middle_channels)
        self.conv2 = nn.Conv2d(middle_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        return out

class UNet(nn.Module):
    def __init__(self, num_classes, input_channels=3, **kwargs):
        super().__init__()
        nb_filter = [32, 64, 128, 256, 512]
        self.pool = nn.MaxPool2d(2, 2)
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])
        self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])
        self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])
        self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])
        self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])
        self.conv3_1 = VGGBlock(nb_filter[3]+nb_filter[4], nb_filter[3], nb_filter[3])
        self.conv2_2 = VGGBlock(nb_filter[2]+nb_filter[3], nb_filter[2], nb_filter[2])
        self.conv1_3 = VGGBlock(nb_filter[1]+nb_filter[2], nb_filter[1], nb_filter[1])
        self.conv0_4 = VGGBlock(nb_filter[0]+nb_filter[1], nb_filter[0], nb_filter[0])
        self.final = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)

    def forward(self, input):
        x0_0 = self.conv0_0(input)
        x1_0 = self.conv1_0(self.pool(x0_0))
        x2_0 = self.conv2_0(self.pool(x1_0))
        x3_0 = self.conv3_0(self.pool(x2_0))
        x4_0 = self.conv4_0(self.pool(x3_0))
        x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, self.up(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, self.up(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, self.up(x1_3)], 1))
        output = self.final(x0_4)
        return output
How can I do ImageNet pretraining for this encoder? I assume pretraining the encoder from scratch would take a long time. Is there a way to utilize an existing pre-trained encoder such as resnet50 for such a UNet?
Upvotes: 1
Views: 902
Reputation: 2093
Yes, it is possible to use just a pre-trained block instead of an entire network such as resnet50 from Torchvision. Since you mentioned a custom encoder based on a VGG-type block, I'm answering based on that.
Instead of defining the layers in VGGBlock manually, you can call the pre-trained VGG network within that class and select everything up to its 2nd conv layer.
First, you would need to get the pre-trained VGG network from Torchvision:
# Necessary imports
from torchvision.models import vgg16_bn
import torch
import torch.nn as nn
from copy import deepcopy

# Initialize the pre-trained vgg16 (with BatchNorm) network from torchvision
model = vgg16_bn(pretrained=True)
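To see exactly which pre-trained layers get reused, you can print the slice that the modified block below copies (output abbreviated; the exact repr depends on your torchvision version):

# Everything up to and including the 2nd conv layer of vgg16_bn
print(model.features[:6])
# Sequential(
#   (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
#   (1): BatchNorm2d(64, ...)
#   (2): ReLU(inplace=True)
#   (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
#   (4): BatchNorm2d(64, ...)
#   (5): ReLU(inplace=True)
# )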
Then, you can modify your VGGBlock as follows:
class VGGBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Copy the first conv-bn-relu-conv-bn-relu group of pre-trained vgg16_bn
        self.vggblock = deepcopy(model.features[:6])
        # Assigning new values to .in_channels / .num_features alone would not
        # resize the pre-trained weight tensors, so rebuild every layer whose
        # shape differs from the pre-trained one (those layers train from scratch)
        if in_channels != 3 or out_channels != 64:
            self.vggblock[0] = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        if out_channels != 64:
            self.vggblock[1] = nn.BatchNorm2d(out_channels)
            self.vggblock[3] = nn.Conv2d(out_channels, out_channels, 3, padding=1)
            self.vggblock[4] = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        return self.vggblock(x)
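A quick shape check of the modified block (the sizes here are arbitrary):

block = VGGBlock(3, 32)
x = torch.randn(1, 3, 64, 64)
print(block(x).shape)  # torch.Size([1, 32, 64, 64])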
I also modified your UNet class a bit; here is the modified code:
class UNet(nn.Module):
    def __init__(self, num_classes, input_channels):
        super().__init__()
        nb_filter = [32, 64, 128, 256, 512]
        self.pool = nn.MaxPool2d(2, 2)
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv0_0 = VGGBlock(input_channels, nb_filter[0])
        self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1])
        self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2])
        self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3])
        self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4])
        self.conv3_1 = VGGBlock(nb_filter[3]+nb_filter[4], nb_filter[3])
        self.conv2_2 = VGGBlock(nb_filter[2]+nb_filter[3], nb_filter[2])
        self.conv1_3 = VGGBlock(nb_filter[1]+nb_filter[2], nb_filter[1])
        self.conv0_4 = VGGBlock(nb_filter[0]+nb_filter[1], nb_filter[0])
        self.final = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)

    def forward(self, input):
        x0_0 = self.conv0_0(input)
        x1_0 = self.conv1_0(self.pool(x0_0))
        x2_0 = self.conv2_0(self.pool(x1_0))
        x3_0 = self.conv3_0(self.pool(x2_0))
        x4_0 = self.conv4_0(self.pool(x3_0))
        x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, self.up(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, self.up(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, self.up(x1_3)], 1))
        output = self.final(x0_4)
        return output
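As a quick sanity check (arbitrary sizes; the spatial dimensions should be divisible by 16 because of the four pooling stages):

net = UNet(num_classes=2, input_channels=3)
x = torch.randn(1, 3, 224, 224)
print(net(x).shape)  # torch.Size([1, 2, 224, 224])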
You will notice that, both in the VGGBlock and in the UNet class, I dropped the middle_channels argument you used in your snippets. That argument is redundant here because your middle_channels and out_channels are always the same. The code above builds the same UNet architecture you posted in the question, with the pre-trained VGG weights kept wherever the layer shapes match.
Upvotes: 1