Reputation: 155
I want to extract features from some of the layers of a pretrained model. To this end, I am using the pretrained model from here. I removed some of the final layers, and to load the pretrained weights I use strict=False.
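For reference, the loading step looks roughly like this (the checkpoint path is only a placeholder):

import torch

model = create_r2plus1d()  # create_r2plus1d is defined in the linked model code
state_dict = torch.load('path/to/checkpoint.pth')  # placeholder path
# strict=False skips the checkpoint keys that no longer match after removing the final layers
model.load_state_dict(state_dict, strict=False)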
The architecture of the model is as follows:
Net(
  (blocks): ModuleList(
    (0): ResNetBasicStem(
      (conv): Conv3d(3, 64, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False)
      (norm): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (activation): ReLU()
    )
    (1): ResStage(
      (res_blocks): ModuleList(
        (0): ResBlock(
          (branch1_conv): Conv3d(64, 256, kernel_size=(1, 1, 1), stride=(1, 2, 2), bias=False)
          (branch1_norm): BatchNorm3d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (branch2): BottleneckBlock(
            (conv_a): Conv3d(64, 64, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_a): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_a): ReLU()
            (conv_b): Conv2plus1d(
              (conv_t): Conv3d(64, 64, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False)
              (norm): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
              (activation): ReLU()
              (conv_xy): Conv3d(64, 64, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1), bias=False)
            )
            (norm_b): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_b): ReLU()
            (conv_c): Conv3d(64, 256, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_c): BatchNorm3d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (activation): ReLU()
        )
        (1): ResBlock(
          (branch2): BottleneckBlock(
            (conv_a): Conv3d(256, 64, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_a): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_a): ReLU()
            (conv_b): Conv2plus1d(
              (conv_t): Conv3d(64, 64, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False)
              (norm): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
              (activation): ReLU()
              (conv_xy): Conv3d(64, 64, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=False)
            )
            (norm_b): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_b): ReLU()
            (conv_c): Conv3d(64, 256, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_c): BatchNorm3d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (activation): ReLU()
        )
        (2): ResBlock(
          (branch2): BottleneckBlock(
            (conv_a): Conv3d(256, 64, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_a): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_a): ReLU()
            (conv_b): Conv2plus1d(
              (conv_t): Conv3d(64, 64, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False)
              (norm): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
              (activation): ReLU()
              (conv_xy): Conv3d(64, 64, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=False)
            )
            (norm_b): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_b): ReLU()
            (conv_c): Conv3d(64, 256, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_c): BatchNorm3d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (activation): ReLU()
        )
      )
    )
    (2): ResStage(
      (res_blocks): ModuleList(
        (0): ResBlock(
          (branch1_conv): Conv3d(256, 512, kernel_size=(1, 1, 1), stride=(1, 2, 2), bias=False)
          (branch1_norm): BatchNorm3d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (branch2): BottleneckBlock(
            (conv_a): Conv3d(256, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_a): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_a): ReLU()
            (conv_b): Conv2plus1d(
              (conv_t): Conv3d(128, 128, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False)
              (norm): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
              (activation): ReLU()
              (conv_xy): Conv3d(128, 128, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1), bias=False)
            )
            (norm_b): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_b): ReLU()
            (conv_c): Conv3d(128, 512, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_c): BatchNorm3d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (activation): ReLU()
        )
        (1): ResBlock(
          (branch2): BottleneckBlock(
            (conv_a): Conv3d(512, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_a): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_a): ReLU()
            (conv_b): Conv2plus1d(
              (conv_t): Conv3d(128, 128, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False)
              (norm): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
              (activation): ReLU()
              (conv_xy): Conv3d(128, 128, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=False)
            )
            (norm_b): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_b): ReLU()
            (conv_c): Conv3d(128, 512, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_c): BatchNorm3d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (activation): ReLU()
        )
        (2): ResBlock(
          (branch2): BottleneckBlock(
            (conv_a): Conv3d(512, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_a): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_a): ReLU()
            (conv_b): Conv2plus1d(
              (conv_t): Conv3d(128, 128, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False)
              (norm): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
              (activation): ReLU()
              (conv_xy): Conv3d(128, 128, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=False)
            )
            (norm_b): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_b): ReLU()
            (conv_c): Conv3d(128, 512, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_c): BatchNorm3d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (activation): ReLU()
        )
        (3): ResBlock(
          (branch2): BottleneckBlock(
            (conv_a): Conv3d(512, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_a): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_a): ReLU()
            (conv_b): Conv2plus1d(
              (conv_t): Conv3d(128, 128, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False)
              (norm): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
              (activation): ReLU()
              (conv_xy): Conv3d(128, 128, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=False)
            )
            (norm_b): BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (act_b): ReLU()
            (conv_c): Conv3d(128, 512, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
            (norm_c): BatchNorm3d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (activation): ReLU()
        )
      )
    )
  )
)
I use a forward hook to extract features from intermediate layers, and my method for grabbing the outputs of (1): ResStage and (2): ResStage is as follows:
import torch.nn as nn

# create_r2plus1d is defined in the linked model code
class mymodel(nn.Module):
    def __init__(self, pretrained=False):
        super(mymodel, self).__init__()
        self.activation = {}

        def get_activation(name):
            def hook(model, input, output):
                self.activation[name] = output.detach()
            return hook

        self.r2plus1d = create_r2plus1d()
        self.r2plus1d.Net.blocks[1].register_forward_hook(get_activation('ResBlock1'))
        self.r2plus1d.Net.blocks[2].register_forward_hook(get_activation('ResBlock2'))

    def forward(self, x, out_consp=False):
        x = self.r2plus1d(x)
        block1_output = self.activation['ResBlock1']  # channel_num: 256
        block2_output = self.activation['ResBlock2']  # channel_num: 512
        return block1_output, block2_output
Unfortunately, the error says that there is no Net inside the model (it comes up when the hooks are registered). For other pretrained models I could use this kind of approach to extract features from intermediate layers, but seemingly, if I'm not mistaken, it is tricky to extract features from Net.
Upvotes: 1
Views: 122
Reputation: 1204
Looking at the link you provided, the function create_r2plus1d() returns the following:
return Net(blocks=nn.ModuleList(blocks))
Your object self.r2plus1d is already a Net instance, so your line
self.r2plus1d.Net.blocks[1].register_forward_hook(get_activation('ResBlock1'))
is basically like calling Net twice; the Net instance has no attribute named Net.
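A quick way to confirm this (a minimal check, with create_r2plus1d taken from the linked model code):

net = create_r2plus1d()
print(hasattr(net, 'blocks'))  # True: blocks is defined directly on the Net instance
print(hasattr(net, 'Net'))     # False: net itself is the Net, so net.Net raises an AttributeError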
You probably just have to call it like this and it should work:
self.r2plus1d.blocks[1].register_forward_hook(get_activation('ResBlock1'))
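For completeness, your module with both hooks adjusted would look like this (an untested sketch; the dummy input below assumes a 16-frame clip at 112x112, which may differ for your setup):

import torch
import torch.nn as nn

class mymodel(nn.Module):
    def __init__(self, pretrained=False):
        super(mymodel, self).__init__()
        self.activation = {}

        def get_activation(name):
            def hook(model, input, output):
                self.activation[name] = output.detach()
            return hook

        self.r2plus1d = create_r2plus1d()  # from the linked model code
        # Register the hooks directly on blocks; there is no Net attribute
        self.r2plus1d.blocks[1].register_forward_hook(get_activation('ResBlock1'))
        self.r2plus1d.blocks[2].register_forward_hook(get_activation('ResBlock2'))

    def forward(self, x, out_consp=False):
        x = self.r2plus1d(x)
        block1_output = self.activation['ResBlock1']  # channel_num: 256
        block2_output = self.activation['ResBlock2']  # channel_num: 512
        return block1_output, block2_output

model = mymodel()
feats1, feats2 = model(torch.randn(1, 3, 16, 112, 112))  # (batch, channels, frames, H, W)
print(feats1.shape, feats2.shape)  # expected: 256 and 512 channels respectively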
Let me know if this helps.
Upvotes: 3