How to take the depth of a neural network as an argument while constructing the network in PyTorch

I have written the following code to take the depth of the network as a parameter in PyTorch. Later I realized that even if I use many hidden layers, the number of learnable parameters remains the same.

class Net3(torch.nn.Module):
    """Feed-forward network whose depth is taken as a constructor argument.

    NOTE(review): this version creates only ONE hidden ``Linear`` layer and
    re-applies it ``depth`` times in ``forward()``, so the learnable
    parameter count does not grow with ``depth`` — the hidden weights are
    shared across every "hidden" application. This is exactly the issue
    the question describes.

    Args:
        n_feature: number of input features.
        n_hidden: width of the hidden layer.
        n_output: number of output units.
        depth: how many times the (single, shared) hidden layer is applied.
        init: accepted but unused here — presumably an init scheme; confirm
            against the caller.
    """

    def __init__(self, n_feature, n_hidden, n_output, depth, init):
        super(Net3, self).__init__()
        # `device` is assumed to be a module-level torch.device defined
        # elsewhere in this file.
        self.input = torch.nn.Linear(n_feature, n_hidden).float().to(device)
        # Single shared hidden layer — the source of the constant
        # parameter count noted in the question.
        self.hidden = torch.nn.Linear(n_hidden, n_hidden).float().to(device)
        self.predict = torch.nn.Linear(n_hidden, n_output).float().to(device)
        self.depth = depth

    def forward(self, x):
        x = F.relu(self.input(x))           # input -> hidden activation
        for i in range(self.depth):
            x = F.relu(self.hidden(x))      # same layer applied repeatedly
        x = self.predict(x)                 # no activation on the output layer
        return x

Is there any other way to achieve this?

Upvotes: 0

Views: 626

Answers (1)

hdkrgr
hdkrgr

Reputation: 1736

In `__init__` you need to create multiple hidden layers; currently you're only making one. One possibility to do this with little overhead is using a `torch.nn.ModuleDict`, which will give you named layers:

class Net3(torch.nn.Module):
    """Feed-forward network with ``depth`` *distinct* hidden layers.

    Unlike the question's version, each hidden layer is a separate
    ``Linear`` module stored in a ``ModuleDict``, so the learnable
    parameter count grows with ``depth``. ``ModuleDict`` preserves
    insertion order, so iteration in ``forward()`` visits
    input -> hidden_1 .. hidden_{depth-1} -> output.

    Args:
        n_feature: number of input features.
        n_hidden: width of each hidden layer.
        n_output: number of output units.
        depth: controls how many hidden layers are created
            (``depth - 1`` distinct hidden layers, matching the
            original answer's ``range(1, depth)``).
        init: accepted but unused here — presumably an init scheme;
            confirm against the caller.
    """

    def __init__(self, n_feature, n_hidden, n_output, depth, init):
        super(Net3, self).__init__()
        # An ordered collection that will hold the named layers.
        # Use torch.nn.ModuleDict for consistency with the rest of the
        # file (a bare `nn` may not be imported).
        self.layers = torch.nn.ModuleDict()

        # `device` is assumed to be a module-level torch.device defined
        # elsewhere in this file.
        self.layers['input'] = torch.nn.Linear(n_feature, n_hidden).float().to(device)

        # depth - 1 distinct hidden layers, each with its own parameters.
        for i in range(1, depth):
            self.layers['hidden_' + str(i)] = torch.nn.Linear(n_hidden, n_hidden).float().to(device)

        self.layers['output'] = torch.nn.Linear(n_hidden, n_output).float().to(device)
        self.depth = depth

    def forward(self, x):
        names = list(self.layers)
        # ReLU after every layer except the last: the question's network
        # applies no activation to the final `predict` layer, and a ReLU
        # there would clamp negative outputs.
        for name in names[:-1]:
            x = F.relu(self.layers[name](x))
        x = self.layers[names[-1]](x)
        return x

Upvotes: 1

Related Questions