0 votes
1.1k views
in Technique by (71.8m points)

pytorch - running_mean should contain 25 elements not 32 when using BatchNorm1d

Code:

First, I define the model:

# Imports assumed for this snippet (not shown in the original post):
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.optim as optim

BN = nn.BatchNorm1d  # alias used below; also assigned in the second snippet

class net(nn.Module):
    def __init__(self, max_len, feature_linear, rnn, input_size, hidden_size, output_dim, num_rnn_layers, bidirectional, batch_first=True, p=0.2):
        super(net, self).__init__()
        self.max_len = max_len
        self.feature_linear = feature_linear
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        self.num_directions = 2 if bidirectional == True else 1
        self.p = p
        self.batch_first = batch_first
        self.linear1 = nn.Linear(max_len, feature_linear) 
        init.kaiming_normal_(self.linear1.weight, mode='fan_in')
        self.BN1 = BN(feature_linear) 
     
        self.linear2 = nn.Linear(feature_linear, feature_linear) 
        init.kaiming_normal_(self.linear2.weight, mode='fan_in')
        self.BN2 = BN(feature_linear) 

        self.linear5 = nn.Linear(feature_linear, feature_linear)
        init.kaiming_normal_(self.linear5.weight, mode='fan_in')
        self.BN5 = BN(feature_linear)
        
        self.rnn = rnn(input_size, hidden_size, num_rnn_layers, bidirectional=bidirectional, batch_first=batch_first) 
        self.hidden_to_out1 = nn.Linear(hidden_size * self.num_directions, hidden_size, bias=False) 
        init.kaiming_normal_(self.hidden_to_out1.weight, mode='fan_in')
        self.BN6 = BN(hidden_size) 

    def forward(self, xb, seq_len_crt):
        rnn_input = torch.zeros(xb.shape[0], self.feature_linear, self.input_size)
        for i in range(self.input_size): 
            out = self.linear1(xb[:, :, i]) # xb[:, :, i].shape: (1, 34), out.shape: (1, 100)
            out = F.relu(out)               # in: out.shape (1, 100), out: out.shape (1, 100)
            self.BN1.eval()
            out = self.BN1(out)             # in: out.shape (1, 100), out: out.shape (1, 100)
            
            
            out = self.linear2(out)
            out = F.relu(out)
            self.BN2.eval()
            out = self.BN2(out)
            
            out = self.linear5(out)
#             out = self.dropout2(out)
            out = F.relu(out)
            self.BN5.eval()
            out = self.BN5(out)
            
            rnn_input[:, :, i] = out

        packed = nn.utils.rnn.pack_padded_sequence(rnn_input, seq_len_crt, batch_first=self.batch_first, enforce_sorted=False)
        out, states = self.rnn(packed)
        padded_out, input_sizes = nn.utils.rnn.pad_packed_sequence(out, batch_first=self.batch_first)
        # assert torch.all(torch.tensor(seq_len_crt).eq(input_sizes))

        y_hat = self.hidden_to_out1(padded_out)
        y_hat = F.relu(y_hat)
        self.BN6.eval()
        y_hat = self.BN6(y_hat)

        
        return y_hat.squeeze(-1)
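
For context, the pack/pad round trip in forward can be checked on its own. This is a minimal sketch with made-up shapes (batch of 4, sequence length 25, and the input_size / hidden_size values from the second snippet below), not the real data:

import torch
import torch.nn as nn

# Stand-alone round trip through pack_padded_sequence / pad_packed_sequence,
# mirroring how forward() feeds the bidirectional LSTM (dummy shapes only).
rnn = nn.LSTM(input_size=5, hidden_size=32, num_layers=2, bidirectional=True, batch_first=True)
x = torch.randn(4, 25, 5)                  # (batch, seq_len, input_size)
lengths = torch.tensor([25, 20, 18, 10])   # true length of each sequence in the batch

packed = nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False)
out, (h, c) = rnn(packed)
padded_out, out_lengths = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
print(padded_out.shape)                    # torch.Size([4, 25, 64]), i.e. hidden_size * num_directions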

Second, I instantiate the model and set up the loss function and optimizer:



# max_len and batch_size are defined earlier in the notebook (values not shown in the post)
input_size = 5
hidden_size = 32
output_dim = 1
num_rnn_layers = 2
bidirectional = True
rnn = nn.LSTM
batch_size = batch_size
feature_linear = 60
BN = nn.BatchNorm1d


model = net(max_len, feature_linear, rnn, input_size, hidden_size, output_dim, num_rnn_layers, bidirectional, p=0.1)
model.train()
loss_func = nn.MSELoss(reduction='none')
# optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# optimizer = optim.Adam(model.parameters(), lr=0.01)
optimizer = optim.AdamW(model.parameters(), lr=0.001, weight_decay=0.05)

Then I fit the model:

fit(500, model, loss_func, optimizer, train_dl, valid_dl, valid_ds, seq_len_train_iter, seq_len_valid_iter)
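
The fit function itself is not shown in the post. Purely as a sketch of where the error gets raised, a typical training step for this setup might look like the code below; the assumption that train_dl yields (xb, yb) pairs and seq_len_train_iter yields the matching per-batch sequence lengths is mine, not the poster's:

# Rough sketch only -- NOT the poster's fit(); it assumes train_dl yields (xb, yb)
# and seq_len_train_iter yields the matching sequence lengths for each batch.
def train_one_epoch(model, loss_func, optimizer, train_dl, seq_len_train_iter):
    model.train()
    for (xb, yb), seq_len_crt in zip(train_dl, seq_len_train_iter):
        y_hat = model(xb, seq_len_crt)       # forward pass -- this is where the RuntimeError appears
        loss = loss_func(y_hat, yb).mean()   # reduction='none', so reduce explicitly
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()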

but I got an error; the key error message is:

running_mean should contain 25 elements not 32

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-221-fb062ad3f20e> in <module>
----> 1 fit(500, model, loss_func, optimizer, train_dl, valid_dl, valid_ds, seq_len_train_iter, seq_len_valid_iter)

<ipython-input-216-330ddbc44040> in forward(self, xb, seq_len_crt)
     88         y_hat = F.relu(y_hat)
     89         self.BN6.eval()
---> 90         y_hat = self.BN6(y_hat)
     91 
     92         y_hat = self.hidden_to_out2(y_hat)


RuntimeError: running_mean should contain 25 elements not 32

So I checked:

self.hidden_to_out1 = nn.Linear(hidden_size * self.num_directions, hidden_size, bias=False) 
init.kaiming_normal_(self.hidden_to_out1.weight, mode='fan_in')
self.BN6 = BN(hidden_size) 

hidden_size = 32, so I don't know why the error says 25, or how to fix it.
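
For what it's worth, the mismatch can be reproduced in isolation. nn.BatchNorm1d(C) expects its num_features C in dimension 1, i.e. input of shape (N, C) or (N, C, L). After pad_packed_sequence and hidden_to_out1, y_hat is 3-D with shape (batch, seq_len, hidden_size), so BatchNorm1d reads the padded sequence length (apparently 25 in this batch; that number is only inferred from the error text) as the channel dimension instead of hidden_size = 32. A minimal sketch with assumed shapes that triggers the same message:

import torch
import torch.nn as nn

bn = nn.BatchNorm1d(32)         # running_mean / running_var hold 32 elements
bn.eval()                       # forward() calls self.BN6.eval() before applying it

y_hat = torch.randn(4, 25, 32)  # (batch, seq_len, hidden_size) -- assumed shapes
bn(y_hat)                       # RuntimeError: running_mean should contain 25 elements not 32

One possible direction, offered only as a hint and not as the poster's intent, is to move the feature dimension into position 1 before the batch norm, e.g. self.BN6(y_hat.permute(0, 2, 1)).permute(0, 2, 1).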

Question from: https://stackoverflow.com/questions/65884987/running-mean-should-contain-elements-not-when-use-batchnorm1d


1 Answer

0 votes
by (71.8m points)
Waiting for answers
