python – PyTorch LSTM model with unequal hidden layer sizes

I have tuned an LSTM model in Keras as follows, but I don't know how to write the same model in PyTorch. I have put my PyTorch code below, but I don't think it is right, because it does not give the correct answer. However much I searched, I could not find a PyTorch example with more than one LSTM layer where the layers have unequal hidden sizes. My input shape is (None, 60, 10) and my output shape is (None, 15). Please show a similar example of my Keras model in PyTorch. Thanks.

my_Keras_model:

from tensorflow import keras
from tensorflow.keras import layers

model_input = keras.Input(shape=(60, 10))
x_1 = layers.LSTM(160, return_sequences=True)(model_input)   # first LSTM, hidden size 160
x_1 = layers.LSTM(190)(x_1)                                   # second LSTM, hidden size 190
x_1 = layers.Dense(200)(x_1)
x_1 = layers.Dense(15)(x_1)
model = keras.models.Model(model_input, x_1)
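
Just for context, this is the kind of quick shape check I do on the Keras model (the dummy batch and the numpy import are only there for illustration):

import numpy as np

dummy_batch = np.zeros((1, 60, 10), dtype="float32")   # one sample, 60 timesteps, 10 features
print(model(dummy_batch).shape)                         # should be (1, 15)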

my_pytorch_model:

import torch
import torch.nn as nn

input_dim = 10
hidden_dim_1 = 160
hidden_dim_2 = 190
hidden_dim_3 = 200
num_layers = 1
output_dim = 15

class LSTM(nn.Module):
    def __init__(self, input_dim, hidden_dim_1, hidden_dim_2, hidden_dim_3 ,num_layers, output_dim):
        super(LSTM, self).__init__()
        self.hidden_dim_1 = hidden_dim_1
        self.hidden_dim_2 = hidden_dim_2
        self.hidden_dim_3 = hidden_dim_3
        self.num_layers = num_layers
        
        # two stacked LSTMs with different hidden sizes, mirroring the Keras model
        self.lstm_1 = nn.LSTM(input_dim, hidden_dim_1, num_layers, batch_first=True)
        self.lstm_2 = nn.LSTM(hidden_dim_1, hidden_dim_2, num_layers, batch_first=True)
        # two dense layers: hidden_dim_2 -> hidden_dim_3 -> output_dim
        self.fc_1 = nn.Linear(hidden_dim_2, hidden_dim_3)
        self.fc_out = nn.Linear(hidden_dim_3, output_dim)

    def forward(self, x):
        input_X = x
        # initial hidden and cell states for both LSTMs (batch dimension of 1,
        # because the loop below feeds one sample at a time)
        h_1 = torch.zeros(self.num_layers, 1, self.hidden_dim_1).requires_grad_()
        c_1 = torch.zeros(self.num_layers, 1, self.hidden_dim_1).requires_grad_()
        h_2 = torch.zeros(self.num_layers, 1, self.hidden_dim_2).requires_grad_()
        c_2 = torch.zeros(self.num_layers, 1, self.hidden_dim_2).requires_grad_()
        out_put = []

        # split the batch into chunks of one sample each and run every sample
        # through both LSTMs and both dense layers
        for i, input_t in enumerate(input_X.chunk(input_X.size(0))):
            out_lstm_1, (h_1, c_1) = self.lstm_1(input_t, (h_1.detach(), c_1.detach()))
            out_lstm_2, (h_2, c_2) = self.lstm_2(out_lstm_1, (h_2.detach(), c_2.detach()))
            out_Dense_1 = self.fc_1(out_lstm_2[:, -1, :])   # last timestep only
            out_Dense_out = self.fc_out(out_Dense_1)
            out_put.append(out_Dense_out)
        out_put = torch.stack(out_put, 0).squeeze(1)
        return out_put
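
And this is how I check the PyTorch model against it (again, the dummy batch is only for illustration):

model = LSTM(input_dim, hidden_dim_1, hidden_dim_2, hidden_dim_3, num_layers, output_dim)
dummy_batch = torch.randn(4, 60, 10)   # 4 samples, 60 timesteps, 10 features
print(model(dummy_batch).shape)        # I expect torch.Size([4, 15])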