
PyTorch C++ LSTM


Code
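The example below builds a 3-layer LSTM with the libtorch C++ API, feeds it a random input of shape (10, 16, 128), i.e. 10 time steps, batch size 16, and 128 input features, and then checks the shapes of the output tensor and the stacked hidden/cell state.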

#include <torch/script.h>
#include <ATen/ATen.h>

#include <torch/nn/module.h>
#include <torch/nn/modules/batchnorm.h>
#include <torch/nn/modules/conv.h>
#include <torch/nn/modules/rnn.h>
#include <torch/nn/modules/dropout.h>
#include <torch/nn/modules/embedding.h>
#include <torch/nn/modules/functional.h>
#include <torch/nn/modules/linear.h>
#include <torch/nn/modules/sequential.h>
#include <torch/optim.h>
#include <torch/types.h>
#include <torch/utils.h>

#include <cassert>
#include <cmath>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <memory>
#include <random>
#include <vector>

using namespace std;
using namespace at;
using namespace torch::nn;
using namespace torch::optim;

// Expect the LSTM to produce 64 hidden units from 3 layers, given an input
// of 10 time steps and batch size 16 (shape 10 x 16 x n, i.e. the default
// (seq_len, batch, features) layout).
void check_lstm_sizes(RNNOutput output) {
  assert(output.output.ndimension() == 3);
  assert(output.output.size(0) == 10);  // sequence length
  assert(output.output.size(1) == 16);  // batch size
  assert(output.output.size(2) == 64);  // hidden size

  assert(output.state.ndimension() == 4);
  assert(output.state.size(0) == 2);   // (hx, cx)
  assert(output.state.size(1) == 3);   // layers
  assert(output.state.size(2) == 16);  // batch size
  assert(output.state.size(3) == 64);  // hidden dimensions
  // Something should actually be in the hidden state.
  assert(output.state.norm().item<float>() > 0);
}


int main(int argc, const char* argv[]) {
  // 3-layer LSTM: 128 input features, 64 hidden units, dropout 0.2 between layers.
  LSTM model(LSTMOptions(128, 64).layers(3).dropout(0.2));

  // Input layout is (sequence length, batch size, input size) = (10, 16, 128).
  auto x = torch::randn({10, 16, 128}, torch::requires_grad());
  auto output = model->forward(x);

  check_lstm_sizes(output);

  std::cout << output.output.sizes() << std::endl;
  std::cout << "ok\n";
  return 0;
}
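
The code above uses the older libtorch RNN interface, where LSTMOptions takes layers() and forward() returns an RNNOutput struct. On newer libtorch releases (roughly 1.5 and later) the interface changed: the option is named num_layers() and forward() returns the output together with a (h_n, c_n) tuple. A minimal sketch assuming that newer API might look like this:

#include <torch/torch.h>
#include <iostream>

int main() {
  // Sketch assuming the newer libtorch API: LSTMOptions::num_layers()
  // and forward() returning (output, (h_n, c_n)) as nested std::tuples.
  torch::nn::LSTM model(
      torch::nn::LSTMOptions(128, 64).num_layers(3).dropout(0.2));

  auto x = torch::randn({10, 16, 128});      // (seq_len, batch, features)
  auto result = model->forward(x);

  auto output = std::get<0>(result);         // all time steps: (10, 16, 64)
  auto state = std::get<1>(result);          // (h_n, c_n)
  std::cout << output.sizes() << " "         // [10, 16, 64]
            << std::get<0>(state).sizes()    // h_n: [3, 16, 64]
            << std::endl;
  return 0;
}

Either version is typically built against libtorch with CMake, using find_package(Torch REQUIRED), linking ${TORCH_LIBRARIES}, and passing CMAKE_PREFIX_PATH pointing at the unpacked libtorch directory.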