Train an XOR gate with a simple fully-connected NN.
The main goal is to get familiar with the PyTorch C++ API.
#include <torch/torch.h>
#include <iostream>
using namespace torch::indexing;
// A small fully-connected network: 2 inputs -> 16 hidden units -> 1 output.
struct Xor_net : torch::nn::Module
{
    Xor_net()
    {
        fc1 = register_module("fc1", torch::nn::Linear(2, 16));
        fc2 = register_module("fc2", torch::nn::Linear(16, 1));
    }
    torch::Tensor forward(torch::Tensor x)
    {
        x = torch::relu(fc1->forward(x));
        // The output layer also goes through ReLU; that keeps the predictions
        // non-negative, which matches the 0/1 targets used below.
        x = torch::relu(fc2->forward(x));
        return x;
    }
    torch::nn::Linear fc1{nullptr}, fc2{nullptr};
};
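// A possible alternative, shown only as a sketch and not part of the original
// program: the same two-layer network can be written with the built-in
// torch::nn::Sequential container instead of a custom Module, e.g.
//
//   torch::nn::Sequential xor_seq(
//       torch::nn::Linear(2, 16), torch::nn::ReLU(),
//       torch::nn::Linear(16, 1), torch::nn::ReLU());
//
// xor_seq->forward(x) would then play the role of Xor_net::forward(x).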
int main()
{
    torch::manual_seed(0);
    Xor_net a;
    // Each row is {input1, input2, expected XOR output}.
    float data[] = {1, 1, 0,
                    1, 0, 1,
                    0, 1, 1,
                    0, 0, 0};
    // from_blob does not copy; the tensor aliases the stack array above,
    // which stays alive for the whole of main().
    torch::Tensor training_data = torch::from_blob(data, {4, 3});
    torch::optim::SGD optimizer(a.parameters(), /*lr=*/0.01);
    // The first two columns are the inputs, the last column is the XOR label.
    torch::Tensor train_x = training_data.index({Slice(), Slice(None, 2)});
    torch::Tensor train_y = training_data.index({Slice(), Slice(2, 3)});
    for (int i = 0; i <= 10000; ++i)
    {
        optimizer.zero_grad();
        torch::Tensor prediction = a.forward(train_x);
        torch::Tensor loss = torch::nn::functional::mse_loss(prediction, train_y);
        loss.backward();
        optimizer.step();
        if (i % 1000 == 0)
        {
            std::cout << "loss at iter " << i << ": " << loss.item<float>() << std::endl;
        }
    }
    {
        std::cout << "final learning result: " << std::endl;
        // Disable gradient tracking for the final forward pass.
        torch::NoGradGuard no_grad;
        std::cout << a.forward(train_x) << std::endl;
    }
}
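As a quick sanity check, one could also threshold the raw outputs at 0.5 and compare them against the labels. Below is a minimal sketch, assuming it is appended at the end of main() so that a, train_x and train_y are still in scope:

    // Sketch: turn the raw network outputs into hard 0/1 predictions.
    torch::NoGradGuard no_grad;
    torch::Tensor rounded = a.forward(train_x).round();   // threshold at 0.5
    torch::Tensor correct = rounded.eq(train_y).sum();    // count matching rows
    std::cout << "correct predictions: " << correct.item<int64_t>() << " / 4" << std::endl;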