
Section 9: autograd in deep learning development with libtorch



CMakeLists.txt

cmake_minimum_required (VERSION 3.8)

project(SOLDIER)
set(Torch_DIR "/libtorch/share/cmake/Torch")
set(PYTHON_EXECUTABLE "/usr/bin/python3")


find_package(Torch REQUIRED)
find_package(OpenCV REQUIRED)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")


set(CMAKE_BUILD_TYPE Debug)

include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${OpenCV_INCLUDE_DIRS})

add_executable(run main.cpp)
target_link_libraries(run "${OpenCV_LIBS}" "${TORCH_LIBRARIES}")
set_property(TARGET run PROPERTY CXX_STANDARD 14)
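
A note on building (assuming libtorch has been unpacked to /libtorch so that the Torch_DIR above resolves): a typical out-of-source build creates a build directory next to this CMakeLists.txt, runs cmake .. followed by cmake --build ., and then launches the resulting run executable. Alternatively, passing -DCMAKE_PREFIX_PATH=/libtorch on the cmake command line lets find_package(Torch) locate the library without hard-coding Torch_DIR.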

C++ (main.cpp)

#include <iostream>
#include <torch/torch.h>
#include <ATen/ATen.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/autograd/function.h>


using namespace torch::autograd;

void basic_autograd_operations_example() {
  std::cout << "====== Running: \"Basic autograd operations\" ======" << std::endl;

  // Create a tensor and set ``torch::requires_grad()`` to track computation with it
  auto x = torch::ones({2, 2}, torch::requires_grad());
  std::cout <<"x="<< x << std::endl;

  // 张量的加法操作:
  auto y = x + 2;
  std::cout << "y="<<y << std::endl;

  // y was created as the result of an addition, so it has a ``grad_fn``
  std::cout <<"y.grad_fn:"<< y.grad_fn()->name() << std::endl;

  // Do more operations on y
  auto z = y * y * 3;
  auto out = z.mean();

  std::cout << "z="<<z << std::endl;
  std::cout <<"z.grad_fn:"<< z.grad_fn()->name() << std::endl;
  std::cout <<"out="<< out << std::endl;
  std::cout <<"out.grad_fn:"<< out.grad_fn()->name() << std::endl;

  // ``.requires_grad_( ... )`` changes an existing tensor's ``requires_grad`` flag in-place.
  auto a = torch::randn({2, 2});
  a = ((a * 3) / (a - 1));
  std::cout << a.requires_grad() << std::endl;

  a.requires_grad_(true);
  std::cout << a.requires_grad() << std::endl;

  auto b = (a * a).sum();
  std::cout << b.grad_fn()->name() << std::endl;

  // Let's backprop now. Because ``out`` contains a single scalar, ``out.backward()``
  // is equivalent to ``out.backward(torch::tensor(1.))``.
  out.backward();

  // Print gradients d(out)/dx
  std::cout << x.grad() << std::endl;
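  // Expected output: a 2x2 tensor filled with 4.5. Since out = (1/4) * sum_i 3*(x_i + 2)^2,
  // d(out)/dx_i = (3/2)*(x_i + 2) = 4.5 when x_i = 1.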

  // Now let's take a look at an example of vector-Jacobian product:
  x = torch::randn(3, torch::requires_grad());

  y = x * 2;
  while (y.norm().item<double>() < 1000) {
    y = y * 2;
  }

  std::cout << y << std::endl;
  std::cout << y.grad_fn()->name() << std::endl;

  // If we want the vector-Jacobian product, pass the vector to ``backward`` as argument:
  auto v = torch::tensor({0.1, 1.0, 0.0001}, torch::kFloat);
  y.backward(v);

  std::cout << x.grad() << std::endl;
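  // Here y = 2^k * x after k doublings in the loop above, so the Jacobian dy/dx is 2^k * I
  // and the vector-Jacobian product x.grad() equals 2^k * v.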

  // You can also stop autograd from tracking history on tensors that require gradients
  // either by putting ``torch::NoGradGuard`` in a code block
  std::cout << x.requires_grad() << std::endl;
  std::cout << x.pow(2).requires_grad() << std::endl;

  {
    torch::NoGradGuard no_grad;
    std::cout << x.pow(2).requires_grad() << std::endl;
  }

  // Or by using ``.detach()`` to get a new tensor with the same content but that does
  // not require gradients:
  std::cout << x.requires_grad() << std::endl;
  y = x.detach();
  std::cout << y.requires_grad() << std::endl;
  std::cout << x.eq(y).all().item<bool>() << std::endl;
}

void compute_higher_order_gradients_example() {
  std::cout << "====== Running \"Computing higher-order gradients in C++\" ======" << std::endl;

  // One of the applications of higher-order gradients is calculating gradient penalty.
  // Let's see an example of it using ``torch::autograd::grad``:

  auto model = torch::nn::Linear(4, 3);

  auto input = torch::randn({3, 4}).requires_grad_(true);
  auto output = model(input);

  // Calculate loss
  auto target = torch::randn({3, 3});
  auto loss = torch::nn::MSELoss()(output, target);

  // Use norm of gradients as penalty
  auto grad_output = torch::ones_like(output);
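  // torch::autograd::grad computes the gradient of output w.r.t. input, weighted by
  // grad_output (a vector-Jacobian product). Passing /*create_graph=*/true records the
  // gradient computation itself, so the norm-based penalty below stays differentiable
  // and contributes to input.grad() when combined_loss.backward() runs.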
  auto gradient = torch::autograd::grad({output}, {input}, /*grad_outputs=*/{grad_output}, /*create_graph=*/true)[0];
  auto gradient_penalty = torch::pow((gradient.norm(2, /*dim=*/1) - 1), 2).mean();

  // Add gradient penalty to loss
  auto combined_loss = loss + gradient_penalty;
  combined_loss.backward();

  std::cout << input.grad() << std::endl;
}

// Inherit from Function
class LinearFunction : public Function<LinearFunction> {
 public:
  // Note that both forward and backward are static functions

  // bias is an optional argument
  static torch::Tensor forward(
      AutogradContext *ctx, torch::Tensor input, torch::Tensor weight, torch::Tensor bias = torch::Tensor()) {
    ctx->save_for_backward({input, weight, bias});
    auto output = input.mm(weight.t());
    if (bias.defined()) {
      output += bias.unsqueeze(0).expand_as(output);
    }
    return output;
  }

  static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {
    auto saved = ctx->get_saved_variables();
    auto input = saved[0];
    auto weight = saved[1];
    auto bias = saved[2];

    auto grad_output = grad_outputs[0];
    auto grad_input = grad_output.mm(weight);
    auto grad_weight = grad_output.t().mm(input);
    auto grad_bias = torch::Tensor();
    if (bias.defined()) {
      grad_bias = grad_output.sum(0);
    }
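
    // Return one gradient per forward() argument, in the same order (input, weight, bias);
    // an undefined tensor signals that no gradient flows to that argument.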

    return {grad_input, grad_weight, grad_bias};
  }
};

class MulConstant : public Function<MulConstant> {
 public:
  static torch::Tensor forward(AutogradContext *ctx, torch::Tensor tensor, double constant) {
    // ctx is a context object that can be used to stash information
    // for backward computation
    ctx->saved_data["constant"] = constant;
    return tensor * constant;
  }

  static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {
    // We return as many input gradients as there were arguments.
    // Gradients of non-tensor arguments to forward must be `torch::Tensor()`.
    return {grad_outputs[0] * ctx->saved_data["constant"].toDouble(), torch::Tensor()};
  }
};

void custom_autograd_function_example() {
  std::cout << "====== Running \"Using custom autograd function in C++\" ======" << std::endl;
  {
    auto x = torch::randn({2, 3}).requires_grad_();
    auto weight = torch::randn({4, 3}).requires_grad_();
    auto y = LinearFunction::apply(x, weight);
    y.sum().backward();
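
    // With x of shape {2, 3} and weight of shape {4, 3}, y = x * weight^T has shape {2, 4};
    // backpropagating through sum() gives x.grad() the shape of x and weight.grad() the
    // shape of weight.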

    std::cout << x.grad() << std::endl;
    std::cout << weight.grad() << std::endl;
  }
  {
    auto x = torch::randn({2}).requires_grad_();
    auto y = MulConstant::apply(x, 5.5);
    y.sum().backward();
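
    // Expected: x.grad() is a length-2 tensor filled with 5.5, since d(sum(5.5 * x))/dx_i = 5.5.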

    std::cout << x.grad() << std::endl;
  }
}

int autograd_example() {
  
  std::cout << "Autograd: Automatic Differentiation\n\n";
  // Create a tensor and set requires_grad=True to track computation with it:
  auto x = torch::ones({2, 2}, torch::TensorOptions().requires_grad(true));
  std::cout << "x:\n" << x << '\n';

  // Do a tensor operation:
  auto y = x + 2;
  std::cout << "y:\n" << y << '\n';

  // y was created as a result of an operation, so it has a grad_fn:
  std::cout << "y.grad_fn:\n" << y.grad_fn() << '\n';

  // Do more operations on y:
  auto z = y * y * 3;
  auto out = z.mean();
  std::cout << "z:\n" << z << "out:\n" << out << '\n';

  // .requires_grad_(...) changes an existing Tensor’s requires_grad flag in-place:
  auto a = torch::randn({2, 2});
  a = ((a * 3) / (a - 1));
  std::cout << a.requires_grad() << '\n';
  a.requires_grad_(true);
  std::cout << a.requires_grad() << '\n';
  auto b = (a * a).sum();
  std::cout << b.grad_fn()->name() << '\n';

  std::cout << "Gradients\n\n";

  // Let’s backprop now:
  out.backward();

  // Print gradients d(out)/dx:
  std::cout << "x.grad:\n" << x.grad() << '\n';

  // Example of vector-Jacobian product:
  x = torch::randn(3, torch::TensorOptions().requires_grad(true));
  y = x * 2;
  while (y.norm().item<double>() < 1000) {
      y = y * 2;
  }
  std::cout << "y:\n" << y << '\n';

  // Simply pass the vector to backward as argument:
  auto v = torch::tensor({0.1, 1.0, 0.0001}, torch::TensorOptions(torch::kFloat));
  y.backward(v);
  std::cout << "x.grad:\n" << x.grad() << '\n';

  // Stop autograd from tracking history on Tensors with .requires_grad=True:
  std::cout << "x.requires_grad\n" << x.requires_grad() << '\n';
  std::cout << "(x ** 2).requires_grad\n" << (x * x).requires_grad() << '\n';
  torch::NoGradGuard no_grad;
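  // Note: unlike the scoped block in basic_autograd_operations_example(), this guard stays
  // active until the end of the function, so the remaining statements below also run with
  // gradient tracking disabled.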
  std::cout << "(x ** 2).requires_grad\n" << (x * x).requires_grad() << '\n';

  // Or by using .detach() to get a new Tensor with the same content but that does not require gradients:
  std::cout << "x.requires_grad:\n" << x.requires_grad() << '\n';
  y = x.detach();
  std::cout << "y.requires_grad:\n" << y.requires_grad() << '\n';
  std::cout << "x.eq(y).all():\n" << x.eq(y).all() << '\n';
  return 0;
}

int main()
{
    std::cout << "Deep Learning with PyTorch" << std::endl;

    basic_autograd_operations_example();
    std::cout << "\n";

    compute_higher_order_gradients_example();
    std::cout << "\n";

    custom_autograd_function_example();
    std::cout<< "\n";

    autograd_example();
    return 0;
}