# 网络，层，权重，训练

// Runs one forward pass of the network over a single image and returns
// the vector of class probabilities produced by the final softmax layer.
//
// `image` is a rank-3 tensor indexed [row][col][channel] — presumably the
// preprocessed input image; confirm layout against the caller.
//
// NOTE(review): the original convolved an uninitialized empty tensor `t`
// instead of `image`, so the network never saw its input. Fixed by feeding
// `image` into the first convolution.
vector<float>
classify(const vector<vector<vector<float>>> &image)
{
    // Four conv -> ReLU -> 2x2 max-pool stages; each pool halves the
    // spatial resolution while the convolutions grow the channel depth.
    vector<vector<vector<float>>> t =
        convolve(image, weights_firstConv, biases_firstConv);
    t = activation(t, "relu");
    t = maxPool(t, 2, 2);

    t = convolve(t, weights_secondConv, biases_secondConv);
    t = activation(t, "relu");
    t = maxPool(t, 2, 2);

    t = convolve(t, weights_thirdConv, biases_thirdConv);
    t = activation(t, "relu");
    t = maxPool(t, 2, 2);

    t = convolve(t, weights_fourthConv, biases_fourthConv);
    t = activation(t, "relu");
    t = maxPool(t, 2, 2);

    // Collapse the rank-3 feature map into a rank-1 vector for the
    // fully-connected head.
    vector<float> flat = flatten(t);

    flat = dense(flat, weights_firstDense, biases_firstDense);
    flat = activation(flat, "relu");

    // Final dense layer maps features to one score per class; softmax
    // normalizes the scores into probabilities.
    flat = dense(flat, weights_labeller, biases_labeller);
    flat = activation(flat, "softmax");

    return flat;
}

• 矩阵是一个2阶张量，形状包括高度和宽度。 \begin{bmatrix}1.0&3.0&5.0\\2.0&4.0&6.0\end{bmatrix} 的形状（shape）为[2,3]。
• C++的浮点数向量是1阶张量，其形状是只含一个值的列表，该值就是向量中元素的数量。向量{1.0,2.0,3.0}的形状为[3]。
• 单个数字也可以被认为是0阶张量，其形状为[]。

# 模型中的层

## 卷积层（Convolution layer）

// Core of the convolution layer: for each of the `nkernels` filters `k`,
// slide the kernel over every valid output position (y, x) and accumulate
// the dot product of the kernel with the input patch under it, summing
// across all `depth` input channels. A bias per filter is added once per
// output cell.
// NOTE(review): assumes `out` was zero-initialized and that the input was
// padded (or the output shrunk) so that in[y + ky][x + kx] stays in
// bounds — confirm at the allocation site. Loop order fixes the float
// accumulation order; do not reorder casually.
for (size_t k = 0; k < nkernels; ++k) {
for (size_t y = 0; y < out_height; ++y) {
for (size_t x = 0; x < out_width; ++x) {
for (size_t c = 0; c < depth; ++c) {
for (size_t ky = 0; ky < kernel_height; ++ky) {
for (size_t kx = 0; kx < kernel_width; ++kx) {
// Multiply-accumulate: kernel weight at (ky, kx) for input
// channel c and filter k, times the input sample under it.
out[y][x][k] +=
weights[ky][kx][c][k] *
in[y + ky][x + kx][c];
}
}
}
// One bias per filter, added after all channel/kernel contributions.
out[y][x][k] += biases[k];
}
}
}

Every filter is small spatially (along width and height), but extends through the full depth of the input volume.

// Illustrative skeleton only: shows how to visit every sample of the
// input volume — each spatial position (y, x) across all `depth`
// channels. The body is intentionally empty in the article text.
for (size_t y = 0; y < in_height; ++y) {
for (size_t x = 0; x < in_width; ++x) {
for (size_t c = 0; c < depth; ++c) {
}
}
}

## 激活层（Activation layer）

// ReLU activation applied to one value: clamp negatives to zero,
// pass non-negative values through unchanged.
if (x < 0.0) {
x = 0.0;
}

float sum = 0.f;
for (size_t i = 0; i < sz; ++i) {
out[i] = exp(out[i]);
sum += out[i];
}
for (size_t i = 0; i < sz; ++i) {
out[i] /= sum;
}

## 最大池化层（Max pooling layer）

maxPool函数：

// Max pooling: each output cell (y, x, c) is the maximum of the
// non-overlapping pool_y x pool_x input window starting at
// (y * pool_y, x * pool_x), computed independently per channel c
// (stride equals the window size).
// NOTE(review): correctness assumes `out` was pre-initialized to a value
// no larger than any input — 0 is fine after ReLU (outputs >= 0), but
// inputs that can be negative would need -infinity initialization;
// confirm at the allocation site.
for (size_t y = 0; y < out_height; ++y) {
for (size_t x = 0; x < out_width; ++x) {
for (size_t i = 0; i < pool_y; ++i) {
for (size_t j = 0; j < pool_x; ++j) {
for (size_t c = 0; c < depth; ++c) {
float value = in[y * pool_y + i][x * pool_x + j][c];
out[y][x][c] = max(out[y][x][c], value);
}
}
}
}
}

## 扁平层（Flatten layer）

// Flatten: copy the rank-3 tensor `in` into the rank-1 buffer `out` in
// row-major, channel-innermost order: (y, x, c) -> y*width*depth +
// x*depth + c.
// NOTE(review): `i` is declared outside this fragment — presumably
// initialized to 0 just before the loop; confirm at the call site.
for (size_t y = 0; y < height; ++y) {
for (size_t x = 0; x < width; ++x) {
for (size_t c = 0; c < depth; ++c) {
out[i++] = in[y][x][c];
}
}
}

## 全连接层（Dense layer）

dense函数：

// Fully-connected layer: out[j] = sum_i(weights[i][j] * in[i]) + biases[j].
// Each output neuron j is computed independently: accumulate its weighted
// input sum (in ascending i order, matching the original accumulation
// order exactly), then add the bias last.
vector<float> out(out_size, 0.f);

for (size_t j = 0; j < out_size; ++j) {
    float acc = 0.f;
    for (size_t i = 0; i < in_size; ++i) {
        acc += weights[i][j] * in[i];
    }
    out[j] = acc + biases[j];
}

# 其他

15 篇文章24 人订阅

0 条评论

## 相关文章

### Torch7模型训练

Torch7搭建卷积神经网络详细教程已经详细的介绍啦Module模块，这里再次基础上再给出一些上Container、 Transfer Functions La...

399130

9520

51670

20820

### 01 TensorFlow入门（2）

Working with Matrices：         了解TensorFlow如何使用矩阵对于通过计算图理解数据流非常重要。 Getting read...

29060

582120

38650

### Batch Normalization怎么加入batch normalization

Batch Normalization 会使你的参数搜索问题变得很容易，使神经网络对超参数的选择更加稳定，超参数的范围会更加庞大，工作效果也很好，也会使你的训练...

9420

12920

49290