# 白给的性能不要？cvpr2021-Diverse branch block

• 论文地址：Diverse Branch Block: Building a Convolution as an Inception-like Unit
• 官方代码：DingXiaoH/DiverseBranchBlock

DBB结构转换

BN层公式为 $\mathrm{BN}(x) = \gamma \cdot \dfrac{x - \mu}{\sqrt{\sigma^2 + \epsilon}} + \beta$，其中 $\mu$、$\sigma^2$ 为统计均值和方差，$\gamma$、$\beta$ 为可学习的缩放和偏移参数。

# 转换2 分支相加

import oneflow as flow
import oneflow.typing as tp
import numpy as np
from typing import Tuple

@flow.global_function()
def conv_add(x: tp.Numpy.Placeholder(shape=(1, 2, 4, 4)))->Tuple[tp.Numpy, tp.Numpy]:
conv1 = flow.layers.conv2d(x, 4, kernel_size=3, padding="SAME", name="conv1")
conv2 = flow.layers.conv2d(x, 4, kernel_size=3, padding="SAME", name="conv2")

x = np.ones(shape=(1, 2, 4, 4)).astype(np.float32)
weight_1 = np.random.randn(4, 2, 3, 3).astype(np.float32)
weight_2 = np.random.randn(4, 2, 3, 3).astype(np.float32)

print("Conv1 + Conv2 output is: ", original_conv_add)


# 转换3 序列卷积融合

1x1接3x3

1x1和KxK卷积转换

import oneflow as flow
import oneflow.typing as tp
import numpy as np
from typing import Tuple

@flow.global_function()
def conv2d_Job(x: tp.Numpy.Placeholder((1, 3, 4, 4))) -> Tuple[tp.Numpy, tp.Numpy]:
    """Verify that a 1x1 conv followed by a KxK conv merges into a single conv.

    The merged kernel is obtained by convolving the KxK kernel with the
    channel-transposed 1x1 kernel. Returns (sequential result, merged result);
    the two should agree up to float tolerance.
    """
    weight_1x1 = flow.get_variable(
        name="weight1x1",
        shape=[2, 3, 1, 1],  # [O_c, I_c, ksize, ksize]
        initializer=flow.ones_initializer(),
    )
    weight_3x3 = flow.get_variable(
        name="weight3x3",
        shape=[4, 2, 3, 3],  # [O_c, I_c, ksize, ksize]
        initializer=flow.ones_initializer(),
    )

    conv_1x1 = flow.nn.conv2d(x, weight_1x1, strides=1, padding=(0, 0, 0, 0), name="conv1x1")
    conv_1x1_3x3 = flow.nn.conv2d(conv_1x1, weight_3x3, strides=1, padding=(0, 0, 0, 0), name="conv3x3")

    # Build the merged kernel: treat weight_3x3 as the "input" and convolve it
    # with the 1x1 kernel whose in/out channel axes are swapped.
    weight_1x1_transposed = flow.transpose(weight_1x1, [1, 0, 2, 3])  # [2, 3, 1, 1] -> [3, 2, 1, 1]
    weight_merge = flow.nn.conv2d(weight_3x3, weight_1x1_transposed, strides=1, padding=(0, 0, 0, 0), name="weight_merge")  # [4, 3, 3, 3]
    conv_merge = flow.nn.conv2d(x, weight_merge, strides=1, padding=(0, 0, 0, 0), name="conv_merge")

    return conv_1x1_3x3, conv_merge

x = np.ones(shape=(1, 3, 4, 4)).astype(np.float32)
weight_1x1 = np.random.randn(2, 3, 1, 1).astype(np.float32)
weight_3x3 = np.random.randn(4, 2, 3, 3).astype(np.float32)

# Load the random kernels into the job's variables. Without this call both
# branches would only see the all-ones initializer and the random arrays
# above would be dead code.
flow.load_variables({"weight1x1": weight_1x1, "weight3x3": weight_3x3})

conv1x1_3x3, conv_merge = conv2d_Job(x)

print("Conv 1x1 and 3x3 is: ", conv1x1_3x3)
print("Merge Conv: ", conv_merge)

print("Is Match: ", np.allclose(conv1x1_3x3, conv_merge, atol=1e-5))


# 转换4 拼接融合

import oneflow as flow
import oneflow.typing as tp
import numpy as np
from typing import Tuple

@flow.global_function()
def conv_concat(x: tp.Numpy.Placeholder(shape=(1, 1, 4, 4)))->Tuple[tp.Numpy, tp.Numpy]:
    """Show that channel-concatenating two convs equals one wider conv.

    Returns (concat of conv1 and conv2 along channels, single 4-channel conv);
    with the merged conv's kernel loaded as the concatenation of the two
    kernels along the output-channel axis, the outputs should match.
    """
    conv1 = flow.layers.conv2d(x, 2, kernel_size=3, padding="SAME", name="conv1")
    conv2 = flow.layers.conv2d(x, 2, kernel_size=3, padding="SAME", name="conv2")
    # Merge Concat
    conv_merge_concat = flow.layers.conv2d(x, 4, kernel_size=3, padding="SAME", name="conv_merge_concat")
    return flow.concat([conv1, conv2], axis=1), conv_merge_concat

x = np.ones(shape=(1, 1, 4, 4)).astype(np.float32)
weight_1 = np.random.randn(2, 1, 3, 3).astype(np.float32)
weight_2 = np.random.randn(2, 1, 3, 3).astype(np.float32)

# The merged kernel is the two kernels stacked along the output-channel axis
# (axis=0 of the OIHW weight), matching the channel concat of the outputs.
flow.load_variables({"conv1-weight": weight_1, "conv2-weight": weight_2, "conv_merge_concat-weight": np.concatenate([weight_1, weight_2], axis=0)})

original_conv_concat, merge_conv_concat = conv_concat(x)

print("Conv1 concat Conv2 output is: ", original_conv_concat)
print("Merge Concat output is: ", merge_conv_concat)
print("Is Match: ", np.allclose(original_conv_concat, merge_conv_concat, atol=1e-5))


# 转换5 平均池化层转换

import oneflow as flow
import oneflow.typing as tp
import numpy as np
from typing import Tuple

@flow.global_function()
def avg_pool(x: tp.Numpy.Placeholder(shape=(1, 3, 4, 4)))->Tuple[tp.Numpy, tp.Numpy]:
    """Show that 3x3 average pooling equals a conv with a per-channel 1/9 kernel.

    Returns (average-pool output, conv output); with the identity-style 1/9
    kernel loaded into "conv_avg" the two should match elementwise.
    """
    avg_pool_out = flow.nn.avg_pool2d(x, ksize=3, strides=1, padding=(0, 0, 0, 0))
    # Use conv to instead average pool
    conv_avg_pool = flow.layers.conv2d(x, 3, kernel_size=3, strides=1, name="conv_avg")
    return avg_pool_out, conv_avg_pool

x = np.ones(shape=(1, 3, 4, 4)).astype(np.float32)
weight = np.zeros(shape=(3, 3, 3, 3)).astype(np.float32)

# Average pooling acts per channel, so the equivalent conv kernel is zero
# everywhere except the (i, i) in/out channel pair, which holds 1/9.
for i in range(3):
    weight[i, i, :, :] = 1 / 9  # Set 3x3 kernel weight value as 1/9

# Install the hand-built averaging kernel; without this call the conv branch
# keeps its default-initialized weights and cannot reproduce the pooling.
flow.load_variables({"conv_avg-weight": weight})

avg_pool_out, conv_avg_pool = avg_pool(x)

print("Average Pool output is: ", avg_pool_out)
print("Conv Average Pool output is: ", conv_avg_pool)
print("Is Match: ", np.allclose(avg_pool_out, conv_avg_pool, atol=1e-5))


# Diverse Branch Block结构

DBB结构

• 1x1 卷积分支
• 1x1 - KxK卷积分支
• 1x1 - 平均池化分支
• KxK 卷积分支 启发于Inception模块，各操作有不同的感受野以及计算复杂度，能够极大丰富整个模块的特征空间

# 总结

0 条评论

• ### 3*3卷积+1*3卷积+3*1卷积=白给的精度提升

下面要介绍的论文发于ICCV2019，题为「ACNet:Strengthening the Kernel Skeletons for Powerful CNN ...

• ### 图解神秘的NC4HW4

【GiantPandaCV导语】以卷积和im2col+gemm实现卷积操作举例,来图解深度学习中Tensor的NC4HW4(其实应该是N{C/4+C%4>0?1...

• ### 【CNN结构设计】深入理解深度可分离卷积

现在我们来计算一下常规卷积的计算开销（以最简单的stride（步长）为的情况进行讨论）

• ### 深度学习那些事 — 反向传播

这部分是深度学习的重点，掌握了反向传播算法就相当于掌握了一半的神经网络算法。其实就是将损失函数产生的误差通过边进行反向传播往回传播的过程，传播的过程当中会得到每...

• ### Guava Cache 用法介绍

Guava Cache是在内存中缓存数据，相比较于数据库或redis存储，访问内存中的数据会更加高效。Guava官网介绍，下面的这几种情况可以考虑使用Guava...

• ### 面试中又被问到Redis如何实现抢购，赶快代码实现一波吧！

面试常常遇到写一个抢购实例，或者讲讲抢购实现想法，然后总是讲不明白，因为目前工作没做抢购这一块儿。

• ### smjdbctemplate基于Spring JdbcTemplate的升级版

GitHub地址：https://github.com/yinjihuan/smjdbctemplate

• ### Spring Boot 整合 Redis

实际开发中缓存处理是必须的，不可能我们每次客户端去请求一次服务器，服务器每次都要去数据库中进行查找，为什么要使用缓存？说到底是为了提高系统的运行速度。将用户频繁...

• ### [享学Netflix] 三十九、Ribbon核心API源码解析：ribbon-core（二）IClientConfig配置详解

代码下载地址：https://github.com/f641385712/netflix-learning