
Image Segmentation: DeepLab v3+

from PIL import Image
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
# ASPP consists of one 1x1 convolution + three 3x3 atrous convolutions (rate = {6, 12, 18}) + global average pooling;
# ASPP_module below implements a single branch
class ASPP_module(nn.Module):
    def __init__(self, inplanes, planes, dilation):
        '''
        inplanes: number of input channels
        planes:   number of output channels
        dilation: dilation rate
        '''
        super(ASPP_module, self).__init__()
        if dilation == 1:
            kernel_size = 1
            padding = 0
        else:
            kernel_size = 3
            padding = dilation
        self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
                                            stride=1, padding=padding, dilation=dilation, bias=False)  # output spatial size = input (padding matches the dilation)
        self.bn = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU()

        self._init_weight()

    def forward(self, x):
        x = self.atrous_convolution(x)
        x = self.bn(x)

        return self.relu(x)

    def _init_weight(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
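
A quick shape check for a single ASPP branch (a sketch, not from the original post; the 2048-channel feature map at 1/16 resolution is an assumption matching the encoder output used later):

aspp = ASPP_module(2048, 256, dilation=12)
feat = torch.randn(1, 2048, 32, 32)
print(aspp(feat).shape)  # torch.Size([1, 256, 32, 32]) -- spatial size preserved
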
def fixed_padding(inputs, kernel_size, dilation):
    # "same" padding for a stride-1 (possibly dilated) convolution,
    # e.g. kernel_size=3, dilation=1: pad_total=2, pad_beg=1, pad_end=1
    kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
    return padded_inputs


class SeparableConv2d_same(nn.Module):
    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False):
        super(SeparableConv2d_same, self).__init__()

        # depthwise convolution; padding=0 here because "same" padding is applied
        # explicitly via fixed_padding() in forward()
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation,
                               groups=inplanes, bias=bias)
        # 1x1 pointwise convolution
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        x = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])
        x = self.conv1(x)
        x = self.pointwise(x)
        return x
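
A minimal sanity check of the "same" padding behaviour (the channel counts and the odd 33x33 input are arbitrary assumptions): with stride=1 the output keeps the input's spatial size even when dilation > 1.

sep = SeparableConv2d_same(64, 128, kernel_size=3, stride=1, dilation=2)
out = sep(torch.randn(1, 64, 33, 33))
print(out.shape)  # torch.Size([1, 128, 33, 33])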

DeepLab v3+ makes the following modifications to Xception:

  1. A deeper Xception: the original middle flow is repeated 8 times, while the modified version repeats it 16 times
  2. All max pooling layers are replaced by depthwise separable convolutions with stride=2
  3. Every 3x3 depthwise convolution is followed by BN and ReLU

The modified Xception is implemented as follows:

class Block(nn.Module):
    def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
        super(Block, self).__init__()

        if planes != inplanes or stride != 1:
            self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
            self.skipbn = nn.BatchNorm2d(planes)
        else:
            self.skip = None

        self.relu = nn.ReLU(inplace=True)
        rep = []

        filters = inplanes
        if grow_first:
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
            rep.append(nn.BatchNorm2d(planes))
            filters = planes

        for i in range(reps - 1):
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
            rep.append(nn.BatchNorm2d(filters))

        if not grow_first:
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
            rep.append(nn.BatchNorm2d(planes))

        if not start_with_relu:
            rep = rep[1:]

        if stride != 1:
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=2))

        if stride == 1 and is_last:
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))

        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        x = self.rep(inp)

        if self.skip is not None:
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp

        x += skip

        return x


class Xception(nn.Module):
    """
    Modified Aligned Xception
    """
    def __init__(self, inplanes=3, os=16):
        super(Xception, self).__init__()

        if os == 16:
            entry_block3_stride = 2
            middle_block_dilation = 1
            exit_block_dilations = (1, 2)
        elif os == 8:
            entry_block3_stride = 1
            middle_block_dilation = 2
            exit_block_dilations = (2, 4)
        else:
            raise NotImplementedError


        # Entry flow
        self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64)

        self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
        self.block2 = Block(128, 256, reps=2, stride=2, start_with_relu=True)
        self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, start_with_relu=True)

        # Middle flow
        self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)
        self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation)

        # Exit flow
        self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_dilations[0],
                             start_with_relu=True, grow_first=False, is_last=True)

        self.conv3 = SeparableConv2d_same(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1])
        self.bn3 = nn.BatchNorm2d(1536)

        self.conv4 = SeparableConv2d_same(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1])
        self.bn4 = nn.BatchNorm2d(1536)

        self.conv5 = SeparableConv2d_same(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1])
        self.bn5 = nn.BatchNorm2d(2048)

        # Init weights
        self._init_weight()

    def forward(self, x):
        # Entry flow
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.block1(x)
        low_level_feat = x  # low-level features from block1 (1/4 resolution, 128 channels)
        x = self.block2(x)
        x = self.block3(x)

        # Middle flow
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.block13(x)
        x = self.block14(x)
        x = self.block15(x)
        x = self.block16(x)
        x = self.block17(x)
        x = self.block18(x)
        x = self.block19(x)

        # Exit flow
        x = self.block20(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)

        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)

        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu(x)

        return x, low_level_feat

    def _init_weight(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
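
A shape sanity check for the backbone (a sketch; the 512x512 input is an assumption). With os=16 the high-level features come out at 1/16 of the input resolution and the low-level features from block1 at 1/4:

backbone = Xception(inplanes=3, os=16)
backbone.eval()
with torch.no_grad():
    feats, low_level = backbone(torch.randn(1, 3, 512, 512))
print(feats.shape)      # torch.Size([1, 2048, 32, 32])   -- output stride 16
print(low_level.shape)  # torch.Size([1, 128, 128, 128])  -- output stride 4
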
  • DeepLab v3 is used as the encoder, and a decoder is added on top of it to form the new model (DeepLab v3+)
  • Xception/ResNet is adapted as the backbone for segmentation, with depthwise separable convolutions used extensively throughout the model

Decoder:

  1. The encoder output is first upsampled 4x, then concatenated with the encoder feature map of the corresponding resolution, passed through 3x3 convolutions, and finally upsampled 4x again to obtain the final prediction
  2. Before the low-level features are fused, a 1x1 convolution is applied to reduce their channel count
class DeepLabv3_plus(nn.Module):
    def __init__(self, nInputChannels=3, n_classes=21, os=16, _print=True):
        if _print:
            print("Constructing DeepLabv3+ model...")
            print("Backbone: Xception")
            print("Number of classes: {}".format(n_classes))
            print("Output stride: {}".format(os))
            print("Number of Input Channels: {}".format(nInputChannels))
        super(DeepLabv3_plus, self).__init__()

        # Atrous Conv
        self.xception_features = Xception(nInputChannels, os)

        # ASPP
        if os == 16:
            dilations = [1, 6, 12, 18]
        elif os == 8:
            dilations = [1, 12, 24, 36]
        else:
            raise NotImplementedError

        self.aspp1 = ASPP_module(2048, 256, dilation=dilations[0])
        self.aspp2 = ASPP_module(2048, 256, dilation=dilations[1])
        self.aspp3 = ASPP_module(2048, 256, dilation=dilations[2])
        self.aspp4 = ASPP_module(2048, 256, dilation=dilations[3])

        self.relu = nn.ReLU()

        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                             nn.Conv2d(2048, 256, 1, stride=1, bias=False),
                                             nn.BatchNorm2d(256),
                                             nn.ReLU())  # image-level feature: global average pooling + 1x1 conv

        # fuse the 5 concatenated ASPP branches: 5 * 256 = 1280 -> 256
        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(256)

        # 1x1 conv to reduce the low-level features (128 channels from block1) to 48 channels
        self.conv2 = nn.Conv2d(128, 48, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(48)

        self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
                                       nn.BatchNorm2d(256),
                                       nn.ReLU(),
                                       nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
                                       nn.BatchNorm2d(256),
                                       nn.ReLU(),
                                       nn.Conv2d(256, n_classes, kernel_size=1, stride=1))

        self._init_weight()  # initialize conv/BN weights of the ASPP and decoder layers

    def forward(self, input):
        x, low_level_features = self.xception_features(input)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)

        x = torch.cat((x1, x2, x3, x4, x5), dim=1)  # concatenate the 5 branches: 5 * 256 = 1280 channels

        x = self.conv1(x)  # 1280 -> 256
        x = self.bn1(x)
        x = self.relu(x)
        x = F.interpolate(x, size=(int(math.ceil(input.size()[-2]/4)),
                                   int(math.ceil(input.size()[-1]/4))), mode='bilinear', align_corners=True)  # upsample 4x, to 1/4 of the input size

        low_level_features = self.conv2(low_level_features)
        low_level_features = self.bn2(low_level_features)
        low_level_features = self.relu(low_level_features)

        x = torch.cat((x, low_level_features), dim=1)  # 256 + 48 = 304 channels
        x = self.last_conv(x)  # 304 -> 256 -> n_classes
        x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)

        return x

    def _init_weight(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
model = DeepLabv3_plus(nInputChannels=3, n_classes=21, os=16, _print=True)
model.eval()
image = torch.randn(1, 3, 512, 512)
output = model(image)
print(output.size())
Output:

Constructing DeepLabv3+ model...
Backbone: Xception
Number of classes: 21
Output stride: 16
Number of Input Channels: 3
torch.Size([1, 21, 512, 512])
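
To run the model on a real image (which is what the PIL import at the top is for), a minimal sketch follows; the file name demo.jpg, the resize to 512x512, and the ImageNet-style normalization are assumptions, and with the randomly initialized weights above the prediction is of course meaningless -- this only illustrates the expected input/output handling:

import numpy as np

img = Image.open('demo.jpg').convert('RGB').resize((512, 512))  # hypothetical input file
x = torch.from_numpy(np.array(img)).permute(2, 0, 1).float() / 255.0  # HWC -> CHW, scaled to [0, 1]
mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)  # ImageNet statistics (assumed)
std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
x = (x - mean) / std
with torch.no_grad():
    logits = model(x.unsqueeze(0))          # [1, 21, 512, 512]
pred = logits.argmax(dim=1).squeeze(0)      # per-pixel class indices, [512, 512]
print(pred.shape)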
