
ResNet18 Code Implementation [Easy to Understand]


Hello everyone, we meet again. I'm your friend, 全栈君 (the Full-Stack Guy).

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential, Model, datasets, optimizers

# Custom preprocessing function
def preprocess(x, y):
    # x and y are passed in automatically for each sample; since map() runs before
    # batch() below, their shapes are [32, 32, 3] and [] (a scalar label)
    # Scale pixel values from [0, 255] to [-1, 1]
    x = 2 * tf.cast(x, dtype=tf.float32) / 255. - 1
    # Cast the labels to integer tensors
    y = tf.cast(y, dtype=tf.int32)
    # The returned x, y replace the x, y that were passed in, which is how the preprocessing takes effect
    return x, y
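# Illustrative sanity check (an addition, not part of the original script): after
# preprocess(), pixel values lie in [-1, 1] and labels are int32.
_xs, _ys = preprocess(tf.constant([0, 128, 255]), tf.constant([3]))
assert float(tf.reduce_min(_xs)) >= -1.0 and float(tf.reduce_max(_xs)) <= 1.0
assert _ys.dtype == tf.int32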

# Download (on first use) and load the CIFAR10 dataset
(x, y), (x_test, y_test) = datasets.cifar10.load_data()
# Remove the unnecessary dimension of y: [b, 1] -> [b]
y = tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test, axis=1)
# Print the shapes of the training and test sets
# print(x.shape, y.shape, x_test.shape, y_test.shape)

# Build the training set object: shuffle, preprocess, batch
train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(512)
# Build the test set object: preprocess, batch
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(512)

# Sample one batch from the training set and inspect it
sample = next(iter(train_db))
# print('sample:', sample[0].shape, sample[1].shape, tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))

class BasicBlock(layers.Layer):
    # Residual module
    def __init__(self, filter_num, stride=1):
        super(BasicBlock, self).__init__()
        # First convolution unit
        self.conv1 = layers.Conv2D(filter_num, kernel_size=(3, 3), strides=stride, padding='same')
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')
        # Second convolution unit
        self.conv2 = layers.Conv2D(filter_num, kernel_size=(3, 3), strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()
        # Match the shortcut's shape with a 1x1 convolution
        if stride != 1:
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num, kernel_size=(1, 1), strides=stride))
        else:  # shapes already match, connect the shortcut directly
            self.downsample = lambda x: x

    def call(self, inputs, training=None):
        # Forward pass
        # [b, h, w, c], go through the first convolution unit
        out = self.conv1(inputs)
        out = self.bn1(out)
        out = self.relu(out)
        # Go through the second convolution unit
        out = self.conv2(out)
        out = self.bn2(out)
        # Go through the identity (shortcut) branch
        identity = self.downsample(inputs)
        # Add the outputs of the two paths element-wise
        output = layers.add([out, identity])
        output = tf.nn.relu(output)
        return output
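# Illustrative check (an addition, not part of the original script): a BasicBlock with
# stride=2 halves the spatial size of the feature map and changes the channel count,
# thanks to the 1x1 convolution on the shortcut branch.
_block = BasicBlock(128, stride=2)
_out = _block(tf.random.normal([4, 32, 32, 64]))
assert _out.shape == (4, 16, 16, 128)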

class ResNet(Model):
    def __init__(self, layer_dims, num_classes=10):  # e.g. [2, 2, 2, 2] for ResNet18
        super(ResNet, self).__init__()
        # Stem (root) network that preprocesses the input
        self.stem = Sequential([
            layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1)),
            layers.BatchNormalization(),
            layers.Activation('relu'),
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
        ])
        # Stack 4 blocks; each block contains several BasicBlocks and may use a different stride
        self.layer1 = self.build_resblock(64, layer_dims[0])
        self.layer2 = self.build_resblock(128, layer_dims[1], stride=2)
        self.layer3 = self.build_resblock(256, layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(512, layer_dims[3], stride=2)
        # Reduce height and width to 1x1 with a pooling layer
        self.avgpool = layers.GlobalAveragePooling2D()
        # Final fully connected layer for classification
        self.fc = layers.Dense(num_classes)

    def build_resblock(self, filter_num, blocks, stride=1):
        # Helper function: stack `blocks` BasicBlocks with `filter_num` filters each
        res_blocks = Sequential()
        # Only the first BasicBlock may have a stride other than 1, which performs the downsampling
        res_blocks.add(BasicBlock(filter_num, stride))
        # The remaining BasicBlocks all use stride 1
        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))
        return res_blocks

    def call(self, inputs, training=None):
        # Forward pass: go through the stem network
        x = self.stem(inputs)
        # Then through the 4 blocks in sequence
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Through the pooling layer
        x = self.avgpool(x)
        # Through the fully connected layer
        x = self.fc(x)
        return x
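# Shape trace for a CIFAR10 input of [b, 32, 32, 3] (an illustrative addition, derived
# from the strides and padding configured above):
#   stem    -> [b, 30, 30, 64]   (3x3 conv with default 'valid' padding, then a 'same' max-pool with stride 1)
#   layer1  -> [b, 30, 30, 64]
#   layer2  -> [b, 15, 15, 128]
#   layer3  -> [b, 8, 8, 256]
#   layer4  -> [b, 4, 4, 512]
#   avgpool -> [b, 512],  fc -> [b, 10]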

def resnet18():
    # Different ResNets are built by adjusting the number and configuration of BasicBlocks in each block
    return ResNet([2, 2, 2, 2])

# def resnet34():
#     # Different ResNets are built by adjusting the number and configuration of BasicBlocks in each block
#     return ResNet([3, 4, 6, 3])

model = resnet18()  # the ResNet18 network
model.build(input_shape=(None, 32, 32, 3))
# model.summary()  # print the parameter statistics of the network
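# Quick forward-pass check on the freshly built model (an addition, not part of the
# original script): a dummy batch of 2 CIFAR10-sized images yields one logit per class.
_logits = model(tf.random.normal([2, 32, 32, 3]))
assert _logits.shape == (2, 10)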

def main():
    optimizer = optimizers.Adam(learning_rate=1e-4)
    for epoch in range(10):
        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 10]
                logits = model(x)
                y_onehot = tf.one_hot(y, depth=10)
                # Compute the loss
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)
            # Compute gradients with respect to all trainable parameters
            grads = tape.gradient(loss, model.trainable_variables)
            # Apply the update automatically
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            if step % 10 == 0:
                print(epoch, step, 'loss:', float(loss))

        # Evaluate on the test set after each epoch
        total_num = 0
        total_correct = 0
        for x, y in test_db:
            # out = model(x)
            # out = tf.reshape(out, [-1, 512])
            logits = model(x)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)
            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)
            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)


if __name__ == '__main__':
    main()
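For readers who prefer Keras' built-in training loop, the manual GradientTape loop in main() can also be replaced by compile() and fit(). Below is a minimal sketch, assuming the same model, train_db and test_db defined above; the Adam learning rate and epoch count simply mirror the values used in main():

model.compile(optimizer=optimizers.Adam(learning_rate=1e-4),
              loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(train_db, validation_data=test_db, epochs=10)

SparseCategoricalCrossentropy is used here because the labels produced by the pipeline are integer class ids rather than one-hot vectors.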

Publisher: 全栈程序员栈长. Please credit the source when reposting: https://javaforall.cn/141598.html Original link: https://javaforall.cn
