GAN is an unsupervised learning method, proposed by Ian Goodfellow et al. in 2014.
The main use of a GAN is data generation: learning the distribution of a real dataset so that new, realistic samples (for example, images) can be synthesized.
5.1.2.1 Definition
A Generative Adversarial Network (GAN) consists of two main components: a generator G (Generator) and a discriminator D (Discriminator).
5.1.2.2 How It Works
Process analysis:
- The generator G takes a random noise vector z as input and produces a fake sample G(z).
- The discriminator D receives both real samples and generated samples, and predicts the probability that its input is real.
- The two networks are trained adversarially: D gets better at separating real from fake, while G gets better at fooling D, until the generated samples become hard to tell apart from real ones.
5.1.2.3 Training Loss
In implementation, the whole optimization actually reduces to a single binary cross-entropy objective on the discriminator's output, applied to two kinds of samples:
- For real samples: a log-probability loss; the discriminator is trained to raise the predicted probability of "real".
- For generated samples: a log-probability loss; the discriminator is trained to lower the predicted probability of "real".
Combining the two gives the standard minimax objective, written out below.
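This is the value function from the original 2014 paper, which the discriminator D maximizes and the generator G minimizes:

$$\min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{\mathrm{data}}(x)}\left[\log D(x)\right] + \mathbb{E}_{z \sim p_z(z)}\left[\log\left(1 - D(G(z))\right)\right]$$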
5.1.2.4 Architecture of G and D
G and D are two separate networks; the key requirement is that both are differentiable, so they can be trained end to end with backpropagation. To describe concrete architectures for G and D, we have to distinguish between versions of GAN: the original GAN used simple fully-connected networks, while DCGAN (the version used in the case study below) builds G and D out of convolutional layers.
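For intuition (a minimal sketch, not from the original text), the simplest fully-connected version of G and D might look like this in Keras, assuming 28x28 grayscale images flattened to 784 pixels and a 100-dimensional noise vector:

from keras.models import Sequential
from keras.layers import Dense, LeakyReLU

# Hypothetical minimal MLP generator: 100-d noise -> 784-pixel image in [-1, 1]
mlp_generator = Sequential([
    Dense(256, input_dim=100),
    LeakyReLU(alpha=0.2),
    Dense(784, activation='tanh'),
])

# Hypothetical minimal MLP discriminator: 784-pixel image -> probability of "real"
mlp_discriminator = Sequential([
    Dense(256, input_dim=784),
    LeakyReLU(alpha=0.2),
    Dense(1, activation='sigmoid'),
])

The DCGAN code in the next section replaces these dense stacks with convolutional and upsampling layers, which work much better for images.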
5.1.3.1 Case Demonstration and Results
This case study trains a DCGAN on the MNIST handwritten-digit dataset and periodically saves grids of generated digits, so you can watch the samples improve over training.
5.1.3.2 Code Workflow
The code below proceeds in these steps:
1. In __init__, define the shape of the input images (28x28 single-channel MNIST digits).
2. Build the discriminator and compile it with a binary cross-entropy loss.
3. Build the generator, which maps a 100-dimensional noise vector to an image.
4. Chain the generator and the frozen discriminator into a combined model that is used to train only the generator.
5. In the training loop, alternate between training the discriminator on real and fake batches and training the generator through the combined model.
6. Every 50 iterations, save a 5x5 grid of generated images.
5.1.3.3 Code
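The listing below needs the following imports to run; this assumes the standalone Keras 2.x API (with TensorFlow 2 the same names are available under tensorflow.keras):

import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers import LeakyReLU, UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam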
class DCGAN():
    def __init__(self):
        # Shape of the input images (28x28 grayscale MNIST digits)
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
    def init_model(self):
        # Size of the random noise (latent) vector fed to the generator
        self.latent_dim = 100
        optimizer = Adam(0.0002, 0.5)
        # 1. Build and compile the discriminator:
        #    choose the loss, the optimizer, and track accuracy
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])
        # 2. Build the generator and set up its training via a combined model
        self.generator = self.build_generator()
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)
        # In the combined model only the generator is trained;
        # freeze the discriminator's weights
        self.discriminator.trainable = False
        valid = self.discriminator(img)
        # The combined model trains the generator to fool the discriminator
        self.combined = Model(z, valid)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
    def build_discriminator(self):
        model = Sequential()
        # Strided convolutions progressively downsample the 28x28 input
        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        # Single sigmoid output: the probability that the input image is real
        model.add(Dense(1, activation='sigmoid'))
        model.summary()
        img = Input(shape=self.img_shape)
        validity = model(img)
        return Model(img, validity)
    def build_generator(self):
        model = Sequential()
        # Project the noise vector to a 7x7x128 feature map
        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        # Upsample 7x7 -> 14x14
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        # Upsample 14x14 -> 28x28
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        # Final convolution produces a 28x28x1 image in [-1, 1] via tanh
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))
        model.summary()
        noise = Input(shape=(self.latent_dim,))
        img = model(noise)
        return Model(noise, img)
    def train(self, epochs, batch_size=32):
        # Load the MNIST handwritten digits
        (X_train, _), (_, _) = mnist.load_data()
        # Normalize pixel values to [-1, 1] to match the generator's tanh output
        X_train = X_train / 127.5 - 1.
        X_train = np.expand_dims(X_train, axis=3)
        # Target labels: 1 for real images, 0 for generated (fake) images
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        for epoch in range(epochs):
            # 1. Train the discriminator
            # Select a random batch of real images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]
            # Let the generator produce a batch of fake images
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            gen_imgs = self.generator.predict(noise)
            # Train the discriminator on the real and fake batches separately
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            # Average the two discriminator losses
            d_loss = np.add(d_loss_real, d_loss_fake) / 2
            # 2. Train the generator while the discriminator stays frozen
            # Use target label 1 ("real") so the generator learns to produce
            # samples that look more and more like real ones
            g_loss = self.combined.train_on_batch(noise, valid)
            # Print progress (note the escaped %% for a literal percent sign)
            print("Epoch: %d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))
            # Periodically save generated image samples
            if epoch % 50 == 0:
                self.save_imgs(epoch)
    def save_imgs(self, epoch):
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        gen_imgs = self.generator.predict(noise)
        # Rescale images from [-1, 1] to [0, 1] for display
        gen_imgs = 0.5 * gen_imgs + 0.5
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        # The ./images directory must exist before saving
        fig.savefig("./images/mnist_%d.png" % epoch)
        plt.close()
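To actually run the example, one possible entry point looks like the following (not part of the original listing; the epoch count is illustrative, and the ./images output directory must exist):

if __name__ == '__main__':
    dcgan = DCGAN()
    dcgan.init_model()
    # Hypothetical settings: 4000 training iterations with batches of 32 images
    dcgan.train(epochs=4000, batch_size=32)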