使用场景
DCGAN(深度卷积生成对抗网络)被广泛应用于生成图像数据的任务。在本教程中,我们将使用DCGAN生成漫画头像。通过这一教程,您可以了解如何搭建DCGAN网络,设置优化器,计算损失函数,以及初始化模型权重。
原理
DCGAN是GAN(生成对抗网络)的扩展版本,GAN最早由Ian J. Goodfellow于2014年提出,主要由生成器和判别器两个模型组成。生成器负责生成看似真实的图像,而判别器则负责区分图像是真实的还是生成的。在DCGAN中,这两个模型分别使用卷积和转置卷积层进行图像处理。
在DCGAN中,判别器由卷积层、BatchNorm层和LeakyReLU激活层组成,输入是3x64x64的图像,输出是该图像为真图像的概率。生成器则由转置卷积层、BatchNorm层和ReLU激活层组成,输入是标准正态分布的隐向量z,输出是3x64x64的RGB图像。
实现方法
数据准备与处理
首先下载并解压数据集:
# Download and extract the anime-face dataset used for DCGAN training.
from download import download

url = "https://download.mindspore.cn/dataset/Faces/faces.zip"
# kind="zip" extracts the archive automatically; replace=True overwrites a stale copy.
path = download(url, "./faces", kind="zip", replace=True)
然后定义数据处理函数:
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision as vision
def create_dataset_imagenet(dataset_path, image_size=64, batch_size=128,
                            num_parallel_workers=4):
    """Build the DCGAN training pipeline from a folder of images.

    The original snippet read ``image_size`` and ``batch_size`` from module
    globals that are never defined in this excerpt; they are now defaulted
    keyword parameters (64 / 128 follow the standard DCGAN setup — confirm
    against the hyperparameter cell omitted from this excerpt).

    Args:
        dataset_path: root directory laid out for ``ImageFolderDataset``.
        image_size: target square side after resize + center crop.
        batch_size: number of images per training batch.
        num_parallel_workers: parallel readers for decoding.

    Returns:
        A batched dataset yielding float32 CHW images scaled to [0, 1].
    """
    dataset = ds.ImageFolderDataset(dataset_path,
                                    num_parallel_workers=num_parallel_workers,
                                    shuffle=True, decode=True)
    transforms = [
        vision.Resize(image_size),
        vision.CenterCrop(image_size),
        vision.HWC2CHW(),                          # HWC uint8 -> CHW
        lambda x: ((x / 255).astype("float32")),   # scale pixels to [0, 1]
    ]
    # Keep only the image column; labels are irrelevant for GAN training.
    dataset = dataset.project('image')
    dataset = dataset.map(transforms, 'image')
    dataset = dataset.batch(batch_size)
    return dataset


dataset = create_dataset_imagenet('./faces')
生成器
from mindspore import nn, ops
from mindspore.common.initializer import Normal
# Per the DCGAN paper: conv weights ~ N(0, 0.02), BatchNorm gamma ~ N(1, 0.02).
weight_init = Normal(mean=0, sigma=0.02)
gamma_init = Normal(mean=1, sigma=0.02)
class Generator(nn.Cell):
    """DCGAN generator: transposed-conv stack mapping a latent vector of
    shape (N, nz, 1, 1) to an image of shape (N, nc, 64, 64) in [-1, 1].

    The original snippet read ``nz``/``ngf``/``nc`` from module globals that
    are never defined in this excerpt; they are now defaulted constructor
    parameters (100 / 64 / 3 follow the standard DCGAN setup — confirm
    against the hyperparameter cell omitted from this excerpt).

    Args:
        nz: length of the latent noise vector.
        ngf: base number of generator feature maps.
        nc: number of output image channels (3 = RGB).
    """

    def __init__(self, nz=100, ngf=64, nc=3):
        super(Generator, self).__init__()
        self.generator = nn.SequentialCell(
            # (nz, 1, 1) -> (ngf*8, 4, 4)
            nn.Conv2dTranspose(nz, ngf * 8, 4, 1, 'valid', weight_init=weight_init),
            nn.BatchNorm2d(ngf * 8, gamma_init=gamma_init),
            nn.ReLU(),
            # -> (ngf*4, 8, 8)
            nn.Conv2dTranspose(ngf * 8, ngf * 4, 4, 2, 'pad', 1, weight_init=weight_init),
            nn.BatchNorm2d(ngf * 4, gamma_init=gamma_init),
            nn.ReLU(),
            # -> (ngf*2, 16, 16)
            nn.Conv2dTranspose(ngf * 4, ngf * 2, 4, 2, 'pad', 1, weight_init=weight_init),
            nn.BatchNorm2d(ngf * 2, gamma_init=gamma_init),
            nn.ReLU(),
            # -> (ngf, 32, 32)
            nn.Conv2dTranspose(ngf * 2, ngf, 4, 2, 'pad', 1, weight_init=weight_init),
            nn.BatchNorm2d(ngf, gamma_init=gamma_init),
            nn.ReLU(),
            # -> (nc, 64, 64); Tanh squashes outputs to [-1, 1]
            nn.Conv2dTranspose(ngf, nc, 4, 2, 'pad', 1, weight_init=weight_init),
            nn.Tanh()
        )

    def construct(self, x):
        return self.generator(x)


generator = Generator()
判别器
class Discriminator(nn.Cell):
    """DCGAN discriminator: conv stack mapping an (N, nc, 64, 64) image to a
    per-image probability of being real, via a final Sigmoid.

    The original snippet read ``nc``/``ndf`` from module globals that are
    never defined in this excerpt; they are now defaulted constructor
    parameters (3 / 64 follow the standard DCGAN setup — confirm against
    the hyperparameter cell omitted from this excerpt).

    Args:
        nc: number of input image channels (3 = RGB).
        ndf: base number of discriminator feature maps.
    """

    def __init__(self, nc=3, ndf=64):
        super(Discriminator, self).__init__()
        self.discriminator = nn.SequentialCell(
            # (nc, 64, 64) -> (ndf, 32, 32); no BatchNorm on the first layer
            nn.Conv2d(nc, ndf, 4, 2, 'pad', 1, weight_init=weight_init),
            nn.LeakyReLU(0.2),
            # -> (ndf*2, 16, 16)
            nn.Conv2d(ndf, ndf * 2, 4, 2, 'pad', 1, weight_init=weight_init),
            nn.BatchNorm2d(ndf * 2, gamma_init=gamma_init),
            nn.LeakyReLU(0.2),
            # -> (ndf*4, 8, 8)
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 'pad', 1, weight_init=weight_init),
            nn.BatchNorm2d(ndf * 4, gamma_init=gamma_init),
            nn.LeakyReLU(0.2),
            # -> (ndf*8, 4, 4)
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 'pad', 1, weight_init=weight_init),
            nn.BatchNorm2d(ndf * 8, gamma_init=gamma_init),
            nn.LeakyReLU(0.2),
            # -> (1, 1, 1) raw score
            nn.Conv2d(ndf * 8, 1, 4, 1, 'valid', weight_init=weight_init),
        )
        self.adv_layer = nn.Sigmoid()

    def construct(self, x):
        out = self.discriminator(x)
        # Flatten (N, 1, 1, 1) -> (N, 1) before the sigmoid.
        out = out.reshape(out.shape[0], -1)
        return self.adv_layer(out)


discriminator = Discriminator()
损失函数和优化器
# Binary cross-entropy on sigmoid probabilities; both networks share this criterion.
adversarial_loss = nn.BCELoss(reduction='mean')
# NOTE(review): lr and beta1 are not defined anywhere in this excerpt — confirm
# the hyperparameter cell (the reference DCGAN setup uses lr=0.0002, beta1=0.5).
optimizer_D = nn.Adam(discriminator.trainable_params(), learning_rate=lr, beta1=beta1)
optimizer_G = nn.Adam(generator.trainable_params(), learning_rate=lr, beta1=beta1)
# Prefix parameter names so the two optimizers' state variables don't collide.
optimizer_G.update_parameters_name('optim_g.')
optimizer_D.update_parameters_name('optim_d.')
训练模型
import mindspore
import matplotlib.pyplot as plt
def generator_forward(real_imgs, valid):
    """Sample latent noise, generate fakes, and score them against 'real' labels.

    Returns the generator loss plus the generated batch (reused later so the
    discriminator step does not re-run the generator).
    """
    batch = real_imgs.shape[0]
    noise = ops.standard_normal((batch, nz, 1, 1))
    fakes = generator(noise)
    loss = adversarial_loss(discriminator(fakes), valid)
    return loss, fakes
def discriminator_forward(real_imgs, gen_imgs, valid, fake):
    """Discriminator loss: mean of the BCE on the real and generated batches."""
    loss_on_real = adversarial_loss(discriminator(real_imgs), valid)
    loss_on_fake = adversarial_loss(discriminator(gen_imgs), fake)
    return (loss_on_real + loss_on_fake) / 2
# grad_position=None: differentiate only w.r.t. each optimizer's parameter list.
# has_aux=True makes value_and_grad return gen_imgs alongside the generator loss.
grad_generator_fn = mindspore.value_and_grad(generator_forward, None, optimizer_G.parameters, has_aux=True)
grad_discriminator_fn = mindspore.value_and_grad(discriminator_forward, None, optimizer_D.parameters)
@mindspore.jit
def train_step(imgs):
    """One jit-compiled GAN iteration: generator update, then discriminator update."""
    # Target labels: 1.0 for real images, 0.0 for generated ones.
    valid = ops.ones((imgs.shape[0], 1), mindspore.float32)
    fake = ops.zeros((imgs.shape[0], 1), mindspore.float32)
    (g_loss, gen_imgs), g_grads = grad_generator_fn(imgs, valid)
    optimizer_G(g_grads)
    # Reuse gen_imgs from the generator step for the discriminator's fake loss.
    d_loss, d_grads = grad_discriminator_fn(imgs, gen_imgs, valid, fake)
    optimizer_D(d_grads)
    return g_loss, d_loss, gen_imgs
# Per-iteration loss histories and one snapshot of generator output per epoch.
G_losses = []
D_losses = []
image_list = []

total = dataset.get_dataset_size()
for epoch in range(num_epochs):  # NOTE(review): num_epochs is not defined in this excerpt — confirm
    generator.set_train()
    discriminator.set_train()
    for i, (imgs, ) in enumerate(dataset.create_tuple_iterator()):
        g_loss, d_loss, gen_imgs = train_step(imgs)
        if i % 100 == 0 or i == total - 1:
            print('[%2d/%d][%3d/%d] Loss_D:%7.4f Loss_G:%7.4f' % (
                epoch + 1, num_epochs, i + 1, total, d_loss.asnumpy(), g_loss.asnumpy()))
        D_losses.append(d_loss.asnumpy())
        G_losses.append(g_loss.asnumpy())
    # End of epoch: sample fresh noise in eval mode for the progress GIF.
    generator.set_train(False)
    fixed_noise = ops.standard_normal((batch_size, nz, 1, 1))
    img = generator(fixed_noise)
    # NCHW -> NHWC so matplotlib can display the frames later.
    image_list.append(img.transpose(0, 2, 3, 1).asnumpy())
    # NOTE(review): the flattened paste makes the checkpoint-save indentation
    # ambiguous; saving each epoch (as here) and saving once after the loop
    # produce the same final files, since the filenames are fixed.
    mindspore.save_checkpoint(generator, "./generator.ckpt")
    mindspore.save_checkpoint(discriminator, "./discriminator.ckpt")
结果展示
绘制损失函数变化图:
# Plot the generator/discriminator loss curves recorded during training.
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses, label="G", color='blue')
plt.plot(D_losses, label="D", color='orange')
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
通过隐向量生成图像并保存:
import matplotlib.animation as animation
def showGif(image_list):
    """Animate the per-epoch generator snapshots as a 3x8 image grid and
    save the animation to ./dcgan.gif."""
    frames = []
    fig = plt.figure(figsize=(8, 3), dpi=120)
    for batch in image_list:
        # Build three rows of eight images each, then stack them vertically.
        rows = [np.concatenate(batch[r * 8:(r + 1) * 8], axis=1) for r in range(3)]
        grid = np.clip(np.concatenate(rows, axis=0), 0, 1)
        plt.axis("off")
        frames.append([plt.imshow(grid)])
    ani = animation.ArtistAnimation(fig, frames, interval=1000, repeat_delay=1000, blit=True)
    ani.save('./dcgan.gif', writer='pillow', fps=1)


showGif(image_list)
加载生成器网络模型参数文件来生成图像:
# Restore the trained generator weights and sample a fresh batch of faces.
mindspore.load_checkpoint("./generator.ckpt", generator)
fixed_noise = ops.standard_normal((batch_size, nz, 1, 1))
# NCHW -> NHWC for matplotlib display.
img64 = generator(fixed_noise).transpose(0, 2, 3, 1).asnumpy()

fig = plt.figure(figsize=(8, 3), dpi=120)
images = []
# Lay out the first 24 samples as a 3x8 grid.
for i in range(3):
    images.append(np.concatenate((img64[i * 8:(i + 1) * 8]), axis=1))
img = np.clip(np.concatenate((images[:]), axis=0), 0, 1)
plt.axis("off")
plt.imshow(img)
plt.show()
总结与心得
学习心得:在学习和实现DCGAN(深度卷积生成对抗网络)的过程中,我们通过搭建卷积神经网络来生成漫画头像。这不仅加深了我们对GAN(生成对抗网络)原理的理解,也让我们体验到了生成模型和判别模型之间的博弈过程。通过实际操作,我们学会了如何下载并预处理数据集,如何构建生成器和判别器网络,以及如何设置损失函数和优化器。在训练模型的过程中,观察生成器和判别器的损失变化,逐步优化模型参数,最终生成出高质量的漫画头像图片。这次实践不仅强化了我们对深度学习框架的使用技能,也让我们体会到了生成对抗网络在图像生成领域的强大潜力和应用前景。
如果你觉得这篇博文对你有帮助,请点赞、收藏、关注我,并且可以打赏支持我!
欢迎关注我的后续博文,我将分享更多关于人工智能、自然语言处理和计算机视觉的精彩内容。
谢谢大家的支持!
标签:25,weight,nn,loss,dataset,init,DCGAN,imgs,打卡 From: https://blog.csdn.net/ljd939952281/article/details/140226850