当前位置: 首页 > news >正文

捕鱼网站建设永久免费手机建站

捕鱼网站建设,永久免费手机建站,wordpress文章不显示摘要,网网站站建建设设内容介绍#xff1a; Pix2Pix是基于条件生成对抗网络#xff08;cGAN, Condition Generative Adversarial Networks #xff09;实现的一种深度学习图像转换模型#xff0c;该模型是由Phillip Isola等作者在2017年CVPR上提出的#xff0c;可以实现语义/标签到真实图片、灰…内容介绍 Pix2Pix是基于条件生成对抗网络cGAN, Condition Generative Adversarial Networks 实现的一种深度学习图像转换模型该模型是由Phillip Isola等作者在2017年CVPR上提出的可以实现语义/标签到真实图片、灰度图到彩色图、航空图到地图、白天到黑夜、线稿图到实物图的转换。Pix2Pix是将cGAN应用于有监督的图像到图像翻译的经典之作其包括两个模型生成器和判别器。 传统上尽管此类任务的目标都是相同的从像素预测像素但每项都是用单独的专用机器来处理的。而Pix2Pix使用的网络作为一个通用框架使用相同的架构和目标只在不同的数据上进行训练即可得到令人满意的结果鉴于此许多人已经使用此网络发布了他们自己的艺术作品。 cGAN的生成器与传统GAN的生成器在原理上有一些区别cGAN的生成器是将输入图片作为指导信息由输入图像不断尝试生成用于迷惑判别器的“假”图像由输入图像转换输出为相应“假”图像的本质是从像素到另一个像素的映射而传统GAN的生成器是基于一个给定的随机噪声生成图像输出图像通过其他约束条件控制生成这是cGAN和GAN的在图像翻译任务中的差异。Pix2Pix中判别器的任务是判断从生成器输出的图像是真实的训练图像还是生成的“假”图像。在生成器与判别器的不断博弈过程中模型会达到一个平衡点生成器输出的图像与真实训练数据使得判别器刚好具有50%的概率判断正确。 具体内容 1. 导包 from download import download from mindspore import dataset as ds import matplotlib.pyplot as plt import mindspore import mindspore.nn as nn import mindspore.ops as ops import mindspore.nn as nn import mindspore.nn as nn from mindspore.common import initializer as init import numpy as np import os import datetime from mindspore import value_and_grad, Tensor from mindspore import load_checkpoint, load_param_into_net2. 下载数据集 url https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/models/application/dataset_pix2pix.tardownload(url, ./dataset, kindtar, replaceTrue) 3. 数据显示 dataset ds.MindDataset(./dataset/dataset_pix2pix/train.mindrecord, columns_list[input_images, target_images], shuffleTrue) data_iter next(dataset.create_dict_iterator(output_numpyTrue)) # 可视化部分训练数据 plt.figure(figsize(10, 3), dpi140) for i, image in enumerate(data_iter[input_images][:10], 1):plt.subplot(3, 10, i)plt.axis(off)plt.imshow((image.transpose(1, 2, 0) 1) / 2) plt.show() 4. 
网络构建

class UNetSkipConnectionBlock(nn.Cell):
    """One U-Net level: downsample -> submodule -> upsample, plus a skip connection.

    Args:
        outer_nc: channels of the outer (input/output) feature map.
        inner_nc: channels produced by the downsampling convolution.
        in_planes: input channels; defaults to ``outer_nc`` when None.
        dropout: append Dropout(p=0.5) to intermediate levels when True.
        submodule: the nested inner UNetSkipConnectionBlock, or None.
        outermost: this is the top level (no norm, Tanh output, no skip).
        innermost: this is the bottom level (no submodule).
        alpha: LeakyReLU negative slope for the downsampling activations.
        norm_mode: 'batch' or 'instance' normalization.
    """

    def __init__(self, outer_nc, inner_nc, in_planes=None, dropout=False,
                 submodule=None, outermost=False, innermost=False, alpha=0.2, norm_mode='batch'):
        super(UNetSkipConnectionBlock, self).__init__()
        down_norm = nn.BatchNorm2d(inner_nc)
        up_norm = nn.BatchNorm2d(outer_nc)
        use_bias = False
        if norm_mode == 'instance':
            # Instance norm carries no affine parameters, so the convs need a bias.
            down_norm = nn.BatchNorm2d(inner_nc, affine=False)
            up_norm = nn.BatchNorm2d(outer_nc, affine=False)
            use_bias = True
        if in_planes is None:
            in_planes = outer_nc
        down_conv = nn.Conv2d(in_planes, inner_nc, kernel_size=4,
                              stride=2, padding=1, has_bias=use_bias, pad_mode='pad')
        down_relu = nn.LeakyReLU(alpha)
        up_relu = nn.ReLU()
        if outermost:
            # Outermost level: Tanh maps the output into the [-1, 1] image range.
            up_conv = nn.Conv2dTranspose(inner_nc * 2, outer_nc,
                                         kernel_size=4, stride=2,
                                         padding=1, pad_mode='pad')
            down = [down_conv]
            up = [up_relu, up_conv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            # Innermost level has no submodule, so the upconv input is not doubled
            # by a skip concatenation.
            up_conv = nn.Conv2dTranspose(inner_nc, outer_nc,
                                         kernel_size=4, stride=2,
                                         padding=1, has_bias=use_bias, pad_mode='pad')
            down = [down_relu, down_conv]
            up = [up_relu, up_conv, up_norm]
            model = down + up
        else:
            up_conv = nn.Conv2dTranspose(inner_nc * 2, outer_nc,
                                         kernel_size=4, stride=2,
                                         padding=1, has_bias=use_bias, pad_mode='pad')
            down = [down_relu, down_conv, down_norm]
            up = [up_relu, up_conv, up_norm]
            model = down + [submodule] + up
            if dropout:
                model.append(nn.Dropout(p=0.5))
        self.model = nn.SequentialCell(model)
        # Every level except the outermost concatenates its input onto its output.
        self.skip_connections = not outermost

    def construct(self, x):
        out = self.model(x)
        if self.skip_connections:
            out = ops.concat((out, x), axis=1)
        return out

5.
UNet生成器 class UNetGenerator(nn.Cell):def __init__(self, in_planes, out_planes, ngf64, n_layers8, norm_modebn, dropoutFalse):super(UNetGenerator, self).__init__()unet_block UNetSkipConnectionBlock(ngf * 8, ngf * 8, in_planesNone, submoduleNone,norm_modenorm_mode, innermostTrue)for _ in range(n_layers - 5):unet_block UNetSkipConnectionBlock(ngf * 8, ngf * 8, in_planesNone, submoduleunet_block,norm_modenorm_mode, dropoutdropout)unet_block UNetSkipConnectionBlock(ngf * 4, ngf * 8, in_planesNone, submoduleunet_block,norm_modenorm_mode)unet_block UNetSkipConnectionBlock(ngf * 2, ngf * 4, in_planesNone, submoduleunet_block,norm_modenorm_mode)unet_block UNetSkipConnectionBlock(ngf, ngf * 2, in_planesNone, submoduleunet_block,norm_modenorm_mode)self.model UNetSkipConnectionBlock(out_planes, ngf, in_planesin_planes, submoduleunet_block,outermostTrue, norm_modenorm_mode)def construct(self, x):return self.model(x) 6. PatchGAN判别器 import mindspore.nn as nnclass ConvNormRelu(nn.Cell):def __init__(self,in_planes,out_planes,kernel_size4,stride2,alpha0.2,norm_modebatch,pad_modeCONSTANT,use_reluTrue,paddingNone):super(ConvNormRelu, self).__init__()norm nn.BatchNorm2d(out_planes)if norm_mode instance:norm nn.BatchNorm2d(out_planes, affineFalse)has_bias (norm_mode instance)if not padding:padding (kernel_size - 1) // 2if pad_mode CONSTANT:conv nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad_modepad,has_biashas_bias, paddingpadding)layers [conv, norm]else:paddings ((0, 0), (0, 0), (padding, padding), (padding, padding))pad nn.Pad(paddingspaddings, modepad_mode)conv nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad_modepad, has_biashas_bias)layers [pad, conv, norm]if use_relu:relu nn.ReLU()if alpha 0:relu nn.LeakyReLU(alpha)layers.append(relu)self.features nn.SequentialCell(layers)def construct(self, x):output self.features(x)return outputclass Discriminator(nn.Cell):def __init__(self, in_planes3, ndf64, n_layers3, alpha0.2, norm_modebatch):super(Discriminator, 
self).__init__()kernel_size 4layers [nn.Conv2d(in_planes, ndf, kernel_size, 2, pad_modepad, padding1),nn.LeakyReLU(alpha)]nf_mult ndffor i in range(1, n_layers):nf_mult_prev nf_multnf_mult min(2 ** i, 8) * ndflayers.append(ConvNormRelu(nf_mult_prev, nf_mult, kernel_size, 2, alpha, norm_mode, padding1))nf_mult_prev nf_multnf_mult min(2 ** n_layers, 8) * ndflayers.append(ConvNormRelu(nf_mult_prev, nf_mult, kernel_size, 1, alpha, norm_mode, padding1))layers.append(nn.Conv2d(nf_mult, 1, kernel_size, 1, pad_modepad, padding1))self.features nn.SequentialCell(layers)def construct(self, x, y):x_y ops.concat((x, y), axis1)output self.features(x_y)return output 7. Pix2Pix生成器和判别器初始化 g_in_planes 3 g_out_planes 3 g_ngf 64 g_layers 8 d_in_planes 6 d_ndf 64 d_layers 3 alpha 0.2 init_gain 0.02 init_type normalnet_generator UNetGenerator(in_planesg_in_planes, out_planesg_out_planes,ngfg_ngf, n_layersg_layers) for _, cell in net_generator.cells_and_names():if isinstance(cell, (nn.Conv2d, nn.Conv2dTranspose)):if init_type normal:cell.weight.set_data(init.initializer(init.Normal(init_gain), cell.weight.shape))elif init_type xavier:cell.weight.set_data(init.initializer(init.XavierUniform(init_gain), cell.weight.shape))elif init_type constant:cell.weight.set_data(init.initializer(0.001, cell.weight.shape))else:raise NotImplementedError(initialization method [%s] is not implemented % init_type)elif isinstance(cell, nn.BatchNorm2d):cell.gamma.set_data(init.initializer(ones, cell.gamma.shape))cell.beta.set_data(init.initializer(zeros, cell.beta.shape))net_discriminator Discriminator(in_planesd_in_planes, ndfd_ndf,alphaalpha, n_layersd_layers) for _, cell in net_discriminator.cells_and_names():if isinstance(cell, (nn.Conv2d, nn.Conv2dTranspose)):if init_type normal:cell.weight.set_data(init.initializer(init.Normal(init_gain), cell.weight.shape))elif init_type xavier:cell.weight.set_data(init.initializer(init.XavierUniform(init_gain), cell.weight.shape))elif init_type 
constant:cell.weight.set_data(init.initializer(0.001, cell.weight.shape))else:raise NotImplementedError(initialization method [%s] is not implemented % init_type)elif isinstance(cell, nn.BatchNorm2d):cell.gamma.set_data(init.initializer(ones, cell.gamma.shape))cell.beta.set_data(init.initializer(zeros, cell.beta.shape))class Pix2Pix(nn.Cell):Pix2Pix模型网络def __init__(self, discriminator, generator):super(Pix2Pix, self).__init__(auto_prefixTrue)self.net_discriminator discriminatorself.net_generator generatordef construct(self, reala):fakeb self.net_generator(reala)return fakeb 8. 训练 epoch_num 3 ckpt_dir results/ckpt dataset_size 400 val_pic_size 256 lr 0.0002 n_epochs 100 n_epochs_decay 100def get_lr():lrs [lr] * dataset_size * n_epochslr_epoch 0for epoch in range(n_epochs_decay):lr_epoch lr * (n_epochs_decay - epoch) / n_epochs_decaylrs [lr_epoch] * dataset_sizelrs [lr_epoch] * dataset_size * (epoch_num - n_epochs_decay - n_epochs)return Tensor(np.array(lrs).astype(np.float32))dataset ds.MindDataset(./dataset/dataset_pix2pix/train.mindrecord, columns_list[input_images, target_images], shuffleTrue, num_parallel_workers1) steps_per_epoch dataset.get_dataset_size() loss_f nn.BCEWithLogitsLoss() l1_loss nn.L1Loss()def forword_dis(reala, realb):lambda_dis 0.5fakeb net_generator(reala)pred0 net_discriminator(reala, fakeb)pred1 net_discriminator(reala, realb)loss_d loss_f(pred1, ops.ones_like(pred1)) loss_f(pred0, ops.zeros_like(pred0))loss_dis loss_d * lambda_disreturn loss_disdef forword_gan(reala, realb):lambda_gan 0.5lambda_l1 100fakeb net_generator(reala)pred0 net_discriminator(reala, fakeb)loss_1 loss_f(pred0, ops.ones_like(pred0))loss_2 l1_loss(fakeb, realb)loss_gan loss_1 * lambda_gan loss_2 * lambda_l1return loss_gand_opt nn.Adam(net_discriminator.trainable_params(), learning_rateget_lr(),beta10.5, beta20.999, loss_scale1) g_opt nn.Adam(net_generator.trainable_params(), learning_rateget_lr(),beta10.5, beta20.999, loss_scale1)grad_d value_and_grad(forword_dis, None, 
net_discriminator.trainable_params()) grad_g value_and_grad(forword_gan, None, net_generator.trainable_params())def train_step(reala, realb):loss_dis, d_grads grad_d(reala, realb)loss_gan, g_grads grad_g(reala, realb)d_opt(d_grads)g_opt(g_grads)return loss_dis, loss_ganif not os.path.isdir(ckpt_dir):os.makedirs(ckpt_dir)g_losses [] d_losses [] data_loader dataset.create_dict_iterator(output_numpyTrue, num_epochsepoch_num)for epoch in range(epoch_num):for i, data in enumerate(data_loader):start_time datetime.datetime.now()input_image Tensor(data[input_images])target_image Tensor(data[target_images])dis_loss, gen_loss train_step(input_image, target_image)end_time datetime.datetime.now()delta (end_time - start_time).microsecondsif i % 2 0:print(ms per step:{:.2f} epoch:{}/{} step:{}/{} Dloss:{:.4f} Gloss:{:.4f} .format((delta / 1000), (epoch 1), (epoch_num), i, steps_per_epoch, float(dis_loss), float(gen_loss)))d_losses.append(dis_loss.asnumpy())g_losses.append(gen_loss.asnumpy())if (epoch 1) epoch_num:mindspore.save_checkpoint(net_generator, ckpt_dir Generator.ckpt) 9. 推理 param_g load_checkpoint(ckpt_dir Generator.ckpt) load_param_into_net(net_generator, param_g) dataset ds.MindDataset(./dataset/dataset_pix2pix/train.mindrecord, columns_list[input_images, target_images], shuffleTrue) data_iter next(dataset.create_dict_iterator()) predict_show net_generator(data_iter[input_images]) plt.figure(figsize(10, 3), dpi140) for i in range(10):plt.subplot(2, 10, i 1)plt.imshow((data_iter[input_images][i].asnumpy().transpose(1, 2, 0) 1) / 2)plt.axis(off)plt.subplots_adjust(wspace0.05, hspace0.02)plt.subplot(2, 10, i 11)plt.imshow((predict_show[i].asnumpy().transpose(1, 2, 0) 1) / 2)plt.axis(off)plt.subplots_adjust(wspace0.05, hspace0.02) plt.show() Pix2Pix的学习过程让我深刻体会到了深度学习在图像处理领域的强大能力。通过训练一对相互竞争的神经网络——生成器与判别器Pix2Pix能够学习到输入图像与输出图像之间复杂的映射关系。这种端到端的学习方式无需人工设计复杂的特征提取与转换规则极大地简化了图像转换的流程同时也提高了转换结果的质量和多样性。
http://www.zqtcl.cn/news/282752/

相关文章:

  • 北京国互网网站建设公司东莞寮步搬家公司
  • 学校门户网站是什么意思做网站的意义大不大
  • 做网站卖酒网站内容建设的布局和结构
  • 效果图在哪个网站可以找比较好wordpress网站背景设置
  • 专业整站优化韩国设计公司网站
  • 网站建设与规划学的心得体会WordPress主题启用出现错误
  • 网站建设 资讯宁波东方论坛首页
  • 东莞网站制作有名 乐云践新郑州官方网
  • 网站开发经理具备什么知识调查问卷网站建设
  • 做购买网站企业宣传片制作拍摄
  • logo艺术字转换器徐州seo企业
  • 禹城网站建设公司湖州城市投资建设集团网站
  • 上海城乡住房建设厅网站asp网站怎么做301定向
  • 惠州免费网站建设上海家装10强名单
  • 新手学习做网站电子商务网站建设与维护实验报告
  • 网站建设制作设计推广上海职业技能培训机构
  • 网站不同颜色网站无障碍的建设方面空缺
  • 手机网站织梦模板it初学者做网站
  • asp做的网站怎么发布企业黄页网站源码
  • 在云服务器上搭建网站云端视差wordpress企业主题破解版
  • 英德市住房城乡建设网站图片设计公司
  • 网站建设分金手指专业十七wordpress 审核
  • 怎么欣赏一个网站设计图网站传送门怎么做
  • 网站有什么组成上海做推广网站
  • 网站上传大马后怎么做管理咨询公司口号
  • 网站集约整合建设交流雅虎网站提交入口
  • 网站安全建设必要性网站登录页面
  • 鄂州网站推广做区块链在哪个网站
  • 网站什么内容网站安全性设计
  • 免费动态域名申请seo发布网站