GAN的pytorch版本(线性网络)
程序员文章站
2022-03-09 13:06:37
...
import torch
import torchvision
from torchvision import transforms
from torchvision.utils import save_image
from torch import nn
from torch.autograd import Variable
from torch import optim
import os
# Preprocessing: PIL image -> tensor in [0, 1], then rescale to [-1, 1]
# so real images match the generator's Tanh output range.
# MNIST is single-channel; the original passed 3-channel mean/std tuples,
# which raises a ValueError in current torchvision versions.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5,), std=(0.5,)),
])
# download=False assumes the dataset already exists under ./data.
mnist = torchvision.datasets.MNIST(root='./data', train=True, download=False, transform=transform)
dataloader = torch.utils.data.DataLoader(mnist, batch_size=100, shuffle=True)
class Dnet(nn.Module):
    """Discriminator: maps a flattened 784-dim image to a real/fake probability."""

    def __init__(self):
        super(Dnet, self).__init__()
        # 784 -> 256 -> 1, with a final sigmoid so the output is a probability.
        layers = [
            nn.Linear(784, 256),
            nn.LeakyReLU(),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Return the discriminator score in (0, 1) for each row of x."""
        out = self.network(x)
        return out
class Gnet(nn.Module):
    """Generator: maps a 128-dim noise vector to a flattened 784-dim image."""

    def __init__(self):
        super(Gnet, self).__init__()
        # 128 -> 256 -> 784, Tanh squashes output into [-1, 1] to match
        # the normalized real images.
        layers = [
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 784),
            nn.Tanh(),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Return a batch of generated flattened images in [-1, 1]."""
        out = self.network(x)
        return out
def to_img(x):
    """Convert generator output in [-1, 1] back to image tensors in [0, 1].

    Accepts a (N, 784) batch and returns a (N, 1, 28, 28) batch suitable
    for save_image.
    """
    # Undo the [-1, 1] normalization, clip numerical overshoot, and
    # restore the single-channel 28x28 image layout.
    img = ((x + 1) * 0.5).clamp(0, 1)
    return img.view(-1, 1, 28, 28)
class Net:
    """Bundles the GAN: discriminator, generator, BCE loss and Adam optimizers."""

    def __init__(self):
        # Use the GPU when present instead of unconditionally calling .cuda(),
        # so the class also works on CPU-only machines (same behavior on GPU).
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.dnet = Dnet().to(self.device)
        self.gnet = Gnet().to(self.device)
        # BCELoss pairs with Dnet's final Sigmoid: targets are 1 (real) / 0 (fake).
        self.Loss = nn.BCELoss()
        self.d_optimizer = optim.Adam(self.dnet.parameters(), lr=0.0002)
        self.g_optimizer = optim.Adam(self.gnet.parameters(), lr=0.0002)

    def forward(self, real_x, fack_x):
        """Score a real batch and a generated batch with the discriminator.

        Stores the scores on self for the subsequent backward() call.
        """
        self.real_d_out = self.dnet(real_x)
        g_out = self.gnet(fack_x)
        # BUG FIX: the original wrote `net.dnet(g_out)`, reaching out to the
        # module-level global `net` instead of this instance.
        self.g_d_out = self.dnet(g_out)

    def backward(self, pos_y, nega_y, fack_xs):
        """Run one optimizer step for D, then one for G.

        pos_y / nega_y: target tensors for "real" / "fake" labels.
        fack_xs: fresh noise batch used for the generator update.
        """
        # Discriminator update: push real scores toward pos_y, fakes toward nega_y.
        d_out_loss = self.Loss(self.real_d_out, pos_y)
        g_d_loss = self.Loss(self.g_d_out, nega_y)
        self.d_loss = d_out_loss + g_d_loss
        self.d_optimizer.zero_grad()
        self.d_loss.backward()
        self.d_optimizer.step()
        # Generator update: regenerate from fresh noise and try to make the
        # (just-updated) discriminator label it as real.
        self.fack_g_out = self.gnet(fack_xs)
        fack_g_d_out = self.dnet(self.fack_g_out)
        self.g_loss = self.Loss(fack_g_d_out, pos_y)
        self.g_optimizer.zero_grad()
        self.g_loss.backward()
        self.g_optimizer.step()
if __name__ == '__main__':
    # BUG FIX: the original created 'img_' but saved into './img/', so every
    # save_image call failed. Create the directory actually used for output.
    if not os.path.exists('./img'):
        os.mkdir('./img')
    net = Net()
    for epoch in range(100):
        for x, y in dataloader:
            # The final batch of an epoch may hold fewer than 100 samples,
            # so size every tensor from the actual batch instead of a
            # hard-coded 100.
            num = x.size(0)
            # Flatten 1x28x28 images to 784-dim vectors for the linear nets.
            x = x.view(num, -1)
            real_x = Variable(x).cuda()
            fack_x = Variable(torch.randn(num, 128)).cuda()
            # Labels shaped (num, 1) to match D's (N, 1) sigmoid output;
            # 1-D targets raise a shape error in BCELoss on modern torch.
            pos_y = Variable(torch.ones(num, 1)).cuda()
            nega_y = Variable(torch.zeros(num, 1)).cuda()
            fack_xs = Variable(torch.randn(num, 128)).cuda()
            net.forward(real_x, fack_x)
            net.backward(pos_y, nega_y, fack_xs)
        # Report and save once per epoch; saving inside the batch loop just
        # overwrote the same file hundreds of times per epoch.
        img = to_img(net.fack_g_out.data)
        print(net.d_loss.item(), net.g_loss.item())
        save_image(img, './img/fake_images-{}.png'.format(epoch + 1))
上一篇: 用“ 快速卷积”来加速‘卷积’
推荐阅读
-
Linux服务器配置GPU版本的pytorch Torchvision TensorFlow
-
Pytorch1.5.1版本安装的方法步骤
-
Ubuntu18.04 安装 显卡驱动 Nvidia Driver CUDA CUDNN 与GPU 版本的Pytorch
-
Linux环境下GPU版本的pytorch安装
-
Pytorch无法运行在GPU上,提示显卡版本太低的解决方法
-
pytorch 1.7训练保存的模型在1.4低版本无法加载:frame #63: <unknown function> + 0x1db3e0 (0x55ba98ddd3e0 in /data/user
-
【pytorch基础笔记五】基于条件GAN的色彩填充
-
Linux服务器配置GPU版本的pytorch Torchvision TensorFlow
-
Pytorch1.5.1版本安装的方法步骤
-
Linux环境下GPU版本的pytorch安装