欢迎您访问程序员文章站。本站旨在为大家分享程序员计算机编程知识!
您现在的位置是: 首页

GAN的pytorch版本(线性网络)

程序员文章站 2022-03-09 13:06:37
...
import torch
import torchvision
from torchvision import transforms
from torchvision.utils import save_image
from torch import nn
from torch.autograd import Variable
from torch import optim
import os

# Map PIL images to tensors in [-1, 1] so they match the Tanh output of the
# generator.  MNIST is single-channel, so Normalize needs ONE mean/std pair —
# the original passed three, which torchvision rejects for 1-channel input.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5,), std=(0.5,))
])

# download=True so the script works on a machine without a pre-fetched dataset.
mnist = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
dataloader = torch.utils.data.DataLoader(mnist, batch_size=100, shuffle=True)

class Dnet(nn.Module):
    """Discriminator: maps a flattened 28x28 image (784 floats) to a
    probability in (0, 1) that the image is real."""

    def __init__(self):
        super(Dnet, self).__init__()
        # 784 -> 256 -> 1, Sigmoid squashes the logit into a probability.
        layers = [
            nn.Linear(784, 256),
            nn.LeakyReLU(),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Return P(real) with shape (batch, 1)."""
        return self.network(x)
class Gnet(nn.Module):
    """Generator: maps a 128-dim noise vector to a flattened 28x28 image
    with values in [-1, 1] (matching the normalized training data)."""

    def __init__(self):
        super(Gnet, self).__init__()
        # 128 -> 256 -> 784, Tanh keeps pixels in the data's [-1, 1] range.
        layers = [
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 784),
            nn.Tanh(),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Return a batch of fake images with shape (batch, 784)."""
        return self.network(x)

def to_img(x):
    """Convert a batch of flat generator outputs in [-1, 1] into image
    tensors of shape (N, 1, 28, 28) with pixel values clamped to [0, 1]."""
    # Invert the Normalize(mean=0.5, std=0.5) transform: [-1, 1] -> [0, 1].
    imgs = (x + 1) / 2
    imgs = imgs.clamp(0, 1)
    return imgs.view(-1, 1, 28, 28)

class Net:
    """Bundles the GAN's discriminator and generator with their losses and
    optimizers.  forward() scores real and fake batches with D; backward()
    then updates D (real->1, fake->0) and G (fool D into outputting 1).

    Attributes set for the training loop to read:
        real_d_out, g_d_out  -- D's scores from forward()
        fack_g_out           -- last generated batch (for image dumps)
        d_loss, g_loss       -- scalar losses from the last backward()
    """

    def __init__(self):
        self.dnet = Dnet().cuda()
        self.gnet = Gnet().cuda()
        self.Loss = nn.BCELoss()
        self.d_optimizer = optim.Adam(self.dnet.parameters(), lr=0.0002)
        self.g_optimizer = optim.Adam(self.gnet.parameters(), lr=0.0002)

    def forward(self, real_x, fack_x):
        """Score a real batch and a freshly generated fake batch with D."""
        self.real_d_out = self.dnet(real_x)
        g_out = self.gnet(fack_x)
        # BUG FIX: the original called `net.dnet(...)` — the global instance —
        # instead of `self`, which breaks if the object has any other name.
        # detach() stops D's loss from needlessly backpropagating into G.
        self.g_d_out = self.dnet(g_out.detach())

    def backward(self, pos_y, nega_y, fack_xs):
        """One discriminator step followed by one generator step.

        pos_y/nega_y: all-ones / all-zeros targets, same shape as D's output.
        fack_xs: fresh noise for the generator update.
        """
        # --- discriminator: real -> 1, generated -> 0 ---
        d_out_loss = self.Loss(self.real_d_out, pos_y)
        g_d_loss = self.Loss(self.g_d_out, nega_y)
        self.d_loss = d_out_loss + g_d_loss
        self.d_optimizer.zero_grad()
        self.d_loss.backward()
        self.d_optimizer.step()

        # --- generator: make D output 1 on fresh fakes ---
        self.fack_g_out = self.gnet(fack_xs)
        fack_g_d_out = self.dnet(self.fack_g_out)
        self.g_loss = self.Loss(fack_g_d_out, pos_y)
        self.g_optimizer.zero_grad()
        self.g_loss.backward()
        self.g_optimizer.step()

if __name__ == '__main__':
    # BUG FIX: the original created 'img_' but saved into './img', so every
    # save_image call raised FileNotFoundError.  Use one directory throughout.
    out_dir = 'img_'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    net = Net()

    batch_size = 100
    for i in range(100):
        for x, y in dataloader:
            x = x.view(x.size(0), -1)
            real_x = Variable(x).cuda()
            fack_x = Variable(torch.randn(batch_size, 128)).cuda()
            # BUG FIX: targets must match D's (batch, 1) output shape;
            # 1-D (batch,) targets make modern BCELoss raise a shape error.
            pos_y = Variable(torch.ones(batch_size, 1)).cuda()
            nega_y = Variable(torch.zeros(batch_size, 1)).cuda()
            fack_xs = Variable(torch.randn(batch_size, 128)).cuda()
            net.forward(real_x, fack_x)
            net.backward(pos_y, nega_y, fack_xs)
            print(net.d_loss.item(), net.g_loss.item())
        # Save once per epoch: the original wrote the same per-epoch filename
        # on every batch, repeatedly overwriting it.
        img = to_img(net.fack_g_out.data)
        save_image(img, '{}/fake_images-{}.png'.format(out_dir, i + 1))

 

相关标签: GAN