
GAN


import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np

# Reference: https://blog.csdn.net/jizhidexiaoming/article/details/96485095

torch.manual_seed(1)    # make the run reproducible
np.random.seed(1)

LR_G = 0.0001          # learning rate of the generator
LR_D = 0.0001          # learning rate of the discriminator
BATCH_SIZE = 64
N_IDEAS = 5            # size of the random "idea" vector fed to the generator

ART_COMPONENTS = 15    # number of points that make up one painting
PAINT_POINTS = np.vstack([np.linspace(-1, 1, ART_COMPONENTS) for _ in range(BATCH_SIZE)])  # ART_COMPONENTS x-values in [-1, 1], one row per batch sample
# print(PAINT_POINTS[0])


# Visualize the target region (optional): real paintings lie between these two curves
# plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')    # 2 * x^2 + 1
# plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')    # x^2
# plt.legend(loc='upper right')    # legend position
# plt.show()


def artist_work():
    # the "master artist" paints a batch of real curves: a * x^2 + (a - 1)
    a = np.random.uniform(1, 2, size=BATCH_SIZE)[:, np.newaxis]    # a has shape (64, 1)
    paints = a * np.power(PAINT_POINTS, 2) + (a - 1)
    paints = torch.from_numpy(paints).float()
    return paints
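
# Since a is drawn from [1, 2), a = 1 gives the lower bound x^2 and a close to 2 approaches
# the upper bound 2*x^2 + 1, so every real painting lies between the two plotted curves.
# artist_work() returns a tensor of shape (BATCH_SIZE, ART_COMPONENTS), i.e. (64, 15).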
G = nn.Sequential(                    # Generator: N_IDEAS random numbers -> a 15-point painting
    nn.Linear(N_IDEAS, 128),
    nn.ReLU(),
    nn.Linear(128, ART_COMPONENTS),
)
D = nn.Sequential(                    # Discriminator: a painting -> probability that it is real
    nn.Linear(ART_COMPONENTS, 128),
    nn.ReLU(),
    nn.Linear(128, 1),
    nn.Sigmoid()
)
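
# Optional sanity check (commented out): shapes of a single forward pass
# z = torch.randn(BATCH_SIZE, N_IDEAS)
# print(G(z).shape)      # torch.Size([64, 15])
# print(D(G(z)).shape)   # torch.Size([64, 1])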

real_label = torch.ones(BATCH_SIZE).reshape(-1, 1)     # target "1" for real paintings, shape (64, 1)
fake_label = torch.zeros(BATCH_SIZE).reshape(-1, 1)    # target "0" for generated paintings

criterion = nn.BCELoss()    # binary cross-entropy for a single-output classifier
optimizer_G = torch.optim.Adam(G.parameters(), lr=LR_G)
optimizer_D = torch.optim.Adam(D.parameters(), lr=LR_D)

plt.ion()    # interactive mode so the figure can be updated during training

for step in range(10000):
    artist_painting = artist_work()               # a batch of real paintings
    G_idea = torch.randn(BATCH_SIZE, N_IDEAS)     # random "ideas" for the generator

    G_paintings = G(G_idea)                       # a batch of generated paintings

    prob_artist0 = D(artist_painting)              # D's probability that a real painting is real
    prob_artist1 = D(G_paintings)                  # D's probability that a generated painting is real

    G_loss = criterion(prob_artist1, real_label)   # G wants its paintings to be judged real
    # D should tell real from generated paintings; the fakes are detached here so that
    # D_loss does not also push gradients into G
    D_loss = criterion(prob_artist0, real_label) + criterion(D(G_paintings.detach()), fake_label)
    # Equivalent hand-written losses:
    # G_loss = -torch.mean(torch.log(prob_artist1))
    # D_loss = -torch.mean(torch.log(prob_artist0) + torch.log(1. - prob_artist1))
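    # With BCELoss, criterion(p, 1) = -log(p) and criterion(p, 0) = -log(1 - p), so minimizing
    # D_loss maximizes log D(real) + log(1 - D(fake)), and minimizing G_loss maximizes
    # log D(fake), i.e. the standard (non-saturating) GAN objectives.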

    # update both networks: each one's gradients come only from its own loss
    optimizer_G.zero_grad()
    G_loss.backward()       # fills G's gradients (D's gradients from this pass are cleared just below)
    optimizer_D.zero_grad()
    D_loss.backward()       # fills D's gradients; nothing reaches G because the fakes were detached

    optimizer_D.step()
    optimizer_G.step()


    if step % 200 == 0:  # plotting
        plt.cla()
        plt.plot(PAINT_POINTS[0], G_paintings.data.numpy()[0], c='#4AD631', lw=3, label='Generated painting')
        plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
        plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
        plt.text(-.5, 2.3, 'D accuracy=%.2f (0.5 for D to converge)' % prob_artist0.data.numpy().mean(), fontdict={'size': 13})
        # plt.text(-.5, 2, 'G_loss= %.2f ' % G_loss.data.numpy(), fontdict={'size': 13})

        plt.ylim((0, 3)); plt.legend(loc='upper right', fontsize=10); plt.draw(); plt.pause(0.1)

plt.ioff()
plt.show()
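
Once training finishes, the generator can be used on its own: feed it a fresh batch of random "idea" vectors and it outputs 15-point curves that should fall inside the band between the two bounds. A minimal sketch, assuming the script above has just run so that G, N_IDEAS, ART_COMPONENTS and PAINT_POINTS are still defined:

with torch.no_grad():                    # inference only, no gradients needed
    ideas = torch.randn(5, N_IDEAS)      # 5 fresh random idea vectors
    samples = G(ideas)                   # shape: (5, ART_COMPONENTS)

for curve in samples:
    plt.plot(PAINT_POINTS[0], curve.numpy(), lw=2)
plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, 'k--', lw=1, label='upper bound')
plt.plot(PAINT_POINTS[0], np.power(PAINT_POINTS[0], 2), 'k:', lw=1, label='lower bound')
plt.legend(loc='upper right')
plt.show()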

 

Source: https://www.cnblogs.com/zxcayumi/p/15915557.html