Generative Adversarial Networks (GAN) code review
1. Reference Video
This post was written with reference to the following lecture video. Many thanks to the professor.
2. GAN with CNN discriminator
- A GAN model that uses a simple CNN as the discriminator, trained with CUDA
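For reference, the standard GAN objective that this kind of training loop approximates is the minimax game

$$\min_{G}\max_{D}\; \mathbb{E}_{x\sim p_{\text{data}}}\big[\log D(x)\big] + \mathbb{E}_{z\sim p_{z}}\big[\log\big(1-D(G(z))\big)\big]$$

The code below optimizes both terms with `BCELoss`, but it flips the usual label convention: real images are labeled 0, fakes are labeled 1, and the generator is trained to push its fakes toward the "real" label 0. The adversarial game is the same; only the naming of the two classes is swapped.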
# GAN model with a simple CNN discriminator (CUDA)
import torch
import torchvision
import fastai.vision.all
import matplotlib.pyplot as plt
from matplotlib import animation
plt.rcParams["animation.html"] = "jshtml"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use the GPU if available, otherwise the CPU
def plot_loss(loss_history_police, loss_history_faker, ax=None):
    epochs = range(len(loss_history_police))
    if ax is None:
        fig, ax = plt.subplots()
    ax.plot(epochs, loss_history_police, label="Discriminator Loss (net_police)")
    ax.plot(epochs, loss_history_faker, label="Generator Loss (net_faker)")
    ax.legend()
    ax.set_xlabel("Checkpoint (x100 epochs)")  # losses are recorded every 100 epochs
    ax.set_ylabel("Loss")
    ax.set_title("GAN Losses Over Time")
def learn_and_record(net_police, net_faker, loss_fn, optimizr_police, optimizr_faker, X_real, y_real, y_fake):
    loss_history_police = []
    loss_history_faker = []
    fake_images_history = []
    for epoc in range(1000):
        ## train the discriminator (net_police)
        Noise = torch.randn(6131, 4, device=device)  # sample noise directly on the GPU
        X_fake = net_faker(Noise).detach()  # detach so this step does not update the generator
        # predictions for real and fake images
        yhat_real = net_police(X_real)
        yhat_fake = net_police(X_fake)
        # discriminator loss: score real images as y_real and fake images as y_fake
        loss_police = loss_fn(yhat_real, y_real) + loss_fn(yhat_fake, y_fake)
        optimizr_police.zero_grad()  # clear grads first, including any that leaked in during the previous generator update
        loss_police.backward()
        optimizr_police.step()
        ## train the generator (net_faker)
        Noise = torch.randn(6131, 4, device=device)  # sample noise directly on the GPU
        X_fake = net_faker(Noise)
        # discriminator predictions for the fake images
        yhat_fake = net_police(X_fake)
        loss_faker = loss_fn(yhat_fake, y_real)  # the generator wants its fakes to be scored as real
        optimizr_faker.zero_grad()
        loss_faker.backward()
        optimizr_faker.step()
        # record the losses and a few generated images every 100 epochs
        if epoc % 100 == 0:
            loss_history_police.append(loss_police.item())
            loss_history_faker.append(loss_faker.item())
            fake_images_history.append(X_fake[:10].detach().cpu())  # keep a few fake images for the animation
    return loss_history_police, loss_history_faker, fake_images_history
def show_animation(net_police, net_faker, loss_fn, optimizr_police, optimizr_faker, X_real, y_real, y_fake):
    loss_history_police, loss_history_faker, fake_images_history = learn_and_record(
        net_police, net_faker, loss_fn, optimizr_police, optimizr_faker, X_real, y_real, y_fake
    )
    fig = plt.figure(figsize=(10, 5))
    ax1 = fig.add_subplot(1, 2, 1)  # loss curves
    ax2 = fig.add_subplot(1, 2, 2)  # generated images
    # redraw the loss curves and the generated images for each recorded checkpoint
    def animate(epoc):
        ax1.clear()
        plot_loss(loss_history_police[:epoc], loss_history_faker[:epoc], ax1)
        ax2.clear()
        ax2.imshow(torchvision.utils.make_grid(fake_images_history[epoc], nrow=5).permute(1, 2, 0))
        fig.suptitle(f"Epoch {epoc * 100}")
    ani = animation.FuncAnimation(fig, animate, frames=len(fake_images_history), interval=200)
    plt.close()
    return ani
path = fastai.data.external.untar_data(fastai.data.external.URLs.MNIST)
X_real = torch.stack([torchvision.io.read_image(str(l)) for l in (path/'training/3').ls()], dim=0).float().div(255).to(device)  # all 6131 MNIST '3' images, moved to the GPU
y_real = torch.tensor([0]*6131, device=device).reshape(-1,1).float()  # real label (this code uses real=0)
y_fake = torch.tensor([1]*6131, device=device).reshape(-1,1).float()  # fake label (fake=1)
class Reshape2828(torch.nn.Module):
    def __init__(self):
        super().__init__()
    def forward(self, X):
        return X.reshape(-1, 1, 28, 28)
torch.manual_seed(43052)
net_police = torch.nn.Sequential(
    torch.nn.Conv2d(1, 16, kernel_size=(5, 5)),
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(kernel_size=(2, 2)),
    torch.nn.Conv2d(16, 4, kernel_size=(5, 5)),
    torch.nn.Flatten(),
    torch.nn.Linear(8*8*4, 1),
    torch.nn.Sigmoid()
).to(device)  # move the model to the GPU
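# (shape check) how a batch of shape (n,1,28,28) flows through net_police:
# (n,1,28,28) --Conv2d(1,16,5)--> (n,16,24,24) --MaxPool2d(2)--> (n,16,12,12)
# --Conv2d(16,4,5)--> (n,4,8,8) --Flatten--> (n,256) --Linear(256,1)+Sigmoid--> (n,1)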
net_faker = torch.nn.Sequential(
    torch.nn.Linear(in_features=4, out_features=64),    # (n,4) -> (n,64)
    torch.nn.ReLU(),
    torch.nn.Linear(in_features=64, out_features=64),   # (n,64) -> (n,64)
    torch.nn.ReLU(),
    torch.nn.Linear(in_features=64, out_features=784),  # (n,64) -> (n,784)
    torch.nn.Sigmoid(),
    Reshape2828()
).to(device)  # move the model to the GPU
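# (shape check) noise of shape (n,4) flows through net_faker as:
# (n,4) --Linear+ReLU--> (n,64) --Linear+ReLU--> (n,64) --Linear+Sigmoid--> (n,784) with values in (0,1)
# --Reshape2828--> (n,1,28,28) fake images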
bce = torch.nn.BCELoss().to(device)  # loss function (BCELoss has no parameters, so .to(device) is effectively a no-op)
optimizr_police = torch.optim.Adam(net_police.parameters(), lr=0.001, betas=(0.5, 0.999))
optimizr_faker = torch.optim.Adam(net_faker.parameters(), lr=0.0002, betas=(0.5, 0.999))
show_animation(net_police, net_faker, bce, optimizr_police, optimizr_faker, X_real, y_real, y_fake)
Noise = torch.randn(6131, 4, device=device)  # sample noise on the GPU
X_fake = net_faker(Noise[0])
plt.imshow(X_fake.to("cpu").data.reshape(28, 28), cmap="gray")  # one fake image after training
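Note that `show_animation` returns a `matplotlib.animation.FuncAnimation`, which renders inline as jshtml because of the `rcParams` setting at the top. If you also want to keep the result as a file, a minimal sketch (not part of the lecture code; the filename is arbitrary, and calling `show_animation` again re-runs the full 1000-epoch loop) would be:

ani = show_animation(net_police, net_faker, bce, optimizr_police, optimizr_faker, X_real, y_real, y_fake)
ani.save("gan_cnn.gif", writer="pillow")  # the "pillow" writer requires the Pillow package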
3. GAN with DNN discriminator
# GAN model with a DNN discriminator (CUDA)
import torch
import torchvision
import fastai.vision.all
import matplotlib.pyplot as plt
from matplotlib import animation
plt.rcParams["animation.html"] = "jshtml"
# use the GPU if available, otherwise the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def plot_loss(loss_history_police, loss_history_faker, ax=None):
    epochs = range(len(loss_history_police))
    if ax is None:
        fig, ax = plt.subplots()
    ax.plot(epochs, loss_history_police, label="Discriminator Loss (net_police)")
    ax.plot(epochs, loss_history_faker, label="Generator Loss (net_faker)")
    ax.legend()
    ax.set_xlabel("Checkpoint (x100 epochs)")  # losses are recorded every 100 epochs
    ax.set_ylabel("Loss")
    ax.set_title("GAN Losses Over Time")
def learn_and_record(net_police, net_faker, loss_fn, optimizr_police, optimizr_faker, X_real, y_real, y_fake):
    loss_history_police = []
    loss_history_faker = []
    fake_images_history = []
    for epoc in range(1000):
        ## train the discriminator (net_police)
        Noise = torch.randn(6131, 4, device=device)  # sample noise directly on the GPU
        X_fake = net_faker(Noise).detach()  # detach so this step does not update the generator
        # predictions for real and fake images
        yhat_real = net_police(X_real)
        yhat_fake = net_police(X_fake)
        # discriminator loss: score real images as y_real and fake images as y_fake
        loss_police = loss_fn(yhat_real, y_real) + loss_fn(yhat_fake, y_fake)
        optimizr_police.zero_grad()  # clear grads first, including any that leaked in during the previous generator update
        loss_police.backward()
        optimizr_police.step()
        ## train the generator (net_faker)
        Noise = torch.randn(6131, 4, device=device)  # sample noise directly on the GPU
        X_fake = net_faker(Noise)
        # discriminator predictions for the fake images
        yhat_fake = net_police(X_fake)
        loss_faker = loss_fn(yhat_fake, y_real)  # the generator wants its fakes to be scored as real
        optimizr_faker.zero_grad()
        loss_faker.backward()
        optimizr_faker.step()
        # record the losses and a few generated images every 100 epochs
        if epoc % 100 == 0:
            loss_history_police.append(loss_police.item())
            loss_history_faker.append(loss_faker.item())
            fake_images_history.append(X_fake[:10].detach().cpu())  # keep a few fake images for the animation
    return loss_history_police, loss_history_faker, fake_images_history
def show_animation(net_police, net_faker, loss_fn, optimizr_police, optimizr_faker, X_real, y_real, y_fake):
    loss_history_police, loss_history_faker, fake_images_history = learn_and_record(
        net_police, net_faker, loss_fn, optimizr_police, optimizr_faker, X_real, y_real, y_fake
    )
    fig = plt.figure(figsize=(10, 5))
    ax1 = fig.add_subplot(1, 2, 1)  # loss curves
    ax2 = fig.add_subplot(1, 2, 2)  # generated images
    # redraw the loss curves and the generated images for each recorded checkpoint
    def animate(epoc):
        ax1.clear()
        plot_loss(loss_history_police[:epoc], loss_history_faker[:epoc], ax1)
        ax2.clear()
        ax2.imshow(torchvision.utils.make_grid(fake_images_history[epoc], nrow=5).permute(1, 2, 0))
        fig.suptitle(f"Epoch {epoc * 100}")
    ani = animation.FuncAnimation(fig, animate, frames=len(fake_images_history), interval=200)
    plt.close()
    return ani
# download MNIST and move the dataset to the GPU
path = fastai.data.external.untar_data(fastai.data.external.URLs.MNIST)
X_real = torch.stack([torchvision.io.read_image(str(l)) for l in (path/'training/3').ls()], dim=0).float().div(255).to(device)
class Reshape2828(torch.nn.Module):
    def __init__(self):
        super().__init__()
    def forward(self, X):
        return X.reshape(-1, 1, 28, 28)
torch.manual_seed(43052)
# discriminator model, moved to the GPU
net_police = torch.nn.Sequential(
    torch.nn.Flatten(),
    torch.nn.Linear(in_features=784, out_features=30),
    torch.nn.ReLU(),
    # torch.nn.Linear(in_features=30, out_features=30),
    # torch.nn.ReLU(),
    torch.nn.Linear(in_features=30, out_features=1),
    torch.nn.Sigmoid()
).to(device)
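# (shape check) the DNN discriminator flattens each image before scoring it:
# (n,1,28,28) --Flatten--> (n,784) --Linear(784,30)+ReLU--> (n,30) --Linear(30,1)+Sigmoid--> (n,1)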
# generator model, moved to the GPU
net_faker = torch.nn.Sequential(
    torch.nn.Linear(in_features=4, out_features=64),    # (n,4) -> (n,64)
    torch.nn.ReLU(),
    torch.nn.Linear(in_features=64, out_features=64),   # (n,64) -> (n,64)
    torch.nn.ReLU(),
    torch.nn.Linear(in_features=64, out_features=784),  # (n,64) -> (n,784)
    torch.nn.Sigmoid(),
    Reshape2828()
).to(device)
# labels on the GPU (note: this code uses real=0 and fake=1, the reverse of the usual convention, but it is consistent throughout)
y_real = torch.tensor([0]*6131, device=device).reshape(-1,1).float()  # real
y_fake = torch.tensor([1]*6131, device=device).reshape(-1,1).float()  # fake
# loss function (BCELoss has no parameters, so .to(device) is effectively a no-op)
bce = torch.nn.BCELoss().to(device)
# optimizers
optimizr_police = torch.optim.Adam(net_police.parameters(), lr=0.001, betas=(0.5, 0.999))
optimizr_faker = torch.optim.Adam(net_faker.parameters(), lr=0.0002, betas=(0.5, 0.999))
# train and render the animation
show_animation(net_police, net_faker, bce, optimizr_police, optimizr_faker, X_real, y_real, y_fake)
Noise = torch.randn(6131, 4, device=device)  # sample noise on the GPU
X_fake = net_faker(Noise[0])
plt.imshow(X_fake.to("cpu").data.reshape(28, 28), cmap="gray")  # one fake image after training
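As a rough comparison of the two discriminators (an addition to this post, not part of the lecture code): counting trainable parameters shows the CNN discriminator from section 2 is far smaller (roughly 2.3k parameters) than this DNN discriminator (roughly 23.6k), while the generator is identical in both sections.

# count trainable parameters of the models defined in this section
n_police = sum(p.numel() for p in net_police.parameters())  # DNN discriminator
n_faker = sum(p.numel() for p in net_faker.parameters())    # generator
print(f"discriminator parameters: {n_police}, generator parameters: {n_faker}")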