self.l_gp_w = train_opt["gp_weigth"]
# optimizers
self.optimizers = []  # G and D
# G
wd_G = train_opt["weight_decay_G"] if train_opt["weight_decay_G"] else 0
optim_params = []
for k, v in self.netG.named_parameters():  # can optimize for a part of the model
    if v.requires_grad:
        optim_params.append(v)
    else:
        print("WARNING: params [%s] will not optimize." % k)
self.optimizer_G = torch.optim.Adam(optim_params, lr=train_opt["lr_G"],
                                    weight_decay=wd_G, betas=(train_opt["beta1_G"], 0.999))
self.optimizers.append(self.optimizer_G)
# D
wd_D = train_opt["weight_decay_D"] if train_opt["weight_decay_D"] else 0
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=train_opt["lr_D"],
                                    weight_decay=wd_D, betas=(train_opt["beta1_D"], 0.999))
self.optimizers.append(self.optimizer_D)
# schedulers
self.schedulers = []
if train_opt["lr_scheme"] == "MultiStepLR":
    for optimizer in self.optimizers:
        self.schedulers.append(lr_scheduler.MultiStepLR(optimizer,
                                                        train_opt["lr_steps"], train_opt["lr_gamma"]))
After Change
    if v.requires_grad:
        optim_params.append(v)
    else:
        print("WARNING: params [{:s}] will not optimize.".format(k))
self.optimizer_G = torch.optim.Adam(optim_params, lr=train_opt["lr_G"],
                                    weight_decay=wd_G, betas=(train_opt["beta1_G"], 0.999))
self.optimizers.append(self.optimizer_G)
# D