self.l_pix_w = train_opt["pixel_weight"]
# optimizers
self.optimizers = []
# fall back to zero weight decay if the option is unset
wd_G = train_opt["weight_decay_G"] if train_opt["weight_decay_G"] else 0
optim_params = []
for k, v in self.netG.named_parameters():  # can optimize a subset of the model
    if v.requires_grad:
        optim_params.append(v)
    else:
        print("WARNING: params [%s] will not be optimized." % k)
self.optimizer_G = torch.optim.Adam(optim_params,
                                    lr=train_opt["lr_G"], weight_decay=wd_G)
self.optimizers.append(self.optimizer_G)
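# A minimal usage sketch (assumed, not part of this setup code): a training step
# would typically drive self.optimizer_G like the commented lines below, where
# pixel_criterion, fake, and real are hypothetical names for the weighted pixel
# loss computation using self.l_pix_w defined above.
#   self.optimizer_G.zero_grad()
#   l_pix = self.l_pix_w * pixel_criterion(fake, real)
#   l_pix.backward()
#   self.optimizer_G.step()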
# schedulers
self.schedulers = []
if train_opt["lr_scheme"] == "MultiStepLR":
    for optimizer in self.optimizers:
        self.schedulers.append(lr_scheduler.MultiStepLR(
            optimizer, milestones=train_opt["lr_steps"], gamma=train_opt["lr_gamma"]))
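# A minimal sketch (assumed, not shown in this fragment) of how the schedulers
# built above would be advanced, once per training iteration or epoch depending
# on how lr_steps is counted, so the learning rate decays by lr_gamma at each
# milestone in lr_steps:
#   for scheduler in self.schedulers:
#       scheduler.step()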