        trainer.set_learning_rate(trainer.learning_rate * 0.1)
    for batch_i, (features, label) in enumerate(data_iter):
        with autograd.record():
            output = net(features)
            loss = square_loss(output, label)
        loss.backward()
        trainer.step(batch_size)
        if batch_i * batch_size % log_interval == 0:
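For context, the fragment above starts inside a learning-rate decay branch and then dedents back into the per-batch loop, so it only reads correctly with the enclosing epoch loop in view. The following sketch fills in that assumed context; the function name, signature, epoch loop, decay branch, and logging body are reconstructions for illustration, not lines from the commit.

# Assumed context for the "Before Change" fragment (reconstruction, not
# diff content).
from mxnet import autograd, gluon

def optimize(net, trainer, data_iter, batch_size, num_epochs, decay_epoch,
             log_interval):
    square_loss = gluon.loss.L2Loss()
    losses = []
    for epoch in range(1, num_epochs + 1):
        # Decay the learning rate once the decay epoch has passed.
        if decay_epoch and epoch > decay_epoch:
            trainer.set_learning_rate(trainer.learning_rate * 0.1)
        for batch_i, (X, y) in enumerate(data_iter):
            with autograd.record():
                loss = square_loss(net(X), y)
            loss.backward()
            # step(batch_size) rescales the accumulated gradient by the
            # mini-batch size before the update.
            trainer.step(batch_size)
            if batch_i * batch_size % log_interval == 0:
                # Hypothetical logging step: record the mini-batch loss.
                losses.append(loss.mean().asnumpy())
    return losses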
After Change
dataset = gdata.ArrayDataset(features, labels)
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)
loss = gloss.L2Loss()
ls = [loss(net(features), labels).mean().asnumpy()]
for epoch in range(1, num_epochs + 1):
    # Decay the learning rate.
    if decay_epoch and epoch > decay_epoch:
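The "After Change" fragment likewise cuts off inside the epoch loop. Below is a self-contained sketch that assembles it into a complete training routine; the function name, the remainder of the loop body, and the synthetic-data usage at the end are assumptions for illustration, not part of the commit.

# Self-contained sketch assembling the "After Change" fragment; everything
# beyond the lines shown in the diff is a reconstruction for illustration.
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import data as gdata, loss as gloss, nn

def train(net, trainer, features, labels, batch_size, num_epochs,
          decay_epoch, log_interval):
    dataset = gdata.ArrayDataset(features, labels)
    data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)
    loss = gloss.L2Loss()
    # Record the loss on the full data set before any parameter update.
    ls = [loss(net(features), labels).mean().asnumpy()]
    for epoch in range(1, num_epochs + 1):
        # Decay the learning rate.
        if decay_epoch and epoch > decay_epoch:
            trainer.set_learning_rate(trainer.learning_rate * 0.1)
        for batch_i, (X, y) in enumerate(data_iter):
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)
            if batch_i * batch_size % log_interval == 0:
                # Assumed logging step: track the loss on the full data set.
                ls.append(loss(net(features), labels).mean().asnumpy())
    return ls

# Hypothetical usage on a small synthetic linear-regression problem.
features = nd.random.normal(shape=(1000, 2))
labels = 3.0 * features[:, 0] - 2.5 * features[:, 1] + nd.random.normal(
    scale=0.01, shape=(1000,))
net = nn.Sequential()
net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=0.01))
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
ls = train(net, trainer, features, labels, batch_size=10, num_epochs=3,
           decay_epoch=2, log_interval=100)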