@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_dp_resume(tmpdir):
    """Make sure DP continues training correctly.

    Fits a fresh ``CustomModel`` that records whether ``on_train_start``
    ran, and asserts the resumed fit actually entered training.

    NOTE(review): this chunk appears truncated — the initial training run
    and the checkpoint/HPC save that the second fit is expected to resume
    from are not visible here. Verify against the full file.
    """
    hparams = EvalModelTemplate.get_default_hparams()
    model = EvalModelTemplate(**hparams)

    trainer_options = dict(max_epochs=1, gpus=2, accelerator="dp", default_root_dir=tmpdir)

    # Subclass that flags when the training loop is entered, so the resumed
    # fit below can be asserted to have actually started training.
    class CustomModel(EvalModelTemplate):
        def __init__(self):
            super().__init__(**hparams)
            self.on_train_start_called = False

        def on_train_start(self):
            # Record that training started on the resumed run.
            self.on_train_start_called = True

    # new model
    model = CustomModel()

    # fit new model which should load hpc weights
    # TODO(review): trainer construction was not visible in this chunk —
    # confirm the options/checkpoint-restore arguments against the full file.
    new_trainer = Trainer(**trainer_options)
    new_trainer.fit(model)
    assert model.on_train_start_called