# todo tf2: hardcoding for a single output feature - need to generalize
# Resolve the single output feature this evaluation currently reports on
# (only the first entry of output_features is used for metric reporting).
of_name = self.hyperparameters["output_features"][0]["name"]
output_feature = self.output_features[of_name]
# Only the master worker draws a progress bar, so distributed runs
# (e.g. Horovod, per the commented block below) don't print duplicates.
if is_on_master():
    progress_bar = tqdm(
        desc="Evaluation" if name is None
        else "Evaluation {0: <5.5}".format(name),
        total=batcher.steps_per_epoch,
        file=sys.stdout,
        disable=is_progressbar_disabled()
    )
# Consume the dataset one batch at a time, running one evaluation step
# per batch.
while not batcher.last_batch():
    batch = batcher.next_batch()
    # todo: tf2 clean up code
    # result = session.run(
    #     output_nodes,
    #     feed_dict=self.feed_dict(
    #         batch,
    #         regularization_lambda=regularization_lambda,
    #         dropout_rate=0.0,
    #         is_training=is_training
    #     )
    # )
    # todo: tf2 need to rationalize to reduce redundant code
    # create array for predictors
    # todo: tf2 need to handle case of single predictor, e.g., image
    # Stack every input feature's batch column row-wise, then transpose so
    # each row of `predictors` is one example.
    # NOTE(review): assumes each batch[f["name"]] is a 1-D array of equal
    # length -- vstack would fail or misalign otherwise; confirm upstream.
    predictors = reduce(lambda x, y: np.vstack((x, y)),
                        [batch[f["name"]] for f in
                         self.hyperparameters["input_features"]]).T
    # create array for target
    # is there more than one target
    if len(self.hyperparameters["output_features"]) > 1:
        # Multiple targets: same stack-and-transpose layout as predictors.
        target = reduce(lambda x, y: np.vstack((x, y)),
                        [batch[f["name"]] for f in
                         self.hyperparameters["output_features"]]).T
    else:
        # Single target: re-derives of_name/output_feature (redundant with
        # the assignments above the loop) and takes the raw batch column.
        of_name = self.hyperparameters["output_features"][0]["name"]
        output_feature = self.output_features[of_name]
        target = batch[
            self.hyperparameters["output_features"][0]["name"]]
    # Run one evaluation step; `result` is currently unused because the
    # per-batch stats update below is commented out -- presumably the step
    # accumulates metrics internally (see measure_functions loop at the
    # end) -- TODO confirm.
    result = self.evaluation_step(
        self.keras_model,
        output_feature,
        predictors,
        target
    )
    # output_stats, seq_set_size = self.update_output_stats_batch(
    #     output_stats,
    #     seq_set_size,
    #     collect_predictions,
    #     only_predictions,
    #     result
    # )
    if is_on_master():
        progress_bar.update(1)
if is_on_master():
    progress_bar.close()
# if self.horovod:
#     output_stats, seq_set_size = self.merge_workers_outputs(
#         output_stats,
#         seq_set_size
#     )
#
# output_stats = self.update_output_stats(
#     output_stats,
#     set_size,
#     seq_set_size,
#     collect_predictions,
#     only_predictions
# )
# if "combined" in output_stats and LOSS in output_stats["combined"]:
#     regularization = session.run(
#         [self.regularization_loss],
#         feed_dict={self.regularization_lambda: regularization_lambda}
#     )[0]
#     output_stats["combined"][LOSS] += regularization
# todo: tf2 debugging
# Print the accumulated value of each stateful measure for this dataset.
template = f"Dataset {name}:"
for measure, measure_fn in output_feature.measure_functions.items():
    if measure_fn is not None:  # todo tf2 test is needed only during development
        template += f" {measure}: {measure_fn.result()}"
print(template)
fake_stats = OrderedDict(
[("y", OrderedDict([("loss", [9489.847173455057]),