39a28aba95b0d8eeb56f7a17b7dad140e601b591,agents/clipped_ppo_agent.py,ClippedPPOAgent,train_network,#ClippedPPOAgent#Any#Any#,80

Before Change


                    actions = np.expand_dims(actions, -1)

                # get old policy probabilities and distribution
                result = self.main_network.target_network.predict([current_states])
                old_policy_distribution = result[1:]

                # calculate gradients and apply on both the local policy network and on the global policy network
                fetches = [self.main_network.online_network.output_heads[1].kl_divergence,
                           self.main_network.online_network.output_heads[1].entropy]

                total_return = np.expand_dims(total_return, -1)
                value_targets = gae_based_value_targets if self.tp.agent.estimate_value_using_gae else total_return
                total_loss, policy_losses, unclipped_grads, fetch_result =\
                    self.main_network.online_network.accumulate_gradients(
                        [current_states] + [actions] + old_policy_distribution,
                        [total_return, advantages], additional_fetches=fetches)

                self.value_targets.add_sample(value_targets)
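
For context, here is a minimal, self-contained NumPy sketch of the old calling convention shown above: a flat positional list built from the states, the actions, and the policy-distribution parameters returned after the value head. The shapes, the stand-in predict() result, and the variable contents are made up for illustration; this is not Coach's actual network API.

# Illustrative only: stand-in shapes and predict() result, not Coach's API.
import numpy as np

batch_size, obs_dim, action_dim = 32, 8, 2
current_states = np.random.randn(batch_size, obs_dim)
actions = np.random.randn(batch_size, action_dim)

# Assume predict() returns one array per output head: index 0 is the value
# head, so result[1:] keeps only the policy-distribution parameters.
result = [np.random.randn(batch_size, 1),                    # value head
          np.random.randn(batch_size, action_dim),           # policy mean
          np.abs(np.random.randn(batch_size, action_dim))]   # policy std (continuous case)
old_policy_distribution = result[1:]

# Old calling convention: a flat, positional list of input arrays.
network_inputs = [current_states] + [actions] + old_policy_distribution
assert len(network_inputs) == 2 + len(old_policy_distribution)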

After Change



                total_return = np.expand_dims(total_return, -1)
                value_targets = gae_based_value_targets if self.tp.agent.estimate_value_using_gae else total_return
                inputs = copy.copy(current_states)
                # TODO: why is this output 0 and not output 1?
                inputs["output_0_0"] = actions
                # TODO: does old_policy_distribution really need to be represented as a list?
                # A: yes it does; in the case of discrete controls it has just a mean,
                # otherwise it has both a mean and a standard deviation
                for input_index, input in enumerate(old_policy_distribution):
                    inputs["output_0_{}".format(input_index + 1)] = input
                # print("old_policy_distribution.shape", len(old_policy_distribution))
                total_loss, policy_losses, unclipped_grads, fetch_result =\
                    self.main_network.online_network.accumulate_gradients(
                        inputs, [total_return, advantages], additional_fetches=fetches)
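
The after-change convention replaces the positional list with a keyed dictionary. Below is a minimal sketch, assuming current_states is a dict of observation arrays; only the "output_0_<i>" key pattern is taken from the snippet above, while the "observation" key, the discrete_controls flag, and the dimensions are hypothetical.

# Illustrative only: hypothetical observation key and shapes; the
# "output_0_<i>" key pattern is copied from the snippet above.
import copy
import numpy as np

batch_size, action_dim = 32, 2
current_states = {"observation": np.random.randn(batch_size, 4)}
actions = np.random.randn(batch_size, action_dim)

# Per the TODO answer above: for discrete controls the old policy is a single
# array (a mean); for continuous controls it is a mean plus a standard deviation.
discrete_controls = False
if discrete_controls:
    old_policy_distribution = [np.random.rand(batch_size, action_dim)]
else:
    old_policy_distribution = [
        np.random.randn(batch_size, action_dim),          # mean
        np.abs(np.random.randn(batch_size, action_dim)),  # standard deviation
    ]

# New calling convention: copy the states dict and key the extra arrays as
# "output_0_<i>" -- actions first, then the distribution parameters.
inputs = copy.copy(current_states)
inputs["output_0_0"] = actions
for input_index, dist_param in enumerate(old_policy_distribution):
    inputs["output_0_{}".format(input_index + 1)] = dist_param

print(sorted(key for key in inputs if key.startswith("output_0_")))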
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 11

Instances


Project Name: NervanaSystems/coach
Commit Name: 39a28aba95b0d8eeb56f7a17b7dad140e601b591
Time: 2018-02-21
Author: zach.dwiel@intel.com
File Name: agents/clipped_ppo_agent.py
Class Name: ClippedPPOAgent
Method Name: train_network


Project Name: NervanaSystems/coach
Commit Name: 8248caf35eeb43046f2b28937627d43cbf950c9a
Time: 2018-02-21
Author: zach.dwiel@intel.com
File Name: agents/ppo_agent.py
Class Name: PPOAgent
Method Name: train_policy_network



Project Name: NervanaSystems/coach
Commit Name: ee6e0bdc3b91b5fb738e8278898ceb49e7080341
Time: 2018-02-21
Author: zach.dwiel@intel.com
File Name: agents/ppo_agent.py
Class Name: PPOAgent
Method Name: train_value_network