144b5afdd5d9262a5a9a9fdd9afa56a14b629ba1,texar/modules/decoders/transformer_decoders.py,TransformerDecoder,_build,#TransformerDecoder#,78

Before Change


                    self.dec += dec

                with tf.variable_scope("encdec_attention"):
                    dec = layers.layer_normalize(self.dec)
                    dec = layers.multihead_attention(
                        queries=dec,
                        keys=encoder_output,
                        queries_valid_length=tgt_length,
                        keys_valid_length=src_length,
                        num_units=self._hparams.embedding.dim,
                        num_heads=self._hparams.num_heads,
                        dropout_rate=self._hparams.dropout,
                        causality=False,
                        scope="multihead_attention")
                    dec = tf.layers.dropout(
                        dec,
                        rate=self._hparams.dropout,
                        training=context.is_train())
                    self.dec += dec

                poswise_network = FeedForwardNetwork(hparams=self._hparams["poswise_feedforward"])
                with tf.variable_scope(poswise_network.variable_scope):
                    dec = layers.layer_normalize(self.dec)
                    dec = poswise_network(dec)
                    dec = tf.layers.dropout(
                        dec,
                        rate=self._hparams.dropout,
                        training=context.is_train())
                    self.dec += dec

        # share the projection weight with word embedding
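
The `#` comment above points at weight tying: the output softmax projection reuses the input word embedding matrix instead of a separate dense layer. A minimal TF 1.x sketch of the idea, assuming the table is available as a [vocab_size, dim] tensor named `embedding` (a hypothetical name, not taken from this diff):

    # Project decoder states onto the (transposed) embedding matrix so the
    # output layer and the word embedding share one set of weights.
    # self.dec: [batch, length, dim], embedding: [vocab_size, dim]
    logits = tf.einsum("bld,vd->blv", self.dec, embedding)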

After Change


                        dropout_rate=self._hparams.dropout,
                        causality=True,
                        scope="self_attention")
                    self.dec = self.dec + tf.layers.dropout(
                        selfatt_output,
                        rate=self._hparams.dropout,
                        training=context.is_train()
                    )

                with tf.variable_scope("encdec_attention"):
                    encdec_output = layers.multihead_attention(
                        queries=layers.layer_normalize(self.dec),
                        keys=encoder_output,
                        queries_valid_length=tgt_length,
                        keys_valid_length=src_length,
                        num_units=self._hparams.embedding.dim,
                        num_heads=self._hparams.num_heads,
                        dropout_rate=self._hparams.dropout,
                        causality=False,
                        scope="multihead_attention")
                    self.dec = self.dec + tf.layers.dropout(
                        encdec_output,
                        rate=self._hparams.dropout,
                        training=context.is_train()
                    )
                poswise_network = FeedForwardNetwork(hparams=self._hparams["poswise_feedforward"])
                with tf.variable_scope(poswise_network.variable_scope):
                    sub_output = tf.layers.dropout(
                        poswise_network(layers.layer_normalize(self.dec)),
                        rate=self._hparams.dropout,
                        training=context.is_train()
                    )
                    self.dec = self.dec + sub_output
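The fragment the miner reports below is the pre-layer-norm residual update that the commit makes explicit: x = x + dropout(sublayer(layer_norm(x))). A self-contained TF 1.x sketch of that sublayer wrapper; the helper name `residual_sublayer` and the dense sublayer are illustrative, not part of the Texar API:

    import tensorflow as tf

    def residual_sublayer(x, sublayer, dropout_rate, is_train):
        """Pre-LN residual block: x + dropout(sublayer(layer_norm(x)))."""
        y = tf.contrib.layers.layer_norm(x)  # normalize before the sublayer
        y = sublayer(y)                      # e.g. attention or feed-forward
        y = tf.layers.dropout(y, rate=dropout_rate, training=is_train)
        return x + y                         # residual connection

    # Usage: wrap a position-wise dense layer as the sublayer.
    x = tf.placeholder(tf.float32, [None, 10, 512])
    out = residual_sublayer(
        x, lambda h: tf.layers.dense(h, 512), dropout_rate=0.1, is_train=True)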
In pattern: SUPERPATTERN

Frequency: 4

Non-data size: 7

Instances


Project Name: asyml/texar
Commit Name: 144b5afdd5d9262a5a9a9fdd9afa56a14b629ba1
Time: 2018-03-19
Author: zhiting.hu@petuum.com
File Name: texar/modules/decoders/transformer_decoders.py
Class Name: TransformerDecoder
Method Name: _build


Project Name: asyml/texar
Commit Name: 144b5afdd5d9262a5a9a9fdd9afa56a14b629ba1
Time: 2018-03-19
Author: zhiting.hu@petuum.com
File Name: texar/modules/decoders/transformer_decoders.py
Class Name: TransformerDecoder
Method Name: _build


Project Name: asyml/texar
Commit Name: 144b5afdd5d9262a5a9a9fdd9afa56a14b629ba1
Time: 2018-03-19
Author: zhiting.hu@petuum.com
File Name: texar/modules/encoders/transformer_encoders.py
Class Name: TransformerEncoder
Method Name: _build