Return:
    emb (FloatTensor): len x batch x input_size
word = self.word_lut(src_input[:, :, 0])
emb = word
if self.feature_dicts:
    features = [feature_lut(src_input[:, :, j + 1])
                for j, feature_lut in enumerate(self.feature_luts)]
    # Apply one MLP layer to merge the word and feature embeddings.
    emb = self.activation(
        self.linear(torch.cat([word] + features, -1)))
if self.positional_encoding:
    emb = emb + Variable(self.pe[:emb.size(0), :1, :emb.size(2)]
                         .expand_as(emb))
    emb = self.dropout(emb)
return emb
class Encoder(nn.Module):
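For context, here is a minimal self-contained sketch of how the before-change module might be wired up, assuming word_lut and feature_luts are nn.Embedding tables and that linear/activation form the single MLP layer referenced in the comment; the class name and sizes are illustrative only, and positional encoding and dropout are omitted for brevity.

import torch
import torch.nn as nn


class MLPMergeEmbeddings(nn.Module):  # hypothetical name for illustration
    def __init__(self, word_vocab, feat_vocabs, emb_size):
        super().__init__()
        self.word_lut = nn.Embedding(word_vocab, emb_size)
        self.feature_luts = nn.ModuleList(
            nn.Embedding(v, emb_size) for v in feat_vocabs)
        # One MLP layer projects the concatenated embeddings back to emb_size.
        self.linear = nn.Linear(emb_size * (1 + len(feat_vocabs)), emb_size)
        self.activation = nn.ReLU()

    def forward(self, src_input):
        # src_input: len x batch x (1 + n_feats) LongTensor of indices.
        word = self.word_lut(src_input[:, :, 0])
        features = [lut(src_input[:, :, j + 1])
                    for j, lut in enumerate(self.feature_luts)]
        return self.activation(self.linear(torch.cat([word] + features, -1)))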
After Change
Return:
    emb (FloatTensor): len x batch x sum of feature embedding sizes
feat_inputs = (feat.squeeze(2) for feat in src_input.split(1, dim=2))
features = [lut(feat) for lut, feat in zip(self.emb_luts, feat_inputs)]
emb = self.merge(features)
return emb
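Below is a minimal, runnable sketch of the after-change lookup path. Only emb_luts and merge appear in the snippet above, so the constructor, the concatenation-based merge, and all sizes here are assumptions for illustration.

import torch
import torch.nn as nn


class FeatureEmbeddings(nn.Module):  # hypothetical name for illustration
    def __init__(self, vocab_sizes, emb_sizes):
        super().__init__()
        # One lookup table per input feature column (word index plus extras).
        self.emb_luts = nn.ModuleList(
            nn.Embedding(v, d) for v, d in zip(vocab_sizes, emb_sizes))

    def merge(self, features):
        # Concatenate along the last dimension, so the output width is the
        # sum of the individual feature embedding sizes.
        return torch.cat(features, dim=2)

    def forward(self, src_input):
        # src_input: len x batch x n_feats LongTensor of indices.
        feat_inputs = (feat.squeeze(2) for feat in src_input.split(1, dim=2))
        features = [lut(feat) for lut, feat in zip(self.emb_luts, feat_inputs)]
        return self.merge(features)


# Usage: two feature columns (word + one extra), seq len 5, batch 3.
emb = FeatureEmbeddings(vocab_sizes=[100, 10], emb_sizes=[8, 4])
src = torch.stack([torch.randint(0, 100, (5, 3)),
                   torch.randint(0, 10, (5, 3))], dim=2)
print(emb(src).shape)  # torch.Size([5, 3, 12])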