import torch
|
|
class Beam(object):
    """Maintains `size` hypotheses during beam-search decoding."""

    def __init__(self, size, sos, eos):
        self.size = size
        # Tensors live on the GPU; the class assumes CUDA is available.
        self.tt = torch.cuda

        # Running cumulative score of each hypothesis on the beam.
        self.scores = self.tt.FloatTensor(size).zero_()

        # Backpointers at each timestep.
        self.prevKs = []

        # Output token ids at each timestep; every row starts from <sos>.
        self.nextYs = [self.tt.LongTensor(size).fill_(0)]
        self.nextYs[0][:] = sos

        # Token ids that terminate a hypothesis (`eos` is an iterable).
        self._eos = eos
        self.eosTop = False

        # Finished hypotheses as (score, timestep, beam index) tuples.
        self.finished = []
|
|
    def getCurrentState(self):
        "Get the outputs for the current timestep."
        batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
        return batch
|
|
    def getCurrentOrigin(self):
        "Get the backpointers for the current timestep."
        return self.prevKs[-1]
|
|
    def advance(self, wordLk):
        """
        Given `wordLk`, the log-probabilities of advancing each hypothesis
        on the beam at the last step (K x words), compute and update the
        beam search state. Completion is reported by `done()`.
        """
        numWords = wordLk.size(1)

        # Accumulate scores: add the running hypothesis scores to the new
        # word scores, and block hypotheses that already emitted <eos>.
        if len(self.prevKs) > 0:
            beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i] in self._eos:
                    beamLk[i] = -1e20
        else:
            # First step: all rows are identical, so only row 0 is needed.
            beamLk = wordLk[0]
        flatBeamLk = beamLk.view(-1)
        bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
|
|
        self.scores = bestScores

        # bestScoresId indexes the flattened (K x words) array, so recover
        # both the originating hypothesis and the chosen word id.
        prevK = bestScoresId // numWords
        self.prevKs.append(prevK)
        self.nextYs.append(bestScoresId - prevK * numWords)
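        # Worked example of the arithmetic above: with size=3 and
        # numWords=5, flat index 7 decodes to hypothesis 7 // 5 = 1
        # extended with word id 7 - 1 * 5 = 2.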
|
|
|
|
        # Record every hypothesis that just produced <eos> as finished,
        # keyed by (score, timestep, beam index).
        for i in range(self.nextYs[-1].size(0)):
            if self.nextYs[-1][i] in self._eos:
                s = self.scores[i]
                self.finished.append((s, len(self.nextYs) - 1, i))

        # Decoding ends when <eos> reaches the top of the beam.
        if self.nextYs[-1][0] in self._eos:
            self.eosTop = True
|
|
    def done(self):
        return self.eosTop and len(self.finished) >= self.size
|
|
    def getFinal(self):
        # Guarantee at least one hypothesis, then pad with the best
        # unfinished hypotheses if fewer than `size` have finished.
        if len(self.finished) == 0:
            self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
        self.finished.sort(key=lambda a: -a[0])
        if len(self.finished) != self.size:
            unfinished = []
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i] not in self._eos:
                    s = self.scores[i]
                    unfinished.append((s, len(self.nextYs) - 1, i))
            unfinished.sort(key=lambda a: -a[0])
            self.finished += unfinished[:self.size - len(self.finished)]
        return self.finished[:self.size]
|
|
    def getHyp(self, beam_res):
        """
        Walk back through the backpointers to construct the full hypotheses.
        """
        hyps = []
        for _, timestep, k in beam_res:
            hyp = []
            for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
                hyp.append(self.nextYs[j + 1][k])
                k = self.prevKs[j][k]
            hyps.append(hyp[::-1])
        return hyps

    def buildTargetTokens(self, preds):
        # Truncate each predicted sequence at its first <eos> id.
        sentence = []
        for pred in preds:
            tokens = []
            for tok in pred:
                tokens.append(tok)
                if tok in self._eos:
                    break
            sentence.append(tokens)
        return sentence
|
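
# --- Usage sketch (illustrative, not part of the original class) -----------
# A minimal decoding loop, assuming a CUDA device (Beam allocates its
# tensors with torch.cuda) and a hypothetical vocabulary where id 1 is
# <sos> and id 2 is <eos>. Random log-probabilities stand in for a real
# decoder, whose hidden state would normally be reordered between steps
# using getCurrentOrigin().
if __name__ == "__main__":
    assert torch.cuda.is_available(), "Beam allocates CUDA tensors"
    beam_size, vocab_size, max_len = 5, 100, 20
    beam = Beam(beam_size, sos=1, eos=[2])

    for _ in range(max_len):
        if beam.done():
            break
        # Stand-in for decoder output: (beam_size x vocab_size) log-probs.
        wordLk = torch.log_softmax(
            torch.randn(beam_size, vocab_size, device="cuda"), dim=-1)
        beam.advance(wordLk)

    # Best hypotheses as token-id lists, truncated at the first <eos>.
    hyps = beam.getHyp(beam.getFinal())
    for tokens in beam.buildTargetTokens(hyps):
        print([t.item() for t in tokens])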