Apply Taylor's Kaleido fix for attention mask
dmjoy committed Apr 24, 2024
1 parent 2c72c4a commit 4e9de4e
Showing 1 changed file with 2 additions and 2 deletions.
align_system/algorithms/lib/kaleido.py (4 changes: 2 additions & 2 deletions)
@@ -488,10 +488,10 @@ def get_probs(self, inputs, batch_size=None):
 encoded_batch = self.tokenizer.batch_encode_plus(
     inputs[inds].tolist(),
     return_tensors='pt', padding=True, truncation=False, max_length=128,
-).to(self.device).input_ids
+).to(self.device)
 # batch_inputs = encoded_batch[i*batch_size:(i+1)*batch_size]
 # Run through model, get last logits
-logits_batch = self.model(input_ids=encoded_batch, labels=self.get_dummy(encoded_batch)).logits[:, -1, :].detach().cpu()
+logits_batch = self.model(input_ids=encoded_batch.input_ids, attention_mask=encoded_batch.attention_mask, labels=self.get_dummy(encoded_batch.input_ids)).logits[:, -1, :].detach().cpu()
 logits.append(logits_batch)

 # concatenate logits
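The change matters because batch_encode_plus with padding=True pads the shorter prompts in each batch; the previous code kept only input_ids and discarded the returned attention_mask, so pad tokens were attended to and could skew the last-position logits. Below is a minimal sketch of the difference, assuming a Hugging Face seq2seq model. The checkpoint name, example texts, and the dummy_labels stand-in for the repository's get_dummy() are illustrative assumptions, not the actual Kaleido code.

import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Illustrative stand-ins; the real Kaleido checkpoint and helpers differ.
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = T5ForConditionalGeneration.from_pretrained('t5-small')

texts = ['a short prompt', 'a much longer prompt that forces the short one to be padded']
encoded = tokenizer.batch_encode_plus(
    texts, return_tensors='pt', padding=True, truncation=False, max_length=128,
)

# Dummy single-token decoder labels, standing in for get_dummy() (assumption)
dummy_labels = torch.full(
    (encoded.input_ids.shape[0], 1), tokenizer.pad_token_id, dtype=torch.long
)

# Old behavior: only input_ids are passed, so pad positions are attended to
logits_unmasked = model(input_ids=encoded.input_ids, labels=dummy_labels).logits

# Fixed behavior: the attention mask excludes pad positions in the encoder
logits_masked = model(
    input_ids=encoded.input_ids,
    attention_mask=encoded.attention_mask,
    labels=dummy_labels,
).logits

# The padded (shorter) example's logits differ between the two calls
print(torch.allclose(logits_unmasked[0], logits_masked[0]))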
