lgcharpe committed on
Commit
e6ab847
1 Parent(s): 93b66fd

Update modeling_norbert.py

Browse files
Files changed (1) hide show
  1. modeling_norbert.py +1 -1
modeling_norbert.py CHANGED
@@ -142,7 +142,7 @@ class Attention(nn.Module):
142
  - torch.arange(config.max_position_embeddings, dtype=torch.long).unsqueeze(0)
143
  position_indices = self.make_log_bucket_position(position_indices, config.position_bucket_size, config.max_position_embeddings)
144
  position_indices = config.position_bucket_size - 1 + position_indices
145
- self.register_buffer("position_indices", position_indices, persistent=True)
146
 
147
  self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
148
  self.scale = 1.0 / math.sqrt(3 * self.head_size)
 
142
  - torch.arange(config.max_position_embeddings, dtype=torch.long).unsqueeze(0)
143
  position_indices = self.make_log_bucket_position(position_indices, config.position_bucket_size, config.max_position_embeddings)
144
  position_indices = config.position_bucket_size - 1 + position_indices
145
+ self.register_buffer("position_indices", position_indices, persistent=False)
146
 
147
  self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
148
  self.scale = 1.0 / math.sqrt(3 * self.head_size)