
BERT's positional encoding: position_embeddings (a learnable, parametric encoding of absolute position)

BERT stores positional information as ordinary trainable parameters: position_embeddings is an nn.Embedding table with one vector per absolute position. In BertEmbeddings this table is indexed with the position ids and the result is added to the word and token-type embeddings.

position_embeddings (learnable absolute positional encoding)

class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)  # 32
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)  # torch.Size([32])
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)  # torch.Size([32]) ----> torch.Size([128, 32])
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)  # torch.Size([128, 32]) ----> torch.Size([128, 32, 768])
        position_embeddings = self.position_embeddings(position_ids)  # torch.Size([128, 32]) ----> torch.Size([128, 32, 768])
        token_type_embeddings = self.token_type_embeddings(token_type_ids)  # torch.Size([128, 32]) ----> torch.Size([128, 32, 768])
        embeddings = words_embeddings + position_embeddings + token_type_embeddings  # torch.Size([128, 32, 768])
        embeddings = self.LayerNorm(embeddings)  # torch.Size([128, 32, 768]) ----> torch.Size([128, 32, 768])
        embeddings = self.dropout(embeddings)  # torch.Size([128, 32, 768])
        return embeddings
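
The shape comments above assume a batch of 128 sequences of length 32 with BERT-base's hidden size of 768; none of these sizes is fixed by BERT itself. To make the bookkeeping concrete, here is a minimal, self-contained sketch of just the positional lookup under those assumed sizes (the embedding table below is randomly initialized, not a trained BERT weight):

import torch
from torch import nn

# Illustrative sizes taken from the shape comments above (assumptions, not BERT constants).
batch_size, seq_len, hidden_size, max_positions = 128, 32, 768, 512

# The learnable absolute positional table: one trainable hidden_size-d vector per position index.
position_embeddings = nn.Embedding(max_positions, hidden_size)

# Dummy token ids, used only to borrow a shape and device, the way BertEmbeddings uses input_ids.
input_ids = torch.zeros(batch_size, seq_len, dtype=torch.long)

position_ids = torch.arange(seq_len, dtype=torch.long, device=input_ids.device)  # torch.Size([32])
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)                    # torch.Size([128, 32])

pos_emb = position_embeddings(position_ids)  # torch.Size([128, 32, 768])
print(pos_emb.shape)

Because each row of the table is an ordinary trainable parameter, position i is represented by whatever vector training learns for index i, and positions beyond max_position_embeddings (512 in the default BertConfig of the full code below) cannot be represented.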

Full code

# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
BERT_CONFIG_NAME = 'bert_config.json'
TF_WEIGHTS_NAME = 'model.ckpt'


def load_tf_weights_in_bert(model, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'squad':
                pointer = getattr(pointer, 'classifier')
            else:
                try:
                    pointer = getattr(pointer, l[0])
                except AttributeError:
                    print("Skipping {}".format("/".join(name)))
                    continue
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model


def gelu(x):
    """Implementation of the gelu activation function.
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}


class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.
    """
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probabilitiy for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The sttdev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """ Save this instance to a json file."""
        with open(json_file_path, "w", encoding='utf-8') as writer:
            writer.write(self.to_json_string())


try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")

    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias


class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BertSelfAttention(nn.Module):
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        self_output = self.self(input_tensor, attention_mask)
        attention_output = self.output(self_output, input_tensor)
        return attention_output


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attention_output = self.attention(hidden_states, attention_mask)
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class BertEncoder(nn.Module):
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        layer = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        all_encoder_layers = []
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, attention_mask)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers


class BertPooler(nn.Module):
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class BertPreTrainingHeads(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class BertPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for dowloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-large-cased`
                    . `bert-base-multilingual-uncased`
                    . `bert-base-multilingual-cased`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `model.chkpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        state_dict = kwargs.get('state_dict', None)
        kwargs.pop('state_dict', None)
        cache_dir = kwargs.get('cache_dir', None)
        kwargs.pop('cache_dir', None)
        from_tf = kwargs.get('from_tf', False)
        kwargs.pop('from_tf', None)

        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = pretrained_model_name_or_path
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                    archive_file))
            return None
        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))
        else:
            logger.info("loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file) or from_tf:
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info("extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir))
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
        if not os.path.exists(config_file):
            # Backward compatibility with old naming format
            config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME)
        config = BertConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
            state_dict = torch.load(weights_path, map_location='cpu')
        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
            return load_tf_weights_in_bert(model, weights_path)
        # Load from a PyTorch state_dict
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')

        start_prefix = ''
        if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        return model


class BertModel(BertPreTrainedModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").

    Params:
        config: a BertConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.

    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output


class BertForPreTraining(BertPreTrainedModel):
    """BERT model with pre-training heads.
    This module comprises the BERT model followed by the two pre-training heads:
        - the masked language modeling head, and
        - the next sentence classification head.

    Params:
        config: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]
        `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
            with indices selected in [0, 1].
            0 => next sentence is the continuation, 1 => next sentence is a random sentence.

    Outputs:
        if `masked_lm_labels` and `next_sentence_label` are not `None`:
            Outputs the total_loss which is the sum of the masked language modeling loss and the next
            sentence classification loss.
        if `masked_lm_labels` or `next_sentence_label` is `None`:
            Outputs a tuple comprising
            - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
            - the next sentence classification logits of shape [batch_size, 2].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForPreTraining(config)
    masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):
        sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                                   output_all_encoded_layers=False)
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            return total_loss
        else:
            return prediction_scores, seq_relationship_score


class BertForMaskedLM(BertPreTrainedModel):
    """BERT model with the masked language modeling head.
    This module comprises the BERT model followed by the masked language modeling head.

    Params:
        config: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]

    Outputs:
        if `masked_lm_labels` is not `None`:
            Outputs the masked language modeling loss.
        if `masked_lm_labels` is `None`:
            Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForMaskedLM(config)
    masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForMaskedLM, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                       output_all_encoded_layers=False)
        prediction_scores = self.cls(sequence_output)

        if masked_lm_labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            return masked_lm_loss
        else:
            return prediction_scores


class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT model with next sentence prediction head.
    This module comprises the BERT model followed by the next sentence classification head.

    Params:
        config: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
            with indices selected in [0, 1].
            0 => next sentence is the continuation, 1 => next sentence is a random sentence.

    Outputs:
        if `next_sentence_label` is not `None`:
            Outputs the total_loss which is the sum of the masked language modeling loss and the next
            sentence classification loss.
        if `next_sentence_label` is `None`:
            Outputs the next sentence classification logits of shape [batch_size, 2].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForNextSentencePrediction(config)
    seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForNextSentencePrediction, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                     output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)

        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            return next_sentence_loss
        else:
            return seq_relationship_score


class BertForSequenceClassification(BertPreTrainedModel):
    """BERT model for classification.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_labels`: the number of classes for the classifier. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary. Items in the batch should begin with the special "CLS" token. (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels].

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_labels = 2

    model = BertForSequenceClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_labels):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss
        else:
            return logits


class BertForMultipleChoice(BertPreTrainedModel):
    """BERT model for multiple choice tasks.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_choices`: the number of classes for the classifier. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
            and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices].

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
    token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_choices = 2

    model = BertForMultipleChoice(config, num_choices)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_choices):
        super(BertForMultipleChoice, self).__init__(config)
        self.num_choices = num_choices
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, self.num_choices)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            return loss
        else:
            return reshaped_logits


class BertForTokenClassification(BertPreTrainedModel):
    """BERT model for token-level classification.
    This module is composed of the BERT model with a linear layer on top of
    the full hidden state of the last layer.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_labels`: the number of classes for the classifier. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, ..., num_labels].

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, sequence_length, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_labels = 2

    model = BertForTokenClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_labels):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss
        else:
            return logits


class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT model for Question Answering (span extraction).
    This module is composed of the BERT model with a linear layer on top of
    the sequence output that computes start_logits and end_logits

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and position outside of the sequence are not taken
            into account for computing the loss.
        `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and position outside of the sequence are not taken
            into account for computing the loss.

    Outputs:
        if `start_positions` and `end_positions` are not `None`:
            Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
        if `start_positions` or `end_positions` is `None`:
            Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
            position tokens of shape [batch_size, sequence_length].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForQuestionAnswering(config)
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            return total_loss
        else:
            return start_logits, end_logits
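
As a quick sanity check of where the learnable positional table ends up in the full model, the short sketch below instantiates a randomly initialized BERT directly from the classes above (assuming they are defined in the current file; the config sizes are simply the BertConfig defaults):

config = BertConfig(vocab_size_or_config_json_file=32000)  # hidden_size=768, max_position_embeddings=512 by default
model = BertModel(config)

weight = model.embeddings.position_embeddings.weight
print(weight.shape)          # torch.Size([512, 768]): one learned vector per absolute position
print(weight.requires_grad)  # True: the positional vectors are trained together with the rest of BERT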
