# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import json
import os

import six
import tensorflow as tf

import tokenization

flags = tf.flags
FLAGS = None


def extract_flags():
    flags.DEFINE_integer(
        "max_seq_length", 384,
        "The maximum total input sequence length after WordPiece tokenization. "
        "Sequences longer than this will be truncated, and sequences shorter "
        "than this will be padded.")

    flags.DEFINE_integer(
        "doc_stride", 128,
        "When splitting up a long document into chunks, how much stride to "
        "take between chunks.")

    flags.DEFINE_integer(
        "max_query_length", 64,
        "The maximum number of tokens for the question. Questions longer than "
        "this will be truncated to this length.")

    flags.DEFINE_bool(
        "version_2_with_negative", False,
        "If true, the SQuAD examples contain some that do not have an answer.")

    flags.DEFINE_string("train_file", None,
                        "SQuAD json for training. E.g., train-v1.1.json")

    flags.DEFINE_string(
        "predict_file", None,
        "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")

    flags.DEFINE_string(
        "squad_dir", None,
        "The output directory where the TFRecord files will be written.")

    flags.DEFINE_string("vocab_file", None,
                        "The vocabulary file that the BERT model was trained on.")

    flags.DEFINE_bool(
        "do_lower_case", True,
        "Whether to lower case the input text. Should be True for uncased "
        "models and False for cased models.")

    flags.DEFINE_bool(
        "verbose_logging", False,
        "If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.")

    flags.mark_flag_as_required("train_file")
    flags.mark_flag_as_required("predict_file")
    flags.mark_flag_as_required("squad_dir")
    flags.mark_flag_as_required("vocab_file")
    return flags.FLAGS


class SquadExample(object):
    """A single training/test example for SQuAD question answering.

    For examples without an answer, the start and end position are -1.
    """

    def __init__(self,
                 qas_id,
                 question_text,
                 doc_tokens,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None,
                 is_impossible=False):
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = ""
        s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
        s += ", question_text: %s" % (
            tokenization.printable_text(self.question_text))
        s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
        if self.start_position is not None:
            s += ", start_position: %d" % (self.start_position)
        if self.end_position is not None:
            s += ", end_position: %d" % (self.end_position)
        s += ", is_impossible: %r" % (self.is_impossible)
        return s


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self,
                 unique_id,
                 example_index,
                 doc_span_index,
                 tokens,
                 token_to_orig_map,
                 token_is_max_context,
                 input_ids,
                 input_mask,
                 segment_ids,
                 start_position=None,
                 end_position=None,
                 is_impossible=None):
        self.unique_id = unique_id
        self.example_index = example_index
        self.doc_span_index = doc_span_index
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.token_is_max_context = token_is_max_context
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible


def read_squad_examples(input_file, is_training, version_2_with_negative=False):
    """Read a SQuAD json file into a list of SquadExample."""
    with tf.gfile.Open(input_file, "r") as reader:
        input_data = json.load(reader)["data"]
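
    # For reference, the json layout accessed below (abridged; values are
    # illustrative):
    #   {"data": [{"paragraphs": [{"context": "...",
    #       "qas": [{"id": "...", "question": "...",
    #                "answers": [{"text": "...", "answer_start": 42}],
    #                "is_impossible": false}]}]}]}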

    def is_whitespace(c):
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
            return True
        return False

    examples = []
    for entry in input_data:
        for paragraph in entry["paragraphs"]:
            paragraph_text = paragraph["context"]
            doc_tokens = []
            char_to_word_offset = []
            prev_is_whitespace = True
            for c in paragraph_text:
                if is_whitespace(c):
                    prev_is_whitespace = True
                else:
                    if prev_is_whitespace:
                        doc_tokens.append(c)
                    else:
                        doc_tokens[-1] += c
                    prev_is_whitespace = False
                char_to_word_offset.append(len(doc_tokens) - 1)
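
            # For illustration: with context "John Smith", the loop above
            # yields doc_tokens = ["John", "Smith"] and
            # char_to_word_offset = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
            # (whitespace maps to the preceding token's index).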
for qa in paragraph["qas"]:
|
|
qas_id = qa["id"]
|
|
question_text = qa["question"]
|
|
start_position = None
|
|
end_position = None
|
|
orig_answer_text = None
|
|
is_impossible = False
|
|
if is_training:
|
|
|
|
if version_2_with_negative:
|
|
is_impossible = qa["is_impossible"]
|
|
if (len(qa["answers"]) != 1) and (not is_impossible):
|
|
raise ValueError(
|
|
"For training, each question should have exactly 1 answer.")
|
|
if not is_impossible:
|
|
answer = qa["answers"][0]
|
|
orig_answer_text = answer["text"]
|
|
answer_offset = answer["answer_start"]
|
|
answer_length = len(orig_answer_text)
|
|
start_position = char_to_word_offset[answer_offset]
|
|
end_position = char_to_word_offset[answer_offset + answer_length -
|
|
1]
|
|
# Only add answers where the text can be exactly recovered from the
|
|
# document. If this CAN'T happen it's likely due to weird Unicode
|
|
# stuff so we will just skip the example.
|
|
#
|
|
# Note that this means for training mode, every example is NOT
|
|
# guaranteed to be preserved.
|
|
actual_text = " ".join(
|
|
doc_tokens[start_position:(end_position + 1)])
|
|
cleaned_answer_text = " ".join(
|
|
tokenization.whitespace_tokenize(orig_answer_text))
|
|
if actual_text.find(cleaned_answer_text) == -1:
|
|
tf.logging.warning("Could not find answer: '%s' vs. '%s'",
|
|
actual_text, cleaned_answer_text)
|
|
continue
|
|
else:
|
|
start_position = -1
|
|
end_position = -1
|
|
orig_answer_text = ""
|
|
|
|
example = SquadExample(
|
|
qas_id=qas_id,
|
|
question_text=question_text,
|
|
doc_tokens=doc_tokens,
|
|
orig_answer_text=orig_answer_text,
|
|
start_position=start_position,
|
|
end_position=end_position,
|
|
is_impossible=is_impossible)
|
|
examples.append(example)
|
|
|
|
return examples
|
|
|
|


def _check_is_max_context(doc_spans, cur_span_index, position):
    """Check if this is the 'max context' doc span for the token."""

    # Because of the sliding window approach taken to scoring documents, a
    # single token can appear in multiple spans. E.g.
    #  Doc: the man went to the store and bought a gallon of milk
    #  Span A: the man went to the
    #  Span B: to the store and bought
    #  Span C: and bought a gallon of
    #  ...
    #
    # Now the word 'bought' will have two scores from spans B and C. We only
    # want to consider the score with "maximum context", which we define as
    # the *minimum* of its left and right context (the *sum* of left and
    # right context will always be the same, of course).
    #
    # In the example the maximum context for 'bought' would be span C since
    # it has 1 left context and 3 right context, while span B has 4 left
    # context and 0 right context.
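    #
    # Concretely, with the score used below (min(left, right) + 0.01 * span
    # length, both spans here of length 5): span B scores
    # min(4, 0) + 0.05 = 0.05 and span C scores min(1, 3) + 0.05 = 1.05,
    # so span C wins.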
    best_score = None
    best_span_index = None
    for (span_index, doc_span) in enumerate(doc_spans):
        end = doc_span.start + doc_span.length - 1
        if position < doc_span.start:
            continue
        if position > end:
            continue
        num_left_context = position - doc_span.start
        num_right_context = end - position
        score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
        if best_score is None or score > best_score:
            best_score = score
            best_span_index = span_index

    return cur_span_index == best_span_index


def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
                         orig_answer_text):
    """Returns tokenized answer spans that better match the annotated answer."""

    # The SQuAD annotations are character based. We first project them to
    # whitespace-tokenized words. But then after WordPiece tokenization, we can
    # often find a "better match". For example:
    #
    #   Question: What year was John Smith born?
    #   Context: The leader was John Smith (1895-1943).
    #   Answer: 1895
    #
    # The original whitespace-tokenized answer will be "(1895-1943).". However
    # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can
    # match the exact answer, 1895.
    #
    # However, this is not always possible. Consider the following:
    #
    #   Question: What country is the top exporter of electronics?
    #   Context: The Japanese electronics industry is the largest in the world.
    #   Answer: Japan
    #
    # In this case, the annotator chose "Japan" as a character sub-span of
    # the word "Japanese". Since our WordPiece tokenizer does not split
    # "Japanese", we just use "Japanese" as the annotation. This is fairly rare
    # in SQuAD, but does happen.
    tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))

    for new_start in range(input_start, input_end + 1):
        for new_end in range(input_end, new_start - 1, -1):
            text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
            if text_span == tok_answer_text:
                return (new_start, new_end)

    return (input_start, input_end)


def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training,
                                 output_fn, verbose_logging=False):
    """Loads a data file into a list of `InputBatch`s."""

    unique_id = 1000000000

    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)

        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]

        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
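
        # For illustration: if doc_tokens = ["unaffable", "cat"] and the
        # tokenizer splits "unaffable" into ["un", "##aff", "##able"], then
        # all_doc_tokens = ["un", "##aff", "##able", "cat"],
        # orig_to_tok_index = [0, 3] and tok_to_orig_index = [0, 0, 0, 1].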

        tok_start_position = None
        tok_end_position = None
        if is_training and example.is_impossible:
            tok_start_position = -1
            tok_end_position = -1
        if is_training and not example.is_impossible:
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
                example.orig_answer_text)

        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3

        # We can have documents that are longer than the maximum sequence
        # length. To deal with this we do a sliding window approach, where we
        # take chunks of up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            "DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)
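
        # For illustration: with 500 document tokens, a 64-token query and
        # max_seq_length = 384 (so max_tokens_for_doc = 317), a stride of 128
        # produces doc_spans = [(start=0, length=317), (start=128, length=317),
        # (start=256, length=244)].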

        for (doc_span_index, doc_span) in enumerate(doc_spans):
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)

            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]

                is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                       split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
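
            # The packed sequence now looks like, e.g. (question tokens are
            # illustrative):
            #   tokens:      [CLS] who wrote hamlet ? [SEP] doc_1 ... doc_n [SEP]
            #   segment_ids:   0    0    0     0    0   0     1   ...   1     1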

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only
            # real tokens are attended to.
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            start_position = None
            end_position = None
            if is_training and not example.is_impossible:
                # For training, if our document chunk does not contain an
                # annotation we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                out_of_span = False
                if not (tok_start_position >= doc_start and
                        tok_end_position <= doc_end):
                    out_of_span = True
                if out_of_span:
                    start_position = 0
                    end_position = 0
                else:
                    # Offset of the document tokens within the packed sequence:
                    # [CLS] + query tokens + [SEP].
                    doc_offset = len(query_tokens) + 2
                    start_position = tok_start_position - doc_start + doc_offset
                    end_position = tok_end_position - doc_start + doc_offset

            if is_training and example.is_impossible:
                start_position = 0
                end_position = 0

            if verbose_logging and example_index < 20:
                tf.compat.v1.logging.info("*** Example ***")
                tf.compat.v1.logging.info("unique_id: %s" % (unique_id))
                tf.compat.v1.logging.info("example_index: %s" % (example_index))
                tf.compat.v1.logging.info("doc_span_index: %s" % (doc_span_index))
                tf.compat.v1.logging.info("tokens: %s" % " ".join(
                    [tokenization.printable_text(x) for x in tokens]))
                tf.compat.v1.logging.info("token_to_orig_map: %s" % " ".join(
                    ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
                tf.compat.v1.logging.info("token_is_max_context: %s" % " ".join([
                    "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
                ]))
                tf.compat.v1.logging.info(
                    "input_ids: %s" % " ".join([str(x) for x in input_ids]))
                tf.compat.v1.logging.info(
                    "input_mask: %s" % " ".join([str(x) for x in input_mask]))
                tf.compat.v1.logging.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                if is_training and example.is_impossible:
                    tf.compat.v1.logging.info("impossible example")
                if is_training and not example.is_impossible:
                    answer_text = " ".join(tokens[start_position:(end_position + 1)])
                    tf.compat.v1.logging.info("start_position: %d" % (start_position))
                    tf.compat.v1.logging.info("end_position: %d" % (end_position))
                    tf.compat.v1.logging.info(
                        "answer: %s" % (tokenization.printable_text(answer_text)))

            feature = InputFeatures(
                unique_id=unique_id,
                example_index=example_index,
                doc_span_index=doc_span_index,
                tokens=tokens,
                token_to_orig_map=token_to_orig_map,
                token_is_max_context=token_is_max_context,
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                start_position=start_position,
                end_position=end_position,
                is_impossible=example.is_impossible)

            # Run callback
            output_fn(feature)

            unique_id += 1


class FeatureWriter(object):
    """Writes InputFeatures to a TF example file."""

    def __init__(self, filename, is_training):
        self.filename = filename
        self.is_training = is_training
        self.num_features = 0
        self._writer = tf.python_io.TFRecordWriter(filename)

    def process_feature(self, feature):
        """Write an InputFeature to the TFRecordWriter as a tf.train.Example."""
        self.num_features += 1

        def create_int_feature(values):
            feature = tf.train.Feature(
                int64_list=tf.train.Int64List(value=list(values)))
            return feature

        features = collections.OrderedDict()
        features["unique_ids"] = create_int_feature([feature.unique_id])
        features["input_ids"] = create_int_feature(feature.input_ids)
        features["input_mask"] = create_int_feature(feature.input_mask)
        features["segment_ids"] = create_int_feature(feature.segment_ids)

        if self.is_training:
            features["start_positions"] = create_int_feature([feature.start_position])
            features["end_positions"] = create_int_feature([feature.end_position])
            impossible = 0
            if feature.is_impossible:
                impossible = 1
            features["is_impossible"] = create_int_feature([impossible])

        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        self._writer.write(tf_example.SerializeToString())

    def close(self):
        self._writer.close()
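

# A minimal sketch (illustrative, not part of this script) of how the records
# written above could be parsed back under TF 1.x; `seq_length` must match the
# --max_seq_length used when writing:
#
#   def _decode_record(record, seq_length, is_training):
#       name_to_features = {
#           "unique_ids": tf.FixedLenFeature([], tf.int64),
#           "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
#           "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
#           "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
#       }
#       if is_training:
#           name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
#           name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
#           name_to_features["is_impossible"] = tf.FixedLenFeature([], tf.int64)
#       return tf.parse_single_example(record, name_to_features)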


def main():
    FLAGS = extract_flags()
    tokenizer = tokenization.FullTokenizer(
        vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
    tf.gfile.MakeDirs(FLAGS.squad_dir + "/final_tfrecords_sharded")

    # We write the features to TFRecord files rather than feeding them in
    # directly, to avoid storing very large constant tensors in memory.
    train_examples = read_squad_examples(
        input_file=FLAGS.train_file, is_training=True,
        version_2_with_negative=FLAGS.version_2_with_negative)
    train_writer = FeatureWriter(
        filename=os.path.join(FLAGS.squad_dir,
                              "final_tfrecords_sharded/train.tf_record"),
        is_training=True)
    convert_examples_to_features(
        examples=train_examples,
        tokenizer=tokenizer,
        max_seq_length=FLAGS.max_seq_length,
        doc_stride=FLAGS.doc_stride,
        max_query_length=FLAGS.max_query_length,
        is_training=True,
        output_fn=train_writer.process_feature,
        verbose_logging=FLAGS.verbose_logging)
    train_writer.close()

    eval_examples = read_squad_examples(
        input_file=FLAGS.predict_file, is_training=False,
        version_2_with_negative=FLAGS.version_2_with_negative)

    eval_writer = FeatureWriter(
        filename=os.path.join(FLAGS.squad_dir,
                              "final_tfrecords_sharded/eval.tf_record"),
        is_training=False)
    eval_features = []

    def append_feature(feature):
        eval_features.append(feature)
        eval_writer.process_feature(feature)

    convert_examples_to_features(
        examples=eval_examples,
        tokenizer=tokenizer,
        max_seq_length=FLAGS.max_seq_length,
        doc_stride=FLAGS.doc_stride,
        max_query_length=FLAGS.max_query_length,
        is_training=False,
        output_fn=append_feature,
        verbose_logging=FLAGS.verbose_logging)
    eval_writer.close()


if __name__ == "__main__":
    main()
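
# Example invocation (script name and paths are illustrative):
#   python create_squad_data.py \
#       --train_file=/data/squad/train-v1.1.json \
#       --predict_file=/data/squad/dev-v1.1.json \
#       --squad_dir=/data/squad/output \
#       --vocab_file=/data/bert/vocab.txt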