1. Fix LGTM alerts, remove unused modules from Python files.

bhsueh 2020-03-06 12:09:50 +00:00
parent e7c94040e8
commit 77505ce75c
11 changed files with 48 additions and 19 deletions

View file

@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import tensorflow as tf

View file

@@ -14,16 +14,12 @@
import tensorflow as tf
import numpy as np
import os
import math
import six
import argparse
import numpy as np
from utils.common import time_test, DecodingArgument, int_result_cross_check, TransformerArgument
from utils.decoding import tf_decoding, generate_encoder_result, op_decoding
from utils.common import DecodingArgument, TransformerArgument
from utils.decoding import tf_decoding
from utils.encoder import tf_encoder, op_encoder
if __name__ == "__main__":
parser = argparse.ArgumentParser()
@@ -49,8 +45,6 @@ if __name__ == "__main__":
help='vocabulary size. (default: 30000).')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)')
parser.add_argument('-time', '--test_time', type=int, default=0, metavar='BOOL',
help='test the time or not. (default: False (0)), True is 1.')
parser.add_argument('-decoder', '--decoder_type', type=int, default=2, metavar='NUMBER',
help='Decoder type:'
+ ' type 0: only run tf decoder;'

View file

@@ -14,12 +14,9 @@
import tensorflow as tf
import numpy as np
import os
import math
import six
import argparse
from utils.common import time_test, DecodingArgument, int_result_cross_check, TransformerArgument
from utils.decoding import tf_decoding, generate_encoder_result, op_decoding
from utils.decoding import tf_decoding, op_decoding
from utils.encoder import tf_encoder, op_encoder
if __name__ == "__main__":

View file

@@ -1,3 +1,17 @@
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logdir="decoder-log"
mkdir ${logdir}
export CUDA_VISIBLE_DEVICES=1

View file

@@ -1,3 +1,17 @@
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logdir="decoding-log"
mkdir ${logdir}
export CUDA_VISIBLE_DEVICES=1

View file

@@ -16,9 +16,8 @@ from __future__ import print_function
import tensorflow as tf
import numpy as np
import argparse
from utils.common import time_test, DecodingArgument
from utils.decoding import tf_decoding, generate_encoder_result, op_decoding
from utils.common import DecodingArgument
from utils.decoding import tf_decoding, op_decoding
from opennmt.utils import misc
from opennmt.encoders.self_attention_encoder import SelfAttentionEncoder
from opennmt.decoders.self_attention_decoder import SelfAttentionDecoder

View file

@@ -1,3 +1,17 @@
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import os

View file

@@ -15,7 +15,6 @@
from __future__ import print_function
import tensorflow as tf
import numpy as np
from datetime import datetime
import sys
import pickle

View file

@@ -19,7 +19,6 @@ import six
import os
from common import create_initializer
def gelu(x):
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
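
The hunk shown here ends at the `cdf` expression, so the rest of the function falls outside the diff context. For reference, a minimal self-contained sketch of this tanh-based GELU approximation (an illustration of the formula shown above; the closing `return x * cdf` is assumed, since it is not visible in the hunk):

import numpy as np
import tensorflow as tf

def gelu(x):
    # Tanh approximation of the Gaussian Error Linear Unit:
    # GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    cdf = 0.5 * (1.0 + tf.tanh(
        np.sqrt(2.0 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))
    return x * cdf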

View file

@@ -17,7 +17,6 @@ import abc
import tensorflow as tf
from reducer import SumReducer
class PositionEncoder(tf.keras.layers.Layer):
"""Base class for position encoders."""

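For context on the class this hunk touches: a position encoder adds a position-dependent signal to the input embeddings before attention. A minimal sketch of the common sinusoidal variant (an illustration only, not the repo's actual `PositionEncoder` implementation):

import numpy as np
import tensorflow as tf

def sinusoidal_position_encoding(max_time, depth):
    # Build a [max_time, depth] table: even columns hold sines, odd columns
    # hold cosines, at geometrically spaced frequencies.
    positions = np.arange(max_time)[:, np.newaxis]             # [max_time, 1]
    dims = np.arange(depth)[np.newaxis, :]                      # [1, depth]
    rates = 1.0 / np.power(10000.0, (2 * (dims // 2)) / np.float32(depth))
    angles = positions * rates                                  # [max_time, depth]
    table = np.zeros((max_time, depth), dtype=np.float32)
    table[:, 0::2] = np.sin(angles[:, 0::2])
    table[:, 1::2] = np.cos(angles[:, 1::2])
    return tf.constant(table)  # broadcast-added to [batch, time, depth] inputs
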
View file

@@ -15,7 +15,6 @@
import abc
import tensorflow as tf
def pad_in_time(x, padding_length):
"""Helper function to pad a tensor in the time dimension and retain the static depth dimension."""
return tf.pad(x, [[0, 0], [0, padding_length], [0, 0]])
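
As a quick usage illustration (a sketch added here for clarity, not part of the commit): only the time axis of a [batch, time, depth] tensor grows, while the batch and depth dimensions keep their static sizes.

import tensorflow as tf

def pad_in_time(x, padding_length):
    # Pad only the time (second) axis; batch and depth stay untouched.
    return tf.pad(x, [[0, 0], [0, padding_length], [0, 0]])

x = tf.zeros([2, 5, 6])                 # [batch=2, time=5, depth=6]
y = pad_in_time(x, padding_length=3)
print(y.shape)                          # (2, 8, 6): time grew from 5 to 8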