Merge pull request #748 from hXl3s/RN50/argparse_fix

Minor fixes in ConvNets
This commit is contained in:
nv-kkudrynski 2020-11-09 13:44:17 +01:00 committed by GitHub
commit 0b455ffeac
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 0 additions and 46 deletions

View file

@@ -61,7 +61,6 @@ def initialize_model(args):
model.load_state_dict(
{k.replace("module.", ""): v for k, v in state_dict.items()}
)
model.load_state_dict(state_dict)
return model.half() if args.fp16 else model

View file

@@ -173,51 +173,6 @@ def parse_cmdline(available_arch):
help="Quantize weights and activations during training using symmetric quantization."
)
p.add_argument(
'--finetune_checkpoint',
required=False,
default=None,
type=str,
help="Path to pre-trained checkpoint which will be used for fine-tuning"
)
_add_bool_argument(
parser=p, name="use_final_conv", default=False, required=False, help="Use cosine learning rate schedule."
)
p.add_argument(
'--quant_delay',
type=int,
default=0,
required=False,
help="Number of steps to be run before quantization starts to happen"
)
_add_bool_argument(
parser=p,
name="quantize",
default=False,
required=False,
help="Quantize weights and activations during training. (Defaults to Assymmetric quantization)"
)
_add_bool_argument(
parser=p,
name="use_qdq",
default=False,
required=False,
help="Use QDQV3 op instead of FakeQuantWithMinMaxVars op for quantization. QDQv3 does only scaling"
)
_add_bool_argument(
parser=p,
name="symmetric",
default=False,
required=False,
help="Quantize weights and activations during training using symmetric quantization."
)
p.add_argument(
'--log_filename',
type=str,