diff --git a/PyTorch/SpeechRecognition/Jasper/common/dali/pipeline.py b/PyTorch/SpeechRecognition/Jasper/common/dali/pipeline.py
index aa7d27f5..bdbb26b4 100644
--- a/PyTorch/SpeechRecognition/Jasper/common/dali/pipeline.py
+++ b/PyTorch/SpeechRecognition/Jasper/common/dali/pipeline.py
@@ -136,7 +136,7 @@ class DaliPipeline():
             audio = audio.gpu()
 
         if self.dither_coeff != 0.:
-            audio = audio + fn.random.normal(device=preprocessing_device) * self.dither_coeff
+            audio = audio + fn.random.normal(audio) * self.dither_coeff
 
         audio = fn.preemphasis_filter(audio, preemph_coeff=preemph_coeff)
 
diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/dali/pipeline.py b/PyTorch/SpeechRecognition/QuartzNet/common/dali/pipeline.py
index 8b3e93ef..251ae8e2 100644
--- a/PyTorch/SpeechRecognition/QuartzNet/common/dali/pipeline.py
+++ b/PyTorch/SpeechRecognition/QuartzNet/common/dali/pipeline.py
@@ -167,8 +167,7 @@ def dali_asr_pipeline(train_pipeline,  # True if training, False if validation
         audio = audio.gpu()
 
     if dither_coeff != 0.:
-        audio = audio + fn.random.normal(device=preprocessing_device
-                                         ) * dither_coeff
+        audio = audio + fn.random.normal(audio) * dither_coeff
 
     audio = fn.preemphasis_filter(audio, preemph_coeff=preemph_coeff)
 