NeMo/Dockerfile

# syntax=docker/dockerfile:experimental
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:21.10-py3
# build an image that includes only the nemo dependencies; installing dependencies first
# gives optimal layer caching, and this stage is also useful as a development
# image (by specifying the build target as `nemo-deps`)
FROM ${BASE_IMAGE} as nemo-deps
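# Example usage: this stage can be built on its own as a development image with the `--target`
# flag, e.g. `DOCKER_BUILDKIT=1 docker build --target nemo-deps -t nemo-deps .`
# (the `nemo-deps` image tag here is an arbitrary choice, not part of this file).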
# Ensure apt-get won't prompt for selecting options
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
    apt-get install -y \
      libsndfile1 sox \
      libfreetype6 \
      python-setuptools swig \
      python-dev ffmpeg && \
    rm -rf /var/lib/apt/lists/*
# uninstall stuff from base container
RUN pip uninstall -y sacrebleu torchtext
# build torchaudio (change latest release version to match pytorch)
WORKDIR /tmp/torchaudio_build
RUN git clone --depth 1 --branch release/0.9 https://github.com/pytorch/audio.git && \
    cd audio && \
    git submodule update --init --recursive && \
    BUILD_SOX=1 python setup.py install && \
    cd .. && rm -r audio
# TODO: remove when 21.04 container is released
# build torchtext
WORKDIR /tmp/torchtext_build
RUN git clone --branch v0.8.1 https://github.com/pytorch/text.git && \
    cd text && \
    git submodule update --init --recursive && \
    python setup.py clean install && \
    cd .. && rm -r text
# install TRT tools: PT quantization support and ONNX graph optimizer
WORKDIR /tmp/trt_build
RUN git clone https://github.com/NVIDIA/TensorRT.git && \
    cd TensorRT/tools/onnx-graphsurgeon && python setup.py install && \
    cd ../pytorch-quantization && \
    python setup.py install && \
    rm -fr /tmp/trt_build
# install nemo dependencies
WORKDIR /tmp/nemo
COPY requirements .
RUN for f in $(ls requirements*.txt); do pip install --disable-pip-version-check --no-cache-dir -r $f; done
# install nemo_text_processing dependencies
COPY nemo_text_processing /tmp/nemo/nemo_text_processing/
RUN /bin/bash /tmp/nemo/nemo_text_processing/setup.sh
# copy nemo source into a scratch image
FROM scratch as nemo-src
COPY . .
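# Keeping the source in its own stage means that source-only changes do not invalidate
# the cached dependency layers built in `nemo-deps` above.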
# start building the final container
FROM nemo-deps as nemo
ARG NEMO_VERSION=1.4.0
# Check that NEMO_VERSION is set. The build will fail without it. Expose the NeMo and base container
# version information as runtime environment variables for introspection purposes.
RUN /usr/bin/test -n "$NEMO_VERSION" && \
    /bin/echo "export NEMO_VERSION=${NEMO_VERSION}" >> /root/.bashrc && \
    /bin/echo "export BASE_IMAGE=${BASE_IMAGE}" >> /root/.bashrc
RUN --mount=from=nemo-src,target=/tmp/nemo cd /tmp/nemo && pip install ".[all]" && \
    python -c "import nemo.collections.asr as nemo_asr" && \
    python -c "import nemo.collections.nlp as nemo_nlp" && \
    python -c "import nemo.collections.tts as nemo_tts" && \
    python -c "import nemo_text_processing.text_normalization as text_normalization"
# TODO: Try to remove once 21.07 container is the base container
# install pinned numba version
RUN conda install -c numba numba=0.53.1
# copy scripts/examples/tests/tutorials into container for end user
WORKDIR /workspace/nemo
COPY scripts /workspace/nemo/scripts
COPY examples /workspace/nemo/examples
COPY tests /workspace/nemo/tests
COPY tutorials /workspace/nemo/tutorials
# COPY README.rst LICENSE /workspace/nemo/
RUN printf "#!/bin/bash\njupyter lab --no-browser --allow-root --ip=0.0.0.0" >> start-jupyter.sh && \
    chmod +x start-jupyter.sh
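# Example run command (the image name and host port are assumptions, adjust to your setup):
# `docker run --gpus all -it --rm -p 8888:8888 nemo ./start-jupyter.sh` starts JupyterLab
# from the default working directory with GPU access.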