diff --git "a/attnserver.run_attnserver.slurm.sh.343204.err.log" "b/attnserver.run_attnserver.slurm.sh.343204.err.log" --- "a/attnserver.run_attnserver.slurm.sh.343204.err.log" +++ "b/attnserver.run_attnserver.slurm.sh.343204.err.log" @@ -5359,3 +5359,2151 @@ W0621 21:17:28.010000 723166 site-packages/torch/distributed/run.py:766] ******* warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( +[rank2]:[W621 21:18:44.409658238 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank4]:[W621 21:18:44.441068810 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank0]:[W621 21:18:44.478074015 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank6]:[W621 21:18:44.487685548 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank1]:[W621 21:18:44.498289251 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank7]:[W621 21:18:44.637969313 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank5]:[W621 21:18:44.873655299 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank3]:[W621 21:18:44.950263632 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank14]:[W621 21:18:44.630919397 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank11]:[W621 21:18:44.667621385 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank15]:[W621 21:18:44.675390069 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank8]:[W621 21:18:44.676725920 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank10]:[W621 21:18:44.685507608 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank12]:[W621 21:18:44.697650599 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank9]:[W621 21:18:44.715479536 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank13]:[W621 21:18:44.840583295 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) ++ set +x ++ set +x ++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 ++ export PROF_CTX_LENGTH=12288 ++ PROF_CTX_LENGTH=12288 ++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L12288*tp8.cp2.bs8.json' ++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L12288*tp8.cp2.bs8.json' ']' ++ echo 'Running ctx_length=12288, TP_SIZE=8, CP_SIZE=2, BATCH_SIZE=8' ++ srun bash ./attnserver.sh ++ which python3 ++ which python3 ++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 0 --rdzv_id 343204 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-600:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 2 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 12288 --max-position-embeddings 12288 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ ++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 1 --rdzv_id 343204 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-600:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 2 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 12288 --max-position-embeddings 12288 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint 
--logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use-env is set by default in torchrun. +If your script expects `--local-rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + main() +W0621 21:18:51.371000 1707922 site-packages/torch/distributed/run.py:766] +W0621 21:18:51.371000 1707922 site-packages/torch/distributed/run.py:766] ***************************************** +W0621 21:18:51.371000 1707922 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +W0621 21:18:51.371000 1707922 site-packages/torch/distributed/run.py:766] ***************************************** +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use-env is set by default in torchrun. +If your script expects `--local-rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + main() +W0621 21:18:51.483000 726299 site-packages/torch/distributed/run.py:766] +W0621 21:18:51.483000 726299 site-packages/torch/distributed/run.py:766] ***************************************** +W0621 21:18:51.483000 726299 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +W0621 21:18:51.483000 726299 site-packages/torch/distributed/run.py:766] ***************************************** +[rank0]:[W621 21:19:14.855862543 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank9]:[W621 21:19:14.456953363 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 21:19:14.867146486 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank2]:[W621 21:19:14.906819310 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
+[rank10]:[W621 21:19:14.498809737 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank8]:[W621 21:19:14.528773940 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank7]:[W621 21:19:14.950194064 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank15]:[W621 21:19:14.542943149 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank5]:[W621 21:19:14.952575489 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank6]:[W621 21:19:14.953382784 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank13]:[W621 21:19:14.545201382 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 21:19:14.954762134 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank11]:[W621 21:19:14.545402373 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank14]:[W621 21:19:14.546664289 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank12]:[W621 21:19:14.547840895 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
+[rank4]:[W621 21:19:14.957498062 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
+ warnings.warn( +[rank10]: Traceback (most recent call last): +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank10]: pretrain( +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank10]: iteration, num_floating_point_operations_so_far = train( +[rank10]: ^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank10]: ) = train_step( +[rank10]: ^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank10]: losses_reduced = forward_backward_func( +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank10]: output_tensor, num_tokens = forward_step( +[rank10]: ^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank10]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank10]: output_tensor = model( +[rank10]: ^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank10]: return self._call_impl(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank10]: return forward_call(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank10]: return self.module(*inputs, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank10]: return self._call_impl(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank10]: return forward_call(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank10]: outputs = self.module(*inputs, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank10]: return self._call_impl(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank10]: return forward_call(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank10]: hidden_states = self.decoder( +[rank10]: ^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank10]: return self._call_impl(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank10]: return forward_call(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank10]: hidden_states, context = layer( +[rank10]: ^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank10]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank10]: return self._call_impl(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank10]: return forward_call(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank10]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank10]: attention_output_with_bias = self.self_attention( +[rank10]: ^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank10]: return self._call_impl(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank10]: return forward_call(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank10]: core_attn_out = self.core_attention( +[rank10]: ^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank10]: return self._call_impl(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank10]: return forward_call(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in forward +[rank10]: core_attn_out = super().forward( +[rank10]: ^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank10]: return self.fused_attention( +[rank10]: ^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank10]: return self._call_impl(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank10]: return forward_call(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank10]: return fn(*args, **kwargs) +[rank10]: ^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank10]: output = attn_forward_func_with_cp( +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank10]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank10]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 904, in forward +[rank10]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank10]: ^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank10]: output_tensors = tex.fused_attn_fwd( +[rank10]: ^^^^^^^^^^^^^^^^^^^ +[rank10]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 384.00 MiB. GPU 2 has a total capacity of 139.81 GiB of which 140.94 MiB is free. Including non-PyTorch memory, this process has 139.67 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank11]: Traceback (most recent call last): +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank11]: pretrain( +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank11]: iteration, num_floating_point_operations_so_far = train( +[rank11]: ^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank11]: ) = train_step( +[rank11]: ^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank11]: losses_reduced = forward_backward_func( +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank11]: output_tensor, num_tokens = forward_step( +[rank11]: ^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank11]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank11]: output_tensor = model( +[rank11]: ^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank11]: return self._call_impl(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank11]: return forward_call(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank11]: return self.module(*inputs, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank11]: return self._call_impl(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank11]: return forward_call(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank11]: outputs = self.module(*inputs, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank11]: return self._call_impl(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank11]: return forward_call(*args, **kwargs) +[rank11]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank11]: hidden_states = self.decoder( +[rank11]: ^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank11]: return self._call_impl(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank11]: return forward_call(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank11]: hidden_states, context = layer( +[rank11]: ^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank11]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank11]: return self._call_impl(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank11]: return forward_call(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank11]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank11]: attention_output_with_bias = self.self_attention( +[rank11]: ^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank11]: return self._call_impl(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank11]: return forward_call(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank11]: core_attn_out = self.core_attention( +[rank11]: ^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank11]: return self._call_impl(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank11]: return forward_call(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in forward +[rank11]: core_attn_out = super().forward( +[rank11]: ^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank11]: return self.fused_attention( +[rank11]: ^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank11]: return self._call_impl(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank11]: return forward_call(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank11]: return fn(*args, **kwargs) +[rank11]: ^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank11]: output = attn_forward_func_with_cp( +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank11]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank11]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 904, in forward +[rank11]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank11]: ^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank11]: output_tensors = tex.fused_attn_fwd( +[rank11]: ^^^^^^^^^^^^^^^^^^^ +[rank11]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 384.00 MiB. GPU 3 has a total capacity of 139.81 GiB of which 156.94 MiB is free. Including non-PyTorch memory, this process has 139.65 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank14]: Traceback (most recent call last): +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank14]: pretrain( +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank14]: iteration, num_floating_point_operations_so_far = train( +[rank14]: ^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank14]: ) = train_step( +[rank14]: ^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank14]: losses_reduced = forward_backward_func( +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank14]: output_tensor, num_tokens = forward_step( +[rank14]: ^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank14]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank14]: output_tensor = model( +[rank14]: ^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank14]: return self._call_impl(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank14]: return forward_call(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank14]: return self.module(*inputs, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank14]: return self._call_impl(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank14]: return forward_call(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank14]: outputs = self.module(*inputs, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank14]: return self._call_impl(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank14]: return forward_call(*args, **kwargs) +[rank14]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank14]: hidden_states = self.decoder( +[rank14]: ^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank14]: return self._call_impl(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank14]: return forward_call(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank14]: hidden_states, context = layer( +[rank14]: ^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank14]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank14]: return self._call_impl(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank14]: return forward_call(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank14]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank14]: attention_output_with_bias = self.self_attention( +[rank14]: ^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank14]: return self._call_impl(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank14]: return forward_call(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank14]: core_attn_out = self.core_attention( +[rank14]: ^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank14]: return self._call_impl(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank14]: return forward_call(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in forward +[rank14]: core_attn_out = super().forward( +[rank14]: ^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank14]: return self.fused_attention( +[rank14]: ^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank14]: return self._call_impl(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank14]: return forward_call(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank14]: return fn(*args, **kwargs) +[rank14]: ^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank14]: output = attn_forward_func_with_cp( +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank14]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank14]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 904, in forward +[rank14]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank14]: ^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank14]: output_tensors = tex.fused_attn_fwd( +[rank14]: ^^^^^^^^^^^^^^^^^^^ +[rank14]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 384.00 MiB. GPU 6 has a total capacity of 139.81 GiB of which 140.94 MiB is free. Including non-PyTorch memory, this process has 139.67 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank12]: Traceback (most recent call last): +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank12]: pretrain( +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank12]: iteration, num_floating_point_operations_so_far = train( +[rank12]: ^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank12]: ) = train_step( +[rank12]: ^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank12]: losses_reduced = forward_backward_func( +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank12]: output_tensor, num_tokens = forward_step( +[rank12]: ^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank12]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank12]: output_tensor = model( +[rank12]: ^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank12]: return self._call_impl(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank12]: return forward_call(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank12]: return self.module(*inputs, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank12]: return self._call_impl(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank12]: return forward_call(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank12]: outputs = self.module(*inputs, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank12]: return self._call_impl(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank12]: return forward_call(*args, **kwargs) +[rank12]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank12]: hidden_states = self.decoder( +[rank12]: ^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank12]: return self._call_impl(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank12]: return forward_call(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank12]: hidden_states, context = layer( +[rank12]: ^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank12]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank12]: return self._call_impl(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank12]: return forward_call(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank12]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank12]: attention_output_with_bias = self.self_attention( +[rank12]: ^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank12]: return self._call_impl(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank12]: return forward_call(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank12]: core_attn_out = self.core_attention( +[rank12]: ^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank12]: return self._call_impl(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank12]: return forward_call(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in forward +[rank12]: core_attn_out = super().forward( +[rank12]: ^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank12]: return self.fused_attention( +[rank12]: ^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank12]: return self._call_impl(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank12]: return forward_call(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank12]: return fn(*args, **kwargs) +[rank12]: ^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank12]: output = attn_forward_func_with_cp( +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank12]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank12]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 904, in forward +[rank12]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank12]: ^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank12]: output_tensors = tex.fused_attn_fwd( +[rank12]: ^^^^^^^^^^^^^^^^^^^ +[rank12]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 384.00 MiB. GPU 4 has a total capacity of 139.81 GiB of which 140.94 MiB is free. Including non-PyTorch memory, this process has 139.67 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank8]: Traceback (most recent call last): +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank8]: pretrain( +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank8]: iteration, num_floating_point_operations_so_far = train( +[rank8]: ^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank8]: ) = train_step( +[rank8]: ^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank8]: losses_reduced = forward_backward_func( +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank8]: output_tensor, num_tokens = forward_step( +[rank8]: ^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank8]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank8]: output_tensor = model( +[rank8]: ^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank8]: return self._call_impl(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank8]: return forward_call(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank8]: return self.module(*inputs, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank8]: return self._call_impl(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank8]: return forward_call(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank8]: outputs = self.module(*inputs, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank8]: return self._call_impl(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank8]: return forward_call(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank8]: hidden_states = self.decoder( +[rank8]: ^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank8]: return self._call_impl(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank8]: return forward_call(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank8]: hidden_states, context = layer( +[rank8]: ^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank8]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank8]: return self._call_impl(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank8]: return forward_call(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank8]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank8]: attention_output_with_bias = self.self_attention( +[rank8]: ^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank8]: return self._call_impl(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank8]: return forward_call(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank8]: core_attn_out = self.core_attention( +[rank8]: ^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank8]: return self._call_impl(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank8]: return forward_call(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in 
forward +[rank8]: core_attn_out = super().forward( +[rank8]: ^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank8]: return self.fused_attention( +[rank8]: ^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank8]: return self._call_impl(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank8]: return forward_call(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank8]: return fn(*args, **kwargs) +[rank8]: ^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank8]: output = attn_forward_func_with_cp( +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank8]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank8]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 904, in forward +[rank8]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank8]: ^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank8]: output_tensors = tex.fused_attn_fwd( +[rank8]: ^^^^^^^^^^^^^^^^^^^ +[rank8]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 384.00 MiB. GPU 0 has a total capacity of 139.81 GiB of which 134.94 MiB is free. Including non-PyTorch memory, this process has 139.67 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank15]: Traceback (most recent call last): +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank15]: pretrain( +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank15]: iteration, num_floating_point_operations_so_far = train( +[rank15]: ^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank15]: ) = train_step( +[rank15]: ^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank15]: losses_reduced = forward_backward_func( +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank15]: output_tensor, num_tokens = forward_step( +[rank15]: ^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank15]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank15]: output_tensor = model( +[rank15]: ^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank15]: return self._call_impl(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank15]: return forward_call(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank15]: return self.module(*inputs, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank15]: return self._call_impl(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank15]: return forward_call(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank15]: outputs = self.module(*inputs, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank15]: return self._call_impl(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank15]: return forward_call(*args, **kwargs) +[rank15]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank15]: hidden_states = self.decoder( +[rank15]: ^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank15]: return self._call_impl(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank15]: return forward_call(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank15]: hidden_states, context = layer( +[rank15]: ^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank15]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank15]: return self._call_impl(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank15]: return forward_call(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank15]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank15]: attention_output_with_bias = self.self_attention( +[rank15]: ^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank15]: return self._call_impl(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank15]: return forward_call(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank15]: core_attn_out = self.core_attention( +[rank15]: ^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank15]: return self._call_impl(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank15]: return forward_call(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in forward +[rank15]: core_attn_out = super().forward( +[rank15]: ^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank15]: return self.fused_attention( +[rank15]: ^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank15]: return self._call_impl(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank15]: return forward_call(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank15]: return fn(*args, **kwargs) +[rank15]: ^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank15]: output = attn_forward_func_with_cp( +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank15]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank15]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 904, in forward +[rank15]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank15]: ^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank15]: output_tensors = tex.fused_attn_fwd( +[rank15]: ^^^^^^^^^^^^^^^^^^^ +[rank15]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 384.00 MiB. GPU 7 has a total capacity of 139.81 GiB of which 156.94 MiB is free. Including non-PyTorch memory, this process has 139.65 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank9]: Traceback (most recent call last): +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank9]: pretrain( +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank9]: iteration, num_floating_point_operations_so_far = train( +[rank9]: ^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank9]: ) = train_step( +[rank9]: ^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank9]: losses_reduced = forward_backward_func( +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank9]: output_tensor, num_tokens = forward_step( +[rank9]: ^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank9]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank9]: output_tensor = model( +[rank9]: ^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank9]: return self._call_impl(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank9]: return forward_call(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank9]: return self.module(*inputs, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank9]: return self._call_impl(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank9]: return forward_call(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank9]: outputs = self.module(*inputs, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank9]: return self._call_impl(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank9]: return forward_call(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank9]: hidden_states = self.decoder( +[rank9]: ^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank9]: return self._call_impl(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank9]: return forward_call(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank9]: hidden_states, context = layer( +[rank9]: ^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank9]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank9]: return self._call_impl(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank9]: return forward_call(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank9]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: Traceback (most recent call last): +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank7]: pretrain( +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank7]: iteration, num_floating_point_operations_so_far = train( +[rank7]: ^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank7]: ) = train_step( +[rank7]: ^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank7]: losses_reduced = forward_backward_func( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank9]: attention_output_with_bias = self.self_attention( +[rank9]: ^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank9]: return self._call_impl(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank9]: return 
forward_call(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank9]: core_attn_out = self.core_attention( +[rank7]: output_tensor, num_tokens = forward_step( +[rank7]: ^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank7]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank7]: output_tensor = model( +[rank7]: ^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank7]: return self._call_impl(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: ^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank9]: return self._call_impl(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank9]: return forward_call(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in forward +[rank9]: core_attn_out = super().forward( +[rank9]: ^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank7]: return forward_call(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank7]: return self.module(*inputs, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank7]: return self._call_impl(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank7]: return forward_call(*args, **kwargs) +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank9]: return self.fused_attention( +[rank9]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank7]: outputs = self.module(*inputs, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank7]: return self._call_impl(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank7]: return forward_call(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank7]: hidden_states = self.decoder( +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank9]: return self._call_impl(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank9]: return forward_call(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank9]: return fn(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank7]: ^^^^^^^^^^^^^ +[rank9]: output = attn_forward_func_with_cp( +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank9]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank9]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 904, in forward +[rank9]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank7]: return self._call_impl(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank7]: return forward_call(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank7]: hidden_states, context = layer( +[rank7]: ^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank7]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank9]: ^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank9]: output_tensors = tex.fused_attn_fwd( +[rank9]: ^^^^^^^^^^^^^^^^^^^ +[rank9]: 
torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 384.00 MiB. GPU 1 has a total capacity of 139.81 GiB of which 156.94 MiB is free. Including non-PyTorch memory, this process has 139.65 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank7]: return self._call_impl(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank7]: return forward_call(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank7]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: Traceback (most recent call last): +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank13]: pretrain( +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank13]: iteration, num_floating_point_operations_so_far = train( +[rank13]: ^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank13]: ) = train_step( +[rank13]: ^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank13]: losses_reduced = forward_backward_func( +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank7]: attention_output_with_bias = self.self_attention( +[rank7]: ^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank7]: return self._call_impl(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank7]: return forward_call(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank7]: core_attn_out = self.core_attention( +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank13]: output_tensor, num_tokens = forward_step( +[rank13]: ^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank13]: output_tensor, loss_func = 
forward_step_func(data_iterator, model) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank13]: output_tensor = model( +[rank13]: ^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank13]: return self._call_impl(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank7]: return self._call_impl(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank7]: return forward_call(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in forward +[rank7]: core_attn_out = super().forward( +[rank7]: ^^^^^^^^^^^^^^^^ +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank13]: return forward_call(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank13]: return self.module(*inputs, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank13]: return self._call_impl(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank7]: return self.fused_attention( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank13]: return forward_call(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank13]: outputs = self.module(*inputs, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank13]: return self._call_impl(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank13]: return forward_call(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank7]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank7]: return self._call_impl(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank7]: return forward_call(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank7]: return fn(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank13]: hidden_states = self.decoder( +[rank13]: ^^^^^^^^^^^^^ +[rank7]: output = attn_forward_func_with_cp( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank7]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank7]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 1051, in forward +[rank7]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank13]: return self._call_impl(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank13]: return forward_call(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank13]: hidden_states, context = layer( +[rank13]: ^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank13]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank7]: output_tensors = tex.fused_attn_fwd( +[rank7]: ^^^^^^^^^^^^^^^^^^^ +[rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 192.00 MiB. GPU 7 has a total capacity of 139.81 GiB of which 140.94 MiB is free. Including non-PyTorch memory, this process has 139.67 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank2]: Traceback (most recent call last): +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank13]: return self._call_impl(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank13]: return forward_call(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank13]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank2]: pretrain( +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank2]: iteration, num_floating_point_operations_so_far = train( +[rank2]: ^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank2]: ) = train_step( +[rank2]: ^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank2]: losses_reduced = forward_backward_func( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank13]: attention_output_with_bias = self.self_attention( +[rank13]: ^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank13]: return self._call_impl(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank13]: return forward_call(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank13]: core_attn_out = self.core_attention( +[rank2]: output_tensor, num_tokens = forward_step( +[rank2]: ^^^^^^^^^^^^^ +[rank13]: ^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank13]: return self._call_impl(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank13]: return forward_call(*args, **kwargs) 
+[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in forward +[rank13]: core_attn_out = super().forward( +[rank13]: ^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank2]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank2]: output_tensor = model( +[rank2]: ^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank2]: return self._call_impl(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank2]: return forward_call(*args, **kwargs) +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank13]: return self.fused_attention( +[rank13]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank2]: return self.module(*inputs, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank2]: return self._call_impl(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank2]: return forward_call(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank2]: outputs = self.module(*inputs, **kwargs) +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank13]: return self._call_impl(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank13]: return forward_call(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank13]: return fn(*args, **kwargs) +[rank13]: ^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in 
_wrapped_call_impl +[rank2]: return self._call_impl(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank2]: return forward_call(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank2]: hidden_states = self.decoder( +[rank2]: ^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank13]: output = attn_forward_func_with_cp( +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank13]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank13]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 904, in forward +[rank13]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank2]: return self._call_impl(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank2]: return forward_call(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank2]: hidden_states, context = layer( +[rank2]: ^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank2]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: ^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank13]: output_tensors = tex.fused_attn_fwd( +[rank13]: ^^^^^^^^^^^^^^^^^^^ +[rank13]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 384.00 MiB. GPU 5 has a total capacity of 139.81 GiB of which 156.94 MiB is free. Including non-PyTorch memory, this process has 139.65 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank2]: return self._call_impl(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank2]: return forward_call(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank2]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank2]: attention_output_with_bias = self.self_attention( +[rank2]: ^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank2]: return self._call_impl(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank2]: return forward_call(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank2]: core_attn_out = self.core_attention( +[rank2]: ^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank2]: return self._call_impl(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank2]: return forward_call(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in forward +[rank2]: core_attn_out = super().forward( +[rank2]: ^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank2]: return self.fused_attention( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank2]: return self._call_impl(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank2]: return forward_call(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank2]: 
return fn(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank2]: output = attn_forward_func_with_cp( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank2]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank2]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 1051, in forward +[rank2]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank2]: ^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank2]: output_tensors = tex.fused_attn_fwd( +[rank2]: ^^^^^^^^^^^^^^^^^^^ +[rank2]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 192.00 MiB. GPU 2 has a total capacity of 139.81 GiB of which 156.94 MiB is free. Including non-PyTorch memory, this process has 139.65 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
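Note: every OOM report in this run suggests PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True. A minimal sketch of applying that hint follows; it assumes the variable is set before the process makes its first CUDA allocation, and the placement described is an assumption rather than part of the actual job script.

    import os

    # Sketch only: enable expandable segments, as the allocator message suggests.
    # This must take effect before the first CUDA allocation in the process, so it
    # belongs at the very top of the training entry point or in the job's
    # environment. In these traces only ~289 MiB is reserved-but-unallocated while
    # ~137 GiB is genuinely allocated by PyTorch, so reducing fragmentation alone
    # is unlikely to be enough; shrinking the activation footprint is the larger lever.
    os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")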
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank1]: Traceback (most recent call last): +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank1]: pretrain( +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank1]: iteration, num_floating_point_operations_so_far = train( +[rank1]: ^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank1]: ) = train_step( +[rank1]: ^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank1]: losses_reduced = forward_backward_func( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank1]: output_tensor, num_tokens = forward_step( +[rank1]: ^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank1]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank1]: output_tensor = model( +[rank1]: ^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank1]: return self._call_impl(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank1]: return forward_call(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank1]: return self.module(*inputs, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank1]: return self._call_impl(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank1]: return forward_call(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank1]: outputs = self.module(*inputs, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank1]: return self._call_impl(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank1]: return forward_call(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank1]: hidden_states = self.decoder( +[rank1]: ^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank1]: return self._call_impl(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank1]: return forward_call(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank1]: hidden_states, context = layer( +[rank1]: ^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank1]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank1]: return self._call_impl(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank1]: return forward_call(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank1]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank1]: attention_output_with_bias = self.self_attention( +[rank1]: ^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank1]: return self._call_impl(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank1]: return forward_call(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank1]: core_attn_out = self.core_attention( +[rank1]: ^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank1]: return self._call_impl(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank1]: return forward_call(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in 
forward +[rank1]: core_attn_out = super().forward( +[rank1]: ^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank1]: return self.fused_attention( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank1]: return self._call_impl(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank1]: return forward_call(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank1]: return fn(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank1]: output = attn_forward_func_with_cp( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank1]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank1]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 1051, in forward +[rank1]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank1]: ^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank1]: output_tensors = tex.fused_attn_fwd( +[rank1]: ^^^^^^^^^^^^^^^^^^^ +[rank1]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 192.00 MiB. GPU 1 has a total capacity of 139.81 GiB of which 140.94 MiB is free. Including non-PyTorch memory, this process has 139.67 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
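A quick way to confirm the pattern shown in these errors (memory genuinely allocated rather than fragmented) is to print allocator statistics on the failing rank before the step that dies. A minimal, hypothetical sketch using only public torch.cuda APIs; the function name and call site are assumptions, not part of pretrain_gpt_profile.py:

    import torch

    def report_cuda_memory(tag: str) -> None:
        # Compare allocated vs. reserved memory: a large reserved-but-unallocated
        # gap would point to fragmentation, while the traces above show the gap
        # is only a few hundred MiB, i.e. the memory is actually in use.
        allocated_gib = torch.cuda.memory_allocated() / 2**30
        reserved_gib = torch.cuda.memory_reserved() / 2**30
        print(f"[{tag}] allocated={allocated_gib:.2f} GiB reserved={reserved_gib:.2f} GiB")
        print(torch.cuda.memory_summary(abbreviated=True))

    # Example usage: call right before the attention forward pass that raises the OOM.
    # report_cuda_memory("before core_attention")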
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank4]: Traceback (most recent call last): +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank4]: pretrain( +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank4]: iteration, num_floating_point_operations_so_far = train( +[rank4]: ^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank4]: ) = train_step( +[rank4]: ^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank4]: losses_reduced = forward_backward_func( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank4]: output_tensor, num_tokens = forward_step( +[rank4]: ^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank4]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank4]: output_tensor = model( +[rank4]: ^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank4]: return self._call_impl(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank4]: return forward_call(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank4]: return self.module(*inputs, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank4]: return self._call_impl(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank4]: return forward_call(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank4]: outputs = self.module(*inputs, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank4]: return self._call_impl(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank4]: return forward_call(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank4]: hidden_states = self.decoder( +[rank4]: ^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank4]: return self._call_impl(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank4]: return forward_call(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank4]: hidden_states, context = layer( +[rank4]: ^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank4]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank4]: return self._call_impl(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank4]: return forward_call(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank4]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank4]: attention_output_with_bias = self.self_attention( +[rank4]: ^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank4]: return self._call_impl(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank4]: return forward_call(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank4]: core_attn_out = self.core_attention( +[rank4]: ^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank4]: return self._call_impl(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank4]: return forward_call(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in 
forward +[rank4]: core_attn_out = super().forward( +[rank4]: ^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank4]: return self.fused_attention( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank4]: return self._call_impl(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank4]: return forward_call(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank4]: return fn(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank4]: output = attn_forward_func_with_cp( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank4]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank4]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 1051, in forward +[rank4]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank4]: ^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank4]: output_tensors = tex.fused_attn_fwd( +[rank4]: ^^^^^^^^^^^^^^^^^^^ +[rank4]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 192.00 MiB. GPU 4 has a total capacity of 139.81 GiB of which 156.94 MiB is free. Including non-PyTorch memory, this process has 139.65 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank0]: Traceback (most recent call last): +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank0]: pretrain( +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank0]: iteration, num_floating_point_operations_so_far = train( +[rank0]: ^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank0]: ) = train_step( +[rank0]: ^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank0]: losses_reduced = forward_backward_func( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank0]: output_tensor, num_tokens = forward_step( +[rank0]: ^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank0]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank0]: output_tensor = model( +[rank0]: ^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank0]: return self._call_impl(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank0]: return forward_call(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank0]: return self.module(*inputs, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank0]: return self._call_impl(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank0]: return forward_call(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank0]: outputs = self.module(*inputs, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank0]: return self._call_impl(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank0]: return forward_call(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank0]: hidden_states = self.decoder( +[rank0]: ^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank0]: return self._call_impl(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank0]: return forward_call(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank0]: hidden_states, context = layer( +[rank0]: ^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank0]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank0]: return self._call_impl(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank0]: return forward_call(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank0]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank0]: attention_output_with_bias = self.self_attention( +[rank0]: ^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank0]: return self._call_impl(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank0]: return forward_call(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank0]: core_attn_out = self.core_attention( +[rank0]: ^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank0]: return self._call_impl(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank0]: return forward_call(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in 
forward +[rank0]: core_attn_out = super().forward( +[rank0]: ^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank0]: return self.fused_attention( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank0]: return self._call_impl(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank0]: return forward_call(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank0]: return fn(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank0]: output = attn_forward_func_with_cp( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank0]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank0]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 1051, in forward +[rank0]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank0]: ^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank0]: output_tensors = tex.fused_attn_fwd( +[rank0]: ^^^^^^^^^^^^^^^^^^^ +[rank0]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 192.00 MiB. GPU 0 has a total capacity of 139.81 GiB of which 150.94 MiB is free. Including non-PyTorch memory, this process has 139.65 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank3]: Traceback (most recent call last): +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank3]: pretrain( +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank3]: iteration, num_floating_point_operations_so_far = train( +[rank3]: ^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank3]: ) = train_step( +[rank3]: ^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank3]: losses_reduced = forward_backward_func( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank3]: output_tensor, num_tokens = forward_step( +[rank3]: ^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank3]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank3]: output_tensor = model( +[rank3]: ^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank3]: return self._call_impl(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank3]: return forward_call(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank3]: return self.module(*inputs, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank3]: return self._call_impl(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank3]: return forward_call(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank3]: outputs = self.module(*inputs, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank3]: return self._call_impl(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank3]: return forward_call(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank3]: hidden_states = self.decoder( +[rank3]: ^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank3]: return self._call_impl(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank3]: return forward_call(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank3]: hidden_states, context = layer( +[rank3]: ^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank3]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank3]: return self._call_impl(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank3]: return forward_call(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank3]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank3]: attention_output_with_bias = self.self_attention( +[rank3]: ^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank3]: return self._call_impl(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank3]: return forward_call(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank3]: core_attn_out = self.core_attention( +[rank3]: ^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank3]: return self._call_impl(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank3]: return forward_call(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in 
forward +[rank3]: core_attn_out = super().forward( +[rank3]: ^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank3]: return self.fused_attention( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank3]: return self._call_impl(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank3]: return forward_call(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank3]: return fn(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank3]: output = attn_forward_func_with_cp( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank3]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank3]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 1051, in forward +[rank3]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank3]: ^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank3]: output_tensors = tex.fused_attn_fwd( +[rank3]: ^^^^^^^^^^^^^^^^^^^ +[rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 192.00 MiB. GPU 3 has a total capacity of 139.81 GiB of which 140.94 MiB is free. Including non-PyTorch memory, this process has 139.67 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank6]: Traceback (most recent call last): +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank6]: pretrain( +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank6]: iteration, num_floating_point_operations_so_far = train( +[rank6]: ^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank6]: ) = train_step( +[rank6]: ^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank6]: losses_reduced = forward_backward_func( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank6]: output_tensor, num_tokens = forward_step( +[rank6]: ^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step +[rank6]: output_tensor = model( +[rank6]: ^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank6]: return self._call_impl(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank6]: return forward_call(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward +[rank6]: return self.module(*inputs, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank6]: return self._call_impl(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank6]: return forward_call(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward +[rank6]: outputs = self.module(*inputs, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank6]: return self._call_impl(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank6]: return forward_call(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward +[rank6]: hidden_states = self.decoder( +[rank6]: ^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank6]: return self._call_impl(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank6]: return forward_call(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward +[rank6]: hidden_states, context = layer( +[rank6]: ^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__ +[rank6]: return super(MegatronModule, self).__call__(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank6]: return self._call_impl(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank6]: return forward_call(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward +[rank6]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention +[rank6]: attention_output_with_bias = self.self_attention( +[rank6]: ^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank6]: return self._call_impl(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank6]: return forward_call(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward +[rank6]: core_attn_out = self.core_attention( +[rank6]: ^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank6]: return self._call_impl(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank6]: return forward_call(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in 
forward +[rank6]: core_attn_out = super().forward( +[rank6]: ^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward +[rank6]: return self.fused_attention( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl +[rank6]: return self._call_impl(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl +[rank6]: return forward_call(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn +[rank6]: return fn(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward +[rank6]: output = attn_forward_func_with_cp( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp +[rank6]: out = AttnFuncWithCPAndKVP2P.apply(*args) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply +[rank6]: return super().apply(*args, **kwargs) # type: ignore[misc] +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 1051, in forward +[rank6]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd( +[rank6]: ^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd +[rank6]: output_tensors = tex.fused_attn_fwd( +[rank6]: ^^^^^^^^^^^^^^^^^^^ +[rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 192.00 MiB. GPU 6 has a total capacity of 139.81 GiB of which 156.94 MiB is free. Including non-PyTorch memory, this process has 139.65 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
+[rank5]: Traceback (most recent call last):
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in
+[rank5]: pretrain(
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
+[rank5]: iteration, num_floating_point_operations_so_far = train(
+[rank5]: ^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
+[rank5]: ) = train_step(
+[rank5]: ^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
+[rank5]: losses_reduced = forward_backward_func(
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
+[rank5]: output_tensor, num_tokens = forward_step(
+[rank5]: ^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
+[rank5]: output_tensor, loss_func = forward_step_func(data_iterator, model)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 460, in forward_step
+[rank5]: output_tensor = model(
+[rank5]: ^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
+[rank5]: return self._call_impl(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
+[rank5]: return forward_call(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/distributed/data_parallel_base.py", line 22, in forward
+[rank5]: return self.module(*inputs, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
+[rank5]: return self._call_impl(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
+[rank5]: return forward_call(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/module.py", line 236, in forward
+[rank5]: outputs = self.module(*inputs, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
+[rank5]: return self._call_impl(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
+[rank5]: return forward_call(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_model.py", line 338, in forward
+[rank5]: hidden_states = self.decoder(
+[rank5]: ^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
+[rank5]: return self._call_impl(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
+[rank5]: return forward_call(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_block.py", line 557, in forward
+[rank5]: hidden_states, context = layer(
+[rank5]: ^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 875, in __call__
+[rank5]: return super(MegatronModule, self).__call__(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
+[rank5]: return self._call_impl(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
+[rank5]: return forward_call(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 436, in forward
+[rank5]: pre_mlp_layernorm_output, residual, context = self._forward_attention(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/transformer_layer.py", line 499, in _forward_attention
+[rank5]: attention_output_with_bias = self.self_attention(
+[rank5]: ^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
+[rank5]: return self._call_impl(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
+[rank5]: return forward_call(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/transformer/attention.py", line 736, in forward
+[rank5]: core_attn_out = self.core_attention(
+[rank5]: ^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
+[rank5]: return self._call_impl(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
+[rank5]: return forward_call(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/extensions/transformer_engine.py", line 863, in forward
+[rank5]: core_attn_out = super().forward(
+[rank5]: ^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/dot_product_attention.py", line 1097, in forward
+[rank5]: return self.fused_attention(
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
+[rank5]: return self._call_impl(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
+[rank5]: return forward_call(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 838, in _fn
+[rank5]: return fn(*args, **kwargs)
+[rank5]: ^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/backends.py", line 1564, in forward
+[rank5]: output = attn_forward_func_with_cp(
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 3619, in attn_forward_func_with_cp
+[rank5]: out = AttnFuncWithCPAndKVP2P.apply(*args)
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/autograd/function.py", line 575, in apply
+[rank5]: return super().apply(*args, **kwargs) # type: ignore[misc]
+[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/attention/dot_product_attention/context_parallel.py", line 1051, in forward
+[rank5]: out_per_step[i], aux_ctx_tensors = fused_attn_fwd(
+[rank5]: ^^^^^^^^^^^^^^^
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpp_extensions/fused_attn.py", line 279, in fused_attn_fwd
+[rank5]: output_tensors = tex.fused_attn_fwd(
+[rank5]: ^^^^^^^^^^^^^^^^^^^
+[rank5]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 192.00 MiB. GPU 5 has a total capacity of 139.81 GiB of which 140.94 MiB is free. Including non-PyTorch memory, this process has 139.67 GiB memory in use. Of the allocated memory 137.31 GiB is allocated by PyTorch, and 288.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
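Both ranks die inside Transformer Engine's fused attention with roughly 137 GiB already allocated by PyTorch, and the allocator message itself points at PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True. A minimal sketch of applying that hint, assuming the training entrypoint can be edited (run_pretrain() below is a hypothetical stand-in for the real script; exporting the variable in the job script before srun has the same effect):

```python
import os

# The CUDA caching allocator reads PYTORCH_CUDA_ALLOC_CONF at the first CUDA
# allocation, so set it before torch touches the GPU (or export it in the
# launch script that starts the workers).
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")

import torch  # noqa: E402  - imported after the allocator config on purpose


def run_pretrain() -> None:
    """Hypothetical wrapper around the real Megatron pretraining entrypoint."""
    # ... call into the actual training loop here ...


if __name__ == "__main__":
    run_pretrain()
```

Note that only ~289 MiB is reserved-but-unallocated against ~137 GiB allocated, so fragmentation is probably not the main problem at seq-length 12288 with TP=8/CP=2; this setting may help at the margin, but the OOM most likely needs a smaller per-GPU activation footprint as well.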
+W0621 21:19:34.642000 726299 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 726388 closing signal SIGTERM
+W0621 21:19:34.644000 726299 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 726389 closing signal SIGTERM
+W0621 21:19:34.645000 726299 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 726390 closing signal SIGTERM
+W0621 21:19:34.646000 726299 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 726391 closing signal SIGTERM
+W0621 21:19:34.646000 726299 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 726392 closing signal SIGTERM
+W0621 21:19:34.647000 726299 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 726393 closing signal SIGTERM
+W0621 21:19:34.647000 726299 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 726394 closing signal SIGTERM
+W0621 21:19:34.658000 1707922 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1707993 closing signal SIGTERM
+W0621 21:19:34.661000 1707922 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1707995 closing signal SIGTERM
+W0621 21:19:34.662000 1707922 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1707996 closing signal SIGTERM
+W0621 21:19:34.662000 1707922 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1707997 closing signal SIGTERM
+W0621 21:19:34.662000 1707922 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1707998 closing signal SIGTERM
+W0621 21:19:34.663000 1707922 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1707999 closing signal SIGTERM
+W0621 21:19:34.663000 1707922 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1708000 closing signal SIGTERM
+E0621 21:19:36.064000 726299 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 726395) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+ File "", line 198, in _run_module_as_main
+ File "", line 88, in _run_code
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in
+ main()
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+ return arg(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+ launch(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+ run(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+ elastic_launch(
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+ return launch_agent(self._config, self._entrypoint, list(args))
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+ raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+ time : 2025-06-21_21:19:34
+ host : fs-mbz-gpu-600
+ rank : 7 (local_rank: 7)
+ exitcode : 1 (pid: 726395)
+ error_file:
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+E0621 21:19:36.129000 1707922 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 1707994) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+ File "", line 198, in _run_module_as_main
+ File "", line 88, in _run_code
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in
+ main()
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+ return arg(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+ launch(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+ run(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+ elastic_launch(
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+ return launch_agent(self._config, self._entrypoint, list(args))
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+ raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+ time : 2025-06-21_21:19:34
+ host : fs-mbz-gpu-717
+ rank : 9 (local_rank: 1)
+ exitcode : 1 (pid: 1707994)
+ error_file:
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
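Both root-cause blocks report an empty error_file and point at the elastic errors documentation for enabling tracebacks. A minimal sketch of that hint, assuming the worker entrypoint in ./pretrain_gpt_profile.py can be decorated (main() below is a stand-in, not the script's actual function): wrapping it with torch.distributed.elastic's record decorator lets the launcher surface the child's traceback in the failure summary instead of just the exit code.

```python
from torch.distributed.elastic.multiprocessing.errors import record


@record  # writes the worker's exception trace to the error file the elastic agent reports
def main() -> None:
    # ... existing pretraining logic would go here ...
    raise RuntimeError("demo failure so the recorded traceback has something to show")


if __name__ == "__main__":
    main()
```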
++ set +x
++ set +x
++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
++ export PROF_CTX_LENGTH=16384
++ PROF_CTX_LENGTH=16384
++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L16384*tp8.cp2.bs8.json'
++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L16384*tp8.cp2.bs8.json' ']'
++ echo 'Running ctx_length=16384, TP_SIZE=8, CP_SIZE=2, BATCH_SIZE=8'
++ srun bash ./attnserver.sh
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 0 --rdzv_id 343204 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-600:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 2 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 16384 --max-position-embeddings 16384 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 1 --rdzv_id 343204 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-600:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 2 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 16384 --max-position-embeddings 16384 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+ main()
+W0621 21:19:39.107000 728683 site-packages/torch/distributed/run.py:766]
+W0621 21:19:39.107000 728683 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 21:19:39.107000 728683 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+W0621 21:19:39.107000 728683 site-packages/torch/distributed/run.py:766] *****************************************
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+ main()
+W0621 21:19:39.337000 1710248 site-packages/torch/distributed/run.py:766]
+W0621 21:19:39.337000 1710248 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 21:19:39.337000 1710248 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+W0621 21:19:39.337000 1710248 site-packages/torch/distributed/run.py:766] *****************************************
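The FutureWarning repeated above is the launcher asking the sweep to move from torch.distributed.launch to torchrun, whose workers receive their local rank from the environment rather than a --local-rank argument. A minimal sketch of the reader side of that migration, assuming a script that wants to keep working under both launchers (the argparse fallback is only a guess at what such a script might retain):

```python
# Under torchrun the same rendezvous flags carry over, e.g.
#   torchrun --nproc_per_node 8 --nnodes 2 --rdzv_backend c10d \
#            --rdzv_endpoint fs-mbz-gpu-600:29500 ./pretrain_gpt_profile.py ...
# and each worker then finds LOCAL_RANK in its environment.
import argparse
import os


def get_local_rank() -> int:
    env_rank = os.environ.get("LOCAL_RANK")
    if env_rank is not None:  # torchrun / torch.distributed.run path
        return int(env_rank)
    # Legacy torch.distributed.launch path: --local-rank arrives on the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument("--local-rank", "--local_rank", type=int, default=0)
    args, _ = parser.parse_known_args()
    return args.local_rank


if __name__ == "__main__":
    print(f"local rank: {get_local_rank()}")
```

The rest of the command line (model, parallelism, and data flags) should pass through to the script unchanged, since torchrun forwards unrecognized arguments to the training script just as the legacy launcher does.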