{"id":528,"date":"2025-06-21T20:59:00","date_gmt":"2025-06-21T12:59:00","guid":{"rendered":"https:\/\/189505.xyz\/?p=528"},"modified":"2025-06-21T21:59:44","modified_gmt":"2025-06-21T13:59:44","slug":"pytorch-distributed-gather%e9%97%ae%e9%a2%98","status":"publish","type":"post","link":"https:\/\/189505.xyz\/?p=528","title":{"rendered":"pytorch distributed.gather\u95ee\u9898"},"content":{"rendered":"<div id=\"ez-toc-container\" class=\"ez-toc-v2_0_40 counter-hierarchy ez-toc-counter ez-toc-grey ez-toc-container-direction\">\n<div class=\"ez-toc-title-container\">\n<p class=\"ez-toc-title\">Table of Contents<\/p>\n<span class=\"ez-toc-title-toggle\"><a href=\"#\" class=\"ez-toc-pull-right ez-toc-btn ez-toc-btn-xs ez-toc-btn-default ez-toc-toggle\" area-label=\"ez-toc-toggle-icon-1\"><label for=\"item-69e0777fb5a59\" aria-label=\"Table of Content\"><span style=\"display: flex;align-items: center;width: 35px;height: 30px;justify-content: center;direction:ltr;\"><svg style=\"fill: #999;color:#999\" xmlns=\"http:\/\/www.w3.org\/2000\/svg\" class=\"list-377408\" width=\"20px\" height=\"20px\" viewBox=\"0 0 24 24\" fill=\"none\"><path d=\"M6 6H4v2h2V6zm14 0H8v2h12V6zM4 11h2v2H4v-2zm16 0H8v2h12v-2zM4 16h2v2H4v-2zm16 0H8v2h12v-2z\" fill=\"currentColor\"><\/path><\/svg><svg style=\"fill: #999;color:#999\" class=\"arrow-unsorted-368013\" xmlns=\"http:\/\/www.w3.org\/2000\/svg\" width=\"10px\" height=\"10px\" viewBox=\"0 0 24 24\" version=\"1.2\" baseProfile=\"tiny\"><path d=\"M18.2 9.3l-6.2-6.3-6.2 6.3c-.2.2-.3.4-.3.7s.1.5.3.7c.2.2.4.3.7.3h11c.3 0 .5-.1.7-.3.2-.2.3-.5.3-.7s-.1-.5-.3-.7zM5.8 14.7l6.2 6.3 6.2-6.3c.2-.2.3-.5.3-.7s-.1-.5-.3-.7c-.2-.2-.4-.3-.7-.3h-11c-.3 0-.5.1-.7.3-.2.2-.3.5-.3.7s.1.5.3.7z\"\/><\/svg><\/span><\/label><input  type=\"checkbox\" id=\"item-69e0777fb5a59\"><\/a><\/span><\/div>\n<nav><ul class='ez-toc-list ez-toc-list-level-1 ' ><li class='ez-toc-page-1 ez-toc-heading-level-1'><a class=\"ez-toc-link ez-toc-heading-1\" href=\"https:\/\/189505.xyz\/?p=528\/#%E7%8E%B0%E8%B1%A1\" title=\"\n\u73b0\u8c61 \n\">\n\u73b0\u8c61 \n<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-1'><a class=\"ez-toc-link ez-toc-heading-2\" href=\"https:\/\/189505.xyz\/?p=528\/#%E5%A4%9A%E6%9C%BA%E5%A4%9A%E5%8D%A1scatter\" title=\"\n\u591a\u673a\u591a\u5361scatter \n\">\n\u591a\u673a\u591a\u5361scatter \n<\/a><\/li><\/ul><\/nav><\/div>\n<h1><span class=\"ez-toc-section\" id=\"%E7%8E%B0%E8%B1%A1\"><\/span>\n\u73b0\u8c61<br \/>\n<span class=\"ez-toc-section-end\"><\/span><\/h1>\n<p>\u5982\u4e0b\u4ee3\u7801<\/p>\n<pre><code>import torch\n# NCCL_NVLS_ENABLE=0\n# https:\/\/huggingface.co\/blog\/huseinzol05\/tensor-parallelism\n# https:\/\/github.com\/pytorch\/elastic\/tree\/master\/examples\n# https:\/\/docs.pytorch.org\/docs\/stable\/distributed.html#torch.distributed.gather\nimport torch.nn as nn\nimport torch.distributed as dist\nimport os\n\nimport like_logger\nlogger = like_logger.init_logger(__name__)\n\ndef main():\n    local_rank = int(os.environ[&quot;LOCAL_RANK&quot;])\n    world_size = int(os.environ[&quot;WORLD_SIZE&quot;])\n    local_device = f&#039;cuda:{local_rank}&#039;\n    logger.info(f&#039;local_device:{local_device},world size: {world_size}&#039;)\n    #dist.init_process_group(backend=&#039;nccl&#039;, init_method=&quot;env:\/\/&quot;, rank=local_rank, world_size=world_size)\n    dist.init_process_group(backend=&#039;nccl&#039;)\n\n    tensor_size = 2\n\n    output_tensor = torch.zeros(tensor_size, device=local_device)\n    logger.info(f&#039;output tensor dev:{output_tensor.device}&#039;)\n\n    if dist.get_rank() == 0:\n        scatter_list = []\n        for i in range(world_size):\n            scatter_list.append(torch.ones(tensor_size, device=f&#039;cuda:{local_rank}&#039;)*i * 10)\n        all_dev = [elem.device for elem in scatter_list]\n        logger.info(f&quot;all dev:{all_dev}&quot;)\n    else:\n        scatter_list = None\n\n    dist.scatter(output_tensor, scatter_list, src=0)\n\n    logger.info(f&#039;local rank: {local_rank}, output_tensor:{output_tensor}&#039;)\n\n    output_tensor += 1\n    logger.info(f&quot;local rank {local_rank}, dist.get_rank():{dist.get_rank()}, output_tensor:{output_tensor}&quot;)\n    dist.destroy_process_group()\nif __name__ == &quot;__main__&quot;:\n    main()<\/code><\/pre>\n<p>\u5728h100\u4e0a\u8fd0\u884c<\/p>\n<pre><code>torchrun  --nproc-per-node=4  ..\/torchmo\/temp\/h3_tp.py<\/code><\/pre>\n<p>\u62a5\u9519<\/p>\n<pre><code>torch.distributed.DistBackendError: NCCL error in: \/pytorch\/torch\/csrc\/distributed\/c10d\/NCCLUtils.hpp:268, unhandled cuda error (run with NCCL_DEBUG=INFO for details), NCCL version 2.21.5\nncclUnhandledCudaError: Call to CUDA function failed.\nLast error:\nCuda failure 1 &#039;invalid argument&#039;<\/code><\/pre>\n<p>\u7ecf\u8fc7\u53cd\u590d\u6392\u67e5\uff0c\u4ece\u77e5\u4e4e\u5f97\u5230\u7b54\u6848(<a href=\"https:\/\/zhuanlan.zhihu.com\/p\/29263848323\">https:\/\/zhuanlan.zhihu.com\/p\/29263848323<\/a>)<br \/>\n\u9700\u8981\u73af\u5883\u53d8\u91cf<\/p>\n<pre><code>NCCL_NVLS_ENABLE=0 CUDA_VISIBLE_DEVICES=4,5,6,7 torchrun  --nproc-per-node=4  ..\/torchmo\/temp\/h3_tp.py<\/code><\/pre>\n<h1><span class=\"ez-toc-section\" id=\"%E5%A4%9A%E6%9C%BA%E5%A4%9A%E5%8D%A1scatter\"><\/span>\n\u591a\u673a\u591a\u5361scatter<br \/>\n<span class=\"ez-toc-section-end\"><\/span><\/h1>\n<pre><code>like@JYTFY-D1-308-H100-D01-4:~\/package\/torchmo$ cat temp\/h20_scater_multi_gpu.py \nimport torch\nimport torch.distributed as dist\nimport os\nimport like_logger\n\nlogger = like_logger.init_logger(__name__)\n\ndef main():\n    # Get distributed environment variables\n    rank = int(os.environ[&quot;RANK&quot;])\n    world_size = int(os.environ[&quot;WORLD_SIZE&quot;])\n    local_rank = int(os.environ[&quot;LOCAL_RANK&quot;])\n\n    # Set the GPU for this process\n    torch.cuda.set_device(local_rank)\n    device = torch.device(f&quot;cuda:{local_rank}&quot;)\n    log_info = []\n    interest=[&#039;LOCAL_RANK&#039;, &#039;RANK&#039;, &#039;GROUP_RANK&#039;, &#039;ROLE_RANK&#039;, &#039;LOCAL_WORLD_SIZE&#039;, &#039;WORLD_SIZE&#039;, &#039;GROUP_WORLD_SIZE&#039;, &#039;ROLE_WORLD_SIZE&#039;,]\n    for elem in interest:\n        env_value = os.getenv(elem)\n        log_info.append(f&quot;{elem}:{env_value}&quot;)\n    log_line = &quot;,&quot;.join(log_info)\n    logger.info(log_line)\n    #logger.info(f&quot;envs:{os.environ.keys()}&quot;)\n\n    # Initialize process group\n    dist.init_process_group(backend=&quot;nccl&quot;, init_method=&quot;env:\/\/&quot;)\n\n    tensor_size = 10\n    recv_tensor = torch.empty(tensor_size, dtype=torch.float32, device=device)\n\n    if rank == 0:\n        # Create a (world_size x tensor_size) tensor\n        #full_tensor = torch.randn(world_size, tensor_size, dtype=torch.float32, device=device)\n        full_tensor = torch.arange(world_size*tensor_size).reshape((world_size, tensor_size)).float().to(device)\n        scatter_list = [full_tensor[i].contiguous() for i in range(world_size)]\n        print(f&quot;[Rank {rank}] Scattering tensor:\\n{full_tensor}&quot;)\n        dist.scatter(recv_tensor, scatter_list=scatter_list, src=0)\n    else:\n        dist.scatter(recv_tensor, src=0)\n\n    logger.info(f&quot;[Rank {rank}] received tensor: {recv_tensor}&quot;)\n\n    dist.destroy_process_group()\n\nif __name__ == &quot;__main__&quot;:\n    main()\n<\/code><\/pre>\n<p>\u5728\u4e3b\u7ed3\u70b9\u542f\u52a8<\/p>\n<pre><code>(torch2) like@JYTFY-D1-308-H100-D07-2:~\/package\/torchmo$ NCCL_NVLS_ENABLE=0 CUDA_VISIBLE_DEVICES=4,5,6,7 torchrun --nnodes=2 --nproc-per-node=4 --node-rank=0 --rdzv-id=123 --rdzv-backend=c10d --rdzv-endpoint=10.157.101.103:29500 temp\/h20_scater_multi_gpu.py\nW0621 13:44:54.167000 733495 site-packages\/torch\/distributed\/run.py:792] \nW0621 13:44:54.167000 733495 site-packages\/torch\/distributed\/run.py:792] *****************************************\nW0621 13:44:54.167000 733495 site-packages\/torch\/distributed\/run.py:792] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. \nW0621 13:44:54.167000 733495 site-packages\/torch\/distributed\/run.py:792] *****************************************\n733628 INFO 06-21 13:45:01 [h20_scater_multi_gpu.py:23:main] LOCAL_RANK:0,RANK:0,GROUP_RANK:0,ROLE_RANK:0,LOCAL_WORLD_SIZE:4,WORLD_SIZE:8,GROUP_WORLD_SIZE:2,ROLE_WORLD_SIZE:8\n733631 INFO 06-21 13:45:02 [h20_scater_multi_gpu.py:23:main] LOCAL_RANK:3,RANK:3,GROUP_RANK:0,ROLE_RANK:3,LOCAL_WORLD_SIZE:4,WORLD_SIZE:8,GROUP_WORLD_SIZE:2,ROLE_WORLD_SIZE:8\n733629 INFO 06-21 13:45:02 [h20_scater_multi_gpu.py:23:main] LOCAL_RANK:1,RANK:1,GROUP_RANK:0,ROLE_RANK:1,LOCAL_WORLD_SIZE:4,WORLD_SIZE:8,GROUP_WORLD_SIZE:2,ROLE_WORLD_SIZE:8\n733630 INFO 06-21 13:45:02 [h20_scater_multi_gpu.py:23:main] LOCAL_RANK:2,RANK:2,GROUP_RANK:0,ROLE_RANK:2,LOCAL_WORLD_SIZE:4,WORLD_SIZE:8,GROUP_WORLD_SIZE:2,ROLE_WORLD_SIZE:8\n[Rank 0] Scattering tensor:\ntensor([[ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9.],\n        [10., 11., 12., 13., 14., 15., 16., 17., 18., 19.],\n        [20., 21., 22., 23., 24., 25., 26., 27., 28., 29.],\n        [30., 31., 32., 33., 34., 35., 36., 37., 38., 39.],\n        [40., 41., 42., 43., 44., 45., 46., 47., 48., 49.],\n        [50., 51., 52., 53., 54., 55., 56., 57., 58., 59.],\n        [60., 61., 62., 63., 64., 65., 66., 67., 68., 69.],\n        [70., 71., 72., 73., 74., 75., 76., 77., 78., 79.]], device=&#039;cuda:0&#039;)\n733628 INFO 06-21 13:45:04 [h20_scater_multi_gpu.py:42:main] [Rank 0] received tensor: tensor([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], device=&#039;cuda:0&#039;)\n733631 INFO 06-21 13:45:05 [h20_scater_multi_gpu.py:42:main] [Rank 3] received tensor: tensor([30., 31., 32., 33., 34., 35., 36., 37., 38., 39.], device=&#039;cuda:3&#039;)\n733630 INFO 06-21 13:45:05 [h20_scater_multi_gpu.py:42:main] [Rank 2] received tensor: tensor([20., 21., 22., 23., 24., 25., 26., 27., 28., 29.], device=&#039;cuda:2&#039;)\n733629 INFO 06-21 13:45:05 [h20_scater_multi_gpu.py:42:main] [Rank 1] received tensor: tensor([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], device=&#039;cuda:1&#039;)\n<\/code><\/pre>\n<p>\u5728\u526f\u8282\u70b9\u542f\u52a8<\/p>\n<pre><code>(torch2) like@JYTFY-D1-308-H100-C09-4:~\/package\/torchmo$ NCCL_NVLS_ENABLE=0 CUDA_VISIBLE_DEVICES=4,5,6,7 torchrun --nnodes=2 --nproc-per-node=4 --node-rank=1 --rdzv-id=123 --rdzv-backend=c10d --rdzv-endpoint=10.157.101.103:29500 temp\/h20_scater_multi_gpu.py\nW0621 13:44:53.817000 4157747 site-packages\/torch\/distributed\/run.py:792] \nW0621 13:44:53.817000 4157747 site-packages\/torch\/distributed\/run.py:792] *****************************************\nW0621 13:44:53.817000 4157747 site-packages\/torch\/distributed\/run.py:792] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. \nW0621 13:44:53.817000 4157747 site-packages\/torch\/distributed\/run.py:792] *****************************************\n4157822 INFO 06-21 13:45:00 [h20_scater_multi_gpu.py:23:main] LOCAL_RANK:0,RANK:4,GROUP_RANK:1,ROLE_RANK:4,LOCAL_WORLD_SIZE:4,WORLD_SIZE:8,GROUP_WORLD_SIZE:2,ROLE_WORLD_SIZE:8\n4157824 INFO 06-21 13:45:01 [h20_scater_multi_gpu.py:23:main] LOCAL_RANK:2,RANK:6,GROUP_RANK:1,ROLE_RANK:6,LOCAL_WORLD_SIZE:4,WORLD_SIZE:8,GROUP_WORLD_SIZE:2,ROLE_WORLD_SIZE:8\n4157825 INFO 06-21 13:45:01 [h20_scater_multi_gpu.py:23:main] LOCAL_RANK:3,RANK:7,GROUP_RANK:1,ROLE_RANK:7,LOCAL_WORLD_SIZE:4,WORLD_SIZE:8,GROUP_WORLD_SIZE:2,ROLE_WORLD_SIZE:8\n4157823 INFO 06-21 13:45:01 [h20_scater_multi_gpu.py:23:main] LOCAL_RANK:1,RANK:5,GROUP_RANK:1,ROLE_RANK:5,LOCAL_WORLD_SIZE:4,WORLD_SIZE:8,GROUP_WORLD_SIZE:2,ROLE_WORLD_SIZE:8\n4157823 INFO 06-21 13:45:04 [h20_scater_multi_gpu.py:42:main] [Rank 5] received tensor: tensor([50., 51., 52., 53., 54., 55., 56., 57., 58., 59.], device=&#039;cuda:1&#039;)\n4157825 INFO 06-21 13:45:04 [h20_scater_multi_gpu.py:42:main] [Rank 7] received tensor: tensor([70., 71., 72., 73., 74., 75., 76., 77., 78., 79.], device=&#039;cuda:3&#039;)\n4157822 INFO 06-21 13:45:04 [h20_scater_multi_gpu.py:42:main] [Rank 4] received tensor: tensor([40., 41., 42., 43., 44., 45., 46., 47., 48., 49.], device=&#039;cuda:0&#039;)\n4157824 INFO 06-21 13:45:04 [h20_scater_multi_gpu.py:42:main] [Rank 6] received tensor: tensor([60., 61., 62., 63., 64., 65., 66., 67., 68., 69.], device=&#039;cuda:2&#039;)<\/code><\/pre>\n<table>\n<thead>\n<tr>\n<th>\u73af\u5883\u53d8\u91cf<\/th>\n<th>\u542b\u4e49<\/th>\n<\/tr>\n<\/thead>\n<tbody>\n<tr>\n<td>WORLD_SIZE<\/td>\n<td>\u6240\u6709\u673a\u5668GPU \u6570\u7684\u548c<\/td>\n<\/tr>\n<tr>\n<td>RANK<\/td>\n<td>\u5728\u6240\u6709\u8282\u70b9\u4e2d\uff0cGPU ID, node id<\/td>\n<\/tr>\n<tr>\n<td>GROUP_WORLD_SIZE<\/td>\n<td>\u8282\u70b9\u6570<\/td>\n<\/tr>\n<tr>\n<td>GROUP_RANK<\/td>\n<td>group id<\/td>\n<\/tr>\n<tr>\n<td>LOCAL_WORLD_SIZE<\/td>\n<td>\u672c\u673a\u7684GPU\u6570<\/td>\n<\/tr>\n<tr>\n<td>LOCAL_RANK<\/td>\n<td>\u5728\u672c\u8282\u70b9\u5185\u90e8\uff0cGPU  ID<\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<p>\u4ee5\u4e0a<\/p>\n","protected":false},"excerpt":{"rendered":"<p>\u73b0\u8c61 \u5982\u4e0b\u4ee3\u7801 import torch # NCCL_NVLS_ENABLE=0 # https:\/\/hug &#8230; <a title=\"pytorch distributed.gather\u95ee\u9898\" class=\"read-more\" href=\"https:\/\/189505.xyz\/?p=528\" aria-label=\"More on pytorch distributed.gather\u95ee\u9898\">Read more<\/a><\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[1],"tags":[],"_links":{"self":[{"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/posts\/528"}],"collection":[{"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/189505.xyz\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=528"}],"version-history":[{"count":9,"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/posts\/528\/revisions"}],"predecessor-version":[{"id":537,"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/posts\/528\/revisions\/537"}],"wp:attachment":[{"href":"https:\/\/189505.xyz\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=528"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/189505.xyz\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=528"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/189505.xyz\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=528"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}