{"id":592,"date":"2025-09-23T11:35:59","date_gmt":"2025-09-23T03:35:59","guid":{"rendered":"https:\/\/189505.xyz\/?p=592"},"modified":"2025-09-29T11:28:07","modified_gmt":"2025-09-29T03:28:07","slug":"nvml-gpu-topo","status":"publish","type":"post","link":"https:\/\/189505.xyz\/?p=592","title":{"rendered":"nvml gpu topo"},"content":{"rendered":"<div id=\"ez-toc-container\" class=\"ez-toc-v2_0_40 counter-hierarchy ez-toc-counter ez-toc-grey ez-toc-container-direction\">\n<div class=\"ez-toc-title-container\">\n<p class=\"ez-toc-title\">Table of Contents<\/p>\n<span class=\"ez-toc-title-toggle\"><a href=\"#\" class=\"ez-toc-pull-right ez-toc-btn ez-toc-btn-xs ez-toc-btn-default ez-toc-toggle\" area-label=\"ez-toc-toggle-icon-1\"><label for=\"item-69e01ac2a8939\" aria-label=\"Table of Content\"><span style=\"display: flex;align-items: center;width: 35px;height: 30px;justify-content: center;direction:ltr;\"><svg style=\"fill: #999;color:#999\" xmlns=\"http:\/\/www.w3.org\/2000\/svg\" class=\"list-377408\" width=\"20px\" height=\"20px\" viewBox=\"0 0 24 24\" fill=\"none\"><path d=\"M6 6H4v2h2V6zm14 0H8v2h12V6zM4 11h2v2H4v-2zm16 0H8v2h12v-2zM4 16h2v2H4v-2zm16 0H8v2h12v-2z\" fill=\"currentColor\"><\/path><\/svg><svg style=\"fill: #999;color:#999\" class=\"arrow-unsorted-368013\" xmlns=\"http:\/\/www.w3.org\/2000\/svg\" width=\"10px\" height=\"10px\" viewBox=\"0 0 24 24\" version=\"1.2\" baseProfile=\"tiny\"><path d=\"M18.2 9.3l-6.2-6.3-6.2 6.3c-.2.2-.3.4-.3.7s.1.5.3.7c.2.2.4.3.7.3h11c.3 0 .5-.1.7-.3.2-.2.3-.5.3-.7s-.1-.5-.3-.7zM5.8 14.7l6.2 6.3 6.2-6.3c.2-.2.3-.5.3-.7s-.1-.5-.3-.7c-.2-.2-.4-.3-.7-.3h-11c-.3 0-.5.1-.7.3-.2.2-.3.5-.3.7s.1.5.3.7z\"\/><\/svg><\/span><\/label><input  type=\"checkbox\" id=\"item-69e01ac2a8939\"><\/a><\/span><\/div>\n<nav><ul class='ez-toc-list ez-toc-list-level-1 ' ><li class='ez-toc-page-1 ez-toc-heading-level-1'><a class=\"ez-toc-link ez-toc-heading-1\" href=\"https:\/\/189505.xyz\/?p=592\/#nvmlGpuTopologyLevel_t\" 
title=\"nvmlGpuTopologyLevel_t\">nvmlGpuTopologyLevel_t<\/a><ul class='ez-toc-list-level-2'><li class='ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-2\" href=\"https:\/\/189505.xyz\/?p=592\/#what_is_dual_gpu\" title=\"what is dual gpu\">what is dual gpu<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-3\" href=\"https:\/\/189505.xyz\/?p=592\/#What_is_a_PCIe_switch\" title=\"What is a PCIe switch?\">What is a PCIe switch?<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-4\" href=\"https:\/\/189505.xyz\/?p=592\/#What_is_a_host_bridge\" title=\"What is a host bridge?\">What is a host bridge?<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-5\" href=\"https:\/\/189505.xyz\/?p=592\/#GPUs_connected_by_host_bridge_lines_same_host_or_different_host\" title=\"GPUs connected by host bridge lines: same host or different host?\">GPUs connected by host bridge lines: same host or different host?<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-6\" href=\"https:\/\/189505.xyz\/?p=592\/#All_devices_in_the_system_lines_same_host_or_different_host\" title=\"All devices in the system lines: same host or different host?\">All devices in the system lines: same host or different host?<\/a><\/li><\/ul><\/li><li class='ez-toc-page-1 ez-toc-heading-level-1'><a class=\"ez-toc-link ez-toc-heading-7\" href=\"https:\/\/189505.xyz\/?p=592\/#can_a_numa_node_have_multiple_cpu_socket\" title=\"can a numa node have multiple cpu socket?\">can a numa node have multiple cpu socket?<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-1'><a class=\"ez-toc-link ez-toc-heading-8\" href=\"https:\/\/189505.xyz\/?p=592\/#nvidia-smi_topo%E8%BE%93%E5%87%BA\" title=\"nvidia-smi topo\u8f93\u51fa\">nvidia-smi topo\u8f93\u51fa<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-1'><a 
class=\"ez-toc-link ez-toc-heading-9\" href=\"https:\/\/189505.xyz\/?p=592\/#simlnvmlnvidia-smi_topo%E4%B8%AD%E7%9A%84topo%E5%AF%B9%E9%BD%90\" title=\"siml\/nvml\/nvidia-smi topo\u4e2d\u7684topo\u5bf9\u9f50\">siml\/nvml\/nvidia-smi topo\u4e2d\u7684topo\u5bf9\u9f50<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-1'><a class=\"ez-toc-link ez-toc-heading-10\" href=\"https:\/\/189505.xyz\/?p=592\/#%E5%A6%82%E4%BD%95%E6%9F%A5%E6%9C%8D%E5%8A%A1%E5%99%A8%E4%B8%8A%E6%9C%89%E6%B2%A1%E6%9C%89InfiniBand\" title=\"\u5982\u4f55\u67e5\u670d\u52a1\u5668\u4e0a\u6709\u6ca1\u6709InfiniBand\">\u5982\u4f55\u67e5\u670d\u52a1\u5668\u4e0a\u6709\u6ca1\u6709InfiniBand<\/a><ul class='ez-toc-list-level-2'><li class='ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-11\" href=\"https:\/\/189505.xyz\/?p=592\/#ibstat\" title=\"ibstat\">ibstat<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-12\" href=\"https:\/\/189505.xyz\/?p=592\/#ibv_devinfo\" title=\"ibv_devinfo\">ibv_devinfo<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-13\" href=\"https:\/\/189505.xyz\/?p=592\/#lspci_grep_-i_mell\" title=\"lspci | grep -i mell\">lspci | grep -i mell<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-14\" href=\"https:\/\/189505.xyz\/?p=592\/#ip_link_show\" title=\"ip link show\">ip link show<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-15\" href=\"https:\/\/189505.xyz\/?p=592\/#ethtool_-i\" title=\"ethtool -i \">ethtool -i <\/a><\/li><\/ul><\/li><\/ul><\/nav><\/div>\n<h1><span class=\"ez-toc-section\" id=\"nvmlGpuTopologyLevel_t\"><\/span>nvmlGpuTopologyLevel_t<span class=\"ez-toc-section-end\"><\/span><\/h1>\n<pre><code>typedef enum nvmlGpuLevel_enum\n{\n    NVML_TOPOLOGY_INTERNAL           = 0, \/\/ e.g. 
Tesla K80\n    NVML_TOPOLOGY_SINGLE             = 10, \/\/ all devices that only need traverse a single PCIe switch\n    NVML_TOPOLOGY_MULTIPLE           = 20, \/\/ all devices that need not traverse a host bridge\n    NVML_TOPOLOGY_HOSTBRIDGE         = 30, \/\/ all devices that are connected to the same host bridge\n    NVML_TOPOLOGY_NODE               = 40, \/\/ all devices that are connected to the same NUMA node but possibly multiple host bridges\n    NVML_TOPOLOGY_SYSTEM             = 50  \/\/ all devices in the system\n\n    \/\/ there is purposefully no COUNT here because of the need for spacing above\n} nvmlGpuTopologyLevel_t;\n<\/code><\/pre>\n<p>NVML_TOPOLOGY_INTERNAL<br \/>\nGPUs are on the same board (e.g., dual-GPU card). Fastest connection.<\/p>\n<p>NVML_TOPOLOGY_SINGLE<br \/>\nGPUs share the same PCIe switch, typically the same PCIe slot\/complex. Lower latency and higher bandwidth compared to higher levels.<\/p>\n<p>NVML_TOPOLOGY_MULTIPLE<br \/>\nGPUs are under different PCIe switches but still below the same host bridge. This adds more PCIe hops.<\/p>\n<p>NVML_TOPOLOGY_HOSTBRIDGE<br \/>\nGPUs are connected through a host bridge. A \u201chost bridge\u201d connects the CPU\/system root complex to one or more PCIe hierarchies.<br \/>\n\u2192 So two GPUs connected via host bridge means: they are still on the same host, but their PCIe paths diverge only above the level of switches, at the root complex.<\/p>\n<p>NVML_TOPOLOGY_NODE<br \/>\nGPUs are on the same NUMA node but attached to different host bridges (root complexes), so traffic crosses the interconnect between PCIe host bridges within that node. Latency is higher than HOSTBRIDGE.<\/p>\n<p>NVML_TOPOLOGY_SYSTEM<br \/>\nGPUs are only reachable through the full system interconnect. 
This is the farthest relationship.<br \/>\n\u2192 Still within the same physical host (unless you\u2019re in a virtualized\/multi-host environment with NVSwitch across nodes, which is rare outside DGX SuperPOD).<\/p>\n<h2><span class=\"ez-toc-section\" id=\"what_is_dual_gpu\"><\/span>what is dual gpu<span class=\"ez-toc-section-end\"><\/span><\/h2>\n<p>A dual-GPU card means a single expansion card (a single PCB that you plug into a PCIe slot) that carries two separate GPU dies (packages), often with their own memory, power regulators, and cooling, but sharing the same board and PCIe interface.<\/p>\n<p>Examples:<\/p>\n<p>NVIDIA GeForce GTX 690 (Kepler, 2012) \u2192 2 \u00d7 GK104 GPUs on one board<\/p>\n<p>NVIDIA Tesla K80 (Kepler, 2014) \u2192 2 \u00d7 GK210 GPUs on one board, often used in datacenters<\/p>\n<p>AMD Radeon HD 7990 (Tahiti, 2013) \u2192 2 \u00d7 Tahiti GPUs<\/p>\n<p>Why it matters for NVML:<\/p>\n<p>If you query topology with NVML, two GPUs on the same PCB (dual-GPU card) will usually return<br \/>\nNVML_TOPOLOGY_INTERNAL \u2192 meaning closest possible connection, since they may share a PCIe bridge chip directly on the card.<\/p>\n<p>So yes \u2014 your interpretation is correct:<br \/>\n\ud83d\udc49 Dual-GPU card = two GPU packages on the same PCB board, plugged into a single PCIe slot.<\/p>\n<h2><span class=\"ez-toc-section\" id=\"What_is_a_PCIe_switch\"><\/span>What is a PCIe switch?<span class=\"ez-toc-section-end\"><\/span><\/h2>\n<p>A hardware component that fans out PCIe lanes. Think of it like an Ethernet switch but for PCIe. Multiple devices (GPUs, NICs, NVMe) can attach under a PCIe switch.<\/p>\n<h2><span class=\"ez-toc-section\" id=\"What_is_a_host_bridge\"><\/span>What is a host bridge?<span class=\"ez-toc-section-end\"><\/span><\/h2>\n<p>The component that connects CPU\/system memory (root complex) to one or more PCIe hierarchies. 
In multi-socket servers, each CPU typically has its own host bridge.<\/p>\n<h2><span class=\"ez-toc-section\" id=\"GPUs_connected_by_host_bridge_lines_same_host_or_different_host\"><\/span>GPUs connected by host bridge lines: same host or different host?<span class=\"ez-toc-section-end\"><\/span><\/h2>\n<p>Same host. They are just under different PCIe hierarchies attached to the same CPU root complex.<\/p>\n<h2><span class=\"ez-toc-section\" id=\"All_devices_in_the_system_lines_same_host_or_different_host\"><\/span>All devices in the system lines: same host or different host?<span class=\"ez-toc-section-end\"><\/span><\/h2>\n<p>Same host. NVML_TOPOLOGY_SYSTEM means &quot;the farthest possible connection within this system.&quot;<br \/>\nIt doesn\u2019t mean cross-host. NVML itself is per-host and does not describe connections between different physical servers.<\/p>\n<pre><code class=\"language-mermaid\">graph TD\n    CPU[&quot;CPU \/ Root Complex&lt;br\/&gt;(PCIe Controller)&quot;]\n    Switch[&quot;PCIe Switch&lt;br\/&gt;(fan-out)&quot;]\n\n    CPU --&gt;|x16 lanes| Switch\n\n    subgraph &quot;PCIe Slots&quot;\n        Slot1[&quot;PCIe Slot x16&lt;br\/&gt;(16 lanes)&quot;]\n        Slot2[&quot;PCIe Slot x8&lt;br\/&gt;(8 lanes)&quot;]\n        Slot3[&quot;PCIe Slot x4&lt;br\/&gt;(4 lanes)&quot;]\n        Slot4[&quot;PCIe Slot x1&lt;br\/&gt;(1 lane)&quot;]\n    end\n\n    Switch --&gt;|x16 lanes| Slot1\n    Switch --&gt;|x8 lanes| Slot2\n    Switch --&gt;|x4 lanes| Slot3\n    Switch --&gt;|x1 lane| Slot4<\/code><\/pre>\n<h1><span class=\"ez-toc-section\" id=\"can_a_numa_node_have_multiple_cpu_socket\"><\/span>can a numa node have multiple cpu socket?<span class=\"ez-toc-section-end\"><\/span><\/h1>\n<p>Yes \u2014 but it depends on the system architecture.<\/p>\n<p>Most common today:<br \/>\nA NUMA node = 1 CPU socket + its directly attached memory.<br \/>\nThis is the standard mapping in modern x86 servers (Intel Xeon, AMD EPYC). 
Each socket is its own NUMA node.<\/p>\n<p>Possible but less common:<br \/>\nA NUMA node can include multiple sockets if the firmware\/OS groups them that way. This was more typical in older systems (e.g., some SGI or IBM big-iron machines) or if the BIOS is set to \u201cNUMA = off\u201d \/ \u201cNode Interleaving.\u201d In that case, the OS may see 1 NUMA node spanning multiple sockets.<\/p>\n<p>Also possible:<br \/>\nA single socket can expose multiple NUMA nodes. Example:<\/p>\n<p>AMD EPYC (Naples\/Rome\/Milan) has multiple \u201cCCDs\u201d (chiplets). Each CCD can appear as its own NUMA node, even though they\u2019re on the same physical socket.<\/p>\n<p>\ud83d\udc49 So:<\/p>\n<p>By default: 1 NUMA node \u2248 1 CPU socket.<\/p>\n<p>But depending on system design or BIOS\/firmware config:<\/p>\n<p>One node can span multiple sockets, or<\/p>\n<p>One socket can be split into multiple NUMA nodes.<\/p>\n<h1><span class=\"ez-toc-section\" id=\"nvidia-smi_topo%E8%BE%93%E5%87%BA\"><\/span>nvidia-smi topo\u8f93\u51fa<span class=\"ez-toc-section-end\"><\/span><\/h1>\n<pre><code>55006|JYTFY-D1-308-H100-D01-4|2025-09-22 10:36:21[like@ ~]nvidia-smi topo -m\n        GPU0    GPU1    GPU2    GPU3    GPU4    GPU5    GPU6    GPU7    NIC0    NIC1    NIC2    NIC3    CPU Affinity    NUMA Affinity   GPU NUMA ID\nGPU0     X      NV18    NV18    NV18    NV18    NV18    NV18    NV18    NODE    NODE    SYS     SYS     0,2,4,6,8,10    0               N\/A\nGPU1    NV18     X      NV18    NV18    NV18    NV18    NV18    NV18    PIX     NODE    SYS     SYS     0,2,4,6,8,10    0               N\/A\nGPU2    NV18    NV18     X      NV18    NV18    NV18    NV18    NV18    NODE    PIX     SYS     SYS     0,2,4,6,8,10    0               N\/A\nGPU3    NV18    NV18    NV18     X      NV18    NV18    NV18    NV18    NODE    NODE    SYS     SYS     0,2,4,6,8,10    0               N\/A\nGPU4    NV18    NV18    NV18    NV18     X      NV18    NV18    NV18    SYS     SYS     NODE    NODE    
1,3,5,7,9,11    1               N\/A\nGPU5    NV18    NV18    NV18    NV18    NV18     X      NV18    NV18    SYS     SYS     PIX     NODE    1,3,5,7,9,11    1               N\/A\nGPU6    NV18    NV18    NV18    NV18    NV18    NV18     X      NV18    SYS     SYS     NODE    NODE    1,3,5,7,9,11    1               N\/A\nGPU7    NV18    NV18    NV18    NV18    NV18    NV18    NV18     X      SYS     SYS     NODE    PIX     1,3,5,7,9,11    1               N\/A\nNIC0    NODE    PIX     NODE    NODE    SYS     SYS     SYS     SYS      X      NODE    SYS     SYS\nNIC1    NODE    NODE    PIX     NODE    SYS     SYS     SYS     SYS     NODE     X      SYS     SYS\nNIC2    SYS     SYS     SYS     SYS     NODE    PIX     NODE    NODE    SYS     SYS      X      NODE\nNIC3    SYS     SYS     SYS     SYS     NODE    NODE    NODE    PIX     SYS     SYS     NODE     X\n\nLegend:\n\n  X    = Self\n  SYS  = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI\/UPI)\n  NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node\n  PHB  = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)\n  PXB  = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)\n  PIX  = Connection traversing at most a single PCIe bridge\n  NV#  = Connection traversing a bonded set of # NVLinks\n\nNIC Legend:\n\n  NIC0: mlx5_0\n  NIC1: mlx5_1\n  NIC2: mlx5_2\n  NIC3: mlx5_3\n<\/code><\/pre>\n<h1><span class=\"ez-toc-section\" id=\"simlnvmlnvidia-smi_topo%E4%B8%AD%E7%9A%84topo%E5%AF%B9%E9%BD%90\"><\/span>siml\/nvml\/nvidia-smi topo\u4e2d\u7684topo\u5bf9\u9f50<span class=\"ez-toc-section-end\"><\/span><\/h1>\n<pre><code># same host, cross numa\nSMI_PATH_SYS = 6, \/\/\/&lt; Cross-NUMA connection\nNVML_TOPOLOGY_SYSTEM             = 50  \/\/ all devices in the system\nSYS  = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI\/UPI)\n\n# 
same numa, cross host bridge. host bridge=root complex in wikipedia\n    SMI_PATH_NODE = 5, \/\/\/&lt; NUMA node internal\n    NVML_TOPOLOGY_NODE               = 40, \/\/ all devices that are connected to the same NUMA node but possibly multiple host bridges\n    NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node\n\n# same host bridge\n    SMI_PATH_PHB = 4, \/\/\/&lt; PCIe Host Bridge\n    NVML_TOPOLOGY_HOSTBRIDGE         = 30, \/\/ all devices that are connected to the same host bridge\n     PHB  = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)\nThe label PHB indicates that data must traverse the PCIe Host Bridge, typically meaning the CPU. This path incurs some latency because data must pass through the CPU before reaching its destination.\n\n# different PCIe switches but still below the same host bridge\n    SMI_PATH_PXB = 3, \/\/\/&lt; Multiple PCIe bridges\n    NVML_TOPOLOGY_MULTIPLE           = 20, \/\/ all devices that need not traverse a host bridge\n    PXB  = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)\n#\n\n    SMI_PATH_PIX = 2, \/\/\/&lt; Single PCIe bridge\n    NVML_TOPOLOGY_SINGLE             = 10, \/\/ all devices that only need traverse a single PCIe switch\nPIX  = Connection traversing at most a single PCIe bridge\n<\/code><\/pre>\n<h1><span class=\"ez-toc-section\" id=\"%E5%A6%82%E4%BD%95%E6%9F%A5%E6%9C%8D%E5%8A%A1%E5%99%A8%E4%B8%8A%E6%9C%89%E6%B2%A1%E6%9C%89InfiniBand\"><\/span>\u5982\u4f55\u67e5\u670d\u52a1\u5668\u4e0a\u6709\u6ca1\u6709InfiniBand<span class=\"ez-toc-section-end\"><\/span><\/h1>\n<h2><span class=\"ez-toc-section\" id=\"ibstat\"><\/span>ibstat<span class=\"ez-toc-section-end\"><\/span><\/h2>\n<p><a href=\"https:\/\/static.189505.xyz\/blogTexts\/ibstat.verbose.siorigin.h100.txt\">https:\/\/static.189505.xyz\/blogTexts\/ibstat.verbose.siorigin.h100.txt<\/a><\/p>\n<h2><span class=\"ez-toc-section\" 
id=\"ibv_devinfo\"><\/span>ibv_devinfo<span class=\"ez-toc-section-end\"><\/span><\/h2>\n<p><a href=\"https:\/\/static.189505.xyz\/blogTexts\/ibv_devinfo.verbose.siorigin.h100.txt\">https:\/\/static.189505.xyz\/blogTexts\/ibv_devinfo.verbose.siorigin.h100.txt<\/a> <\/p>\n<h2><span class=\"ez-toc-section\" id=\"lspci_grep_-i_mell\"><\/span>lspci | grep -i mell<span class=\"ez-toc-section-end\"><\/span><\/h2>\n<p><a href=\"https:\/\/static.189505.xyz\/blogTexts\/lspcivv.verbose.siorigin.h100.txt\">https:\/\/static.189505.xyz\/blogTexts\/lspcivv.verbose.siorigin.h100.txt<\/a><\/p>\n<h2><span class=\"ez-toc-section\" id=\"ip_link_show\"><\/span>ip link show<span class=\"ez-toc-section-end\"><\/span><\/h2>\n<p><a href=\"https:\/\/static.189505.xyz\/blogTexts\/ip.link.show.siorigin.h100.log\">https:\/\/static.189505.xyz\/blogTexts\/ip.link.show.siorigin.h100.log<\/a><\/p>\n<h2><span class=\"ez-toc-section\" id=\"ethtool_-i\"><\/span>ethtool -i <iface><span class=\"ez-toc-section-end\"><\/span><\/h2>\n<pre><code>for I in `ip link show | grep ibp | awk -F: &#039;{print $2}&#039;`; do echo &quot;=========== eth:$I&quot;; ethtool -i $I; done <\/code><\/pre>\n<p><a href=\"https:\/\/static.189505.xyz\/blogTexts\/ethtool.siorigin.h100.txt\">https:\/\/static.189505.xyz\/blogTexts\/ethtool.siorigin.h100.txt<\/a><\/p>\n","protected":false},"excerpt":{"rendered":"<p>nvmlGpuTopologyLevel_t typedef enum nvmlGpuLevel_enum { &#8230; <a title=\"nvml gpu topo\" class=\"read-more\" href=\"https:\/\/189505.xyz\/?p=592\" aria-label=\"More on nvml gpu topo\">Read 
more<\/a><\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[1],"tags":[],"_links":{"self":[{"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/posts\/592"}],"collection":[{"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/189505.xyz\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=592"}],"version-history":[{"count":21,"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/posts\/592\/revisions"}],"predecessor-version":[{"id":614,"href":"https:\/\/189505.xyz\/index.php?rest_route=\/wp\/v2\/posts\/592\/revisions\/614"}],"wp:attachment":[{"href":"https:\/\/189505.xyz\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=592"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/189505.xyz\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=592"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/189505.xyz\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=592"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}