Commit ed7a1b2

Generate Python docs from pytorch/pytorch@a470c04

pytorchbot committed Apr 19, 2023
1 parent 9d50996 commit ed7a1b2
Showing 19 changed files with 260 additions and 162 deletions.
Binary file modified docs/2.0/_images/RReLU.png
2 changes: 1 addition & 1 deletion docs/2.0/_modules/torch/cuda/amp/autocast_mode.html
@@ -504,7 +504,7 @@ <h1>Source code for torch.cuda.amp.autocast_mode</h1>
     if isinstance(value, torch.Tensor):
         is_eligible = (value.is_floating_point() and value.is_cuda and (value.dtype is not torch.float64))
         return value.to(dtype) if is_eligible else value
-    elif isinstance(value, str):
+    elif isinstance(value, (str, bytes)):
         return value
     elif HAS_NUMPY and isinstance(value, np.ndarray):
         return value
41 changes: 28 additions & 13 deletions docs/2.0/_modules/torch/distributed/distributed_c10d.html
@@ -683,7 +683,6 @@ <h1>Source code for torch.distributed.distributed_c10d</h1>

     def __init__(self, backend: Union[str, Backend]):
         self.device_backend_map: Dict[torch.device, Backend] = {}
-        # error check to make sure the config string is valid

         # Cases for when backend is a single string (without device types)
         if backend == Backend.UNDEFINED:
@@ -700,13 +699,24 @@ <h1>Source code for torch.distributed.distributed_c10d</h1>
                 "cuda": backend_val,
             }
         else:
-            # custom backend string in format of "{device_type1}:{backend1},{device_type2}:{backend2}"
-            # TODO
-            pass
-
-        required_devices = ["cpu", "cuda"]
-        for device in required_devices:
-            assert device in self.device_backend_map
+            # make sure the backend string is in the correct format
+            # "{device_type1}:{backend1},{device_type2}:{backend2}"
+            # e.g. "cpu:gloo,cuda:nccl"
+            backend_str_error_message = f"""The custom backend string argument is invalid: {backend}.
+                Custom backend string is an experimental feature where the backend string must be in the format:
+                "<device_type1>:<backend1>,<device_type2>:<backend2>...". e.g. 'cpu:gloo,cuda:nccl'"""
+
+            # parse the backend string and populate the device_backend_map
+            for device_backend_pair_str in backend.lower().split(","):
+                device_backend_pair = device_backend_pair_str.split(":")
+                if len(device_backend_pair) != 2:
+                    raise ValueError(f"Invalid device:backend pairing: \
+                        {device_backend_pair_str}. {backend_str_error_message}")
+                device, backend = device_backend_pair
+                if device in self.device_backend_map:
+                    raise ValueError(f"Duplicate device type {device} \
+                        in backend string: {backend}. {backend_str_error_message}")
+                self.device_backend_map[device] = Backend(backend)

     def __repr__(self):
         # string with all the device:backend pairs separated by commas
@@ -1293,7 +1303,9 @@ <h1>Source code for torch.distributed.distributed_c10d</h1>
     .. note:: Support for multiple backends is experimental. Currently when no backend is
         specified, both ``gloo`` and ``nccl`` backends will be created. The ``gloo`` backend
         will be used for collectives with CPU tensors and the ``nccl`` backend will be used
-        for collectives with CUDA tensors.
+        for collectives with CUDA tensors. A custom backend can be specified by passing in
+        a string with format "<device_type>:<backend_name>,<device_type>:<backend_name>", e.g.
+        "cpu:gloo,cuda:custom_backend".

     """
     global _world
@@ -1444,6 +1456,9 @@ <h1>Source code for torch.distributed.distributed_c10d</h1>
             backend_type = ProcessGroup.BackendType.MPI
             if not backend_class:
                 return GroupMember.NON_GROUP_MEMBER
+            # create new process group with accurate rank and size
+            if pg.rank() == -1 and pg.size() == -1:
+                pg = ProcessGroup(backend_prefix_store, backend_class.rank(), backend_class.size(), base_pg_options)
         elif backend_str == Backend.GLOO:
             # TODO: remove this check after lazy initialization is supported
             # if pg_options is not None:
@@ -1527,15 +1542,15 @@ <h1>Source code for torch.distributed.distributed_c10d</h1>
                 timeout=timeout,
             )

-        # only create single backend pg when backend is set to gloo, nccl, mpi, and ucc
-        if backend in [Backend.GLOO, Backend.NCCL, Backend.UCC, Backend.MPI]:
+        # register only a single backend when all get_device_backend_map values are the same
+        if len(set(backend_config.get_device_backend_map().values())) == 1:
             for device in backend_config.get_device_backend_map().keys():
                 pg._register_backend(torch.device(device), backend_type, backend_class)

             # break out of outer loop to not create any more backends
             break
-        else:
-            pg._register_backend(torch.device(device), backend_type, backend_class)
+
+        pg._register_backend(torch.device(device), backend_type, backend_class)

     # update global state
     _world.pg_map[pg] = (backend, prefix_store)