<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
  <channel>
    <title>PyPI recent updates for tensor-parallel</title>
    <link>https://pypi.tw.martin98.com/project/tensor-parallel/</link>
    <description>Recent updates to the Python Package Index for tensor-parallel</description>
    <language>en</language>
    <item>
      <title>2.0.0</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/2.0.0/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Sun, 06 Aug 2023 14:21:47 GMT</pubDate>
    </item>
    <item>
      <title>1.3.2</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.3.2/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Thu, 27 Jul 2023 12:02:38 GMT</pubDate>
    </item>
    <item>
      <title>1.3.1</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.3.1/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Wed, 26 Jul 2023 18:49:26 GMT</pubDate>
    </item>
    <item>
      <title>1.3.0</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.3.0/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Sat, 22 Jul 2023 12:02:32 GMT</pubDate>
    </item>
    <item>
      <title>1.2.9</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.2.9/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Fri, 21 Jul 2023 16:02:33 GMT</pubDate>
    </item>
    <item>
      <title>1.2.8</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.2.8/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Fri, 23 Jun 2023 13:23:39 GMT</pubDate>
    </item>
    <item>
      <title>1.2.7</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.2.7/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Tue, 20 Jun 2023 16:49:09 GMT</pubDate>
    </item>
    <item>
      <title>1.2.6</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.2.6/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Mon, 19 Jun 2023 17:08:25 GMT</pubDate>
    </item>
    <item>
      <title>1.2.5</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.2.5/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Wed, 14 Jun 2023 15:41:48 GMT</pubDate>
    </item>
    <item>
      <title>1.2.4</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.2.4/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Sun, 14 May 2023 19:57:40 GMT</pubDate>
    </item>
    <item>
      <title>1.2.3</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.2.3/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Sun, 14 May 2023 16:54:06 GMT</pubDate>
    </item>
    <item>
      <title>1.2.2</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.2.2/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Mon, 17 Apr 2023 07:49:37 GMT</pubDate>
    </item>
    <item>
      <title>1.2.1</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.2.1/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Mon, 10 Apr 2023 16:53:06 GMT</pubDate>
    </item>
    <item>
      <title>1.2.0</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.2.0/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Mon, 03 Apr 2023 12:31:44 GMT</pubDate>
    </item>
    <item>
      <title>1.1.4</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.1.4/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Mon, 27 Mar 2023 19:47:52 GMT</pubDate>
    </item>
    <item>
      <title>1.1.3</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.1.3/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Thu, 23 Mar 2023 12:06:26 GMT</pubDate>
    </item>
    <item>
      <title>1.1.2</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.1.2/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Wed, 22 Mar 2023 14:38:20 GMT</pubDate>
    </item>
    <item>
      <title>1.1.1</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.1.1/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Wed, 15 Mar 2023 08:32:27 GMT</pubDate>
    </item>
    <item>
      <title>1.1.0</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.1.0/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Mon, 06 Mar 2023 11:06:45 GMT</pubDate>
    </item>
    <item>
      <title>1.0.25</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.25/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Tue, 21 Feb 2023 19:43:34 GMT</pubDate>
    </item>
    <item>
      <title>1.0.24</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.24/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Thu, 12 Jan 2023 19:57:42 GMT</pubDate>
    </item>
    <item>
      <title>1.0.23</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.23/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Tue, 03 Jan 2023 10:22:54 GMT</pubDate>
    </item>
    <item>
      <title>1.0.22</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.22/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Fri, 30 Dec 2022 23:26:36 GMT</pubDate>
    </item>
    <item>
      <title>1.0.19</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.19/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Mon, 26 Dec 2022 19:28:22 GMT</pubDate>
    </item>
    <item>
      <title>1.0.3</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.3/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Thu, 22 Dec 2022 20:26:41 GMT</pubDate>
    </item>
    <item>
      <title>1.0.21.dev0</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.21.dev0/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Thu, 22 Dec 2022 19:43:58 GMT</pubDate>
    </item>
    <item>
      <title>1.0.18</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.18/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Thu, 15 Dec 2022 23:56:08 GMT</pubDate>
    </item>
    <item>
      <title>1.0.17</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.17/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Wed, 14 Dec 2022 19:46:39 GMT</pubDate>
    </item>
    <item>
      <title>1.0.16</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.16/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Wed, 14 Dec 2022 12:36:00 GMT</pubDate>
    </item>
    <item>
      <title>1.0.15</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.15/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Wed, 14 Dec 2022 12:26:38 GMT</pubDate>
    </item>
    <item>
      <title>1.0.14</title>
      <link>https://pypi.tw.martin98.com/project/tensor-parallel/1.0.14/</link>
      <description>Automatically shard your large model between multiple GPUs, works without torch.distributed</description>
      <author>yalisnyak@nes.com</author>
      <pubDate>Wed, 14 Dec 2022 11:59:37 GMT</pubDate>
    </item>
  </channel>
</rss>