<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
  <channel>
    <title>PyPI recent updates for vllm</title>
    <link>https://pypi.tw.martin98.com/project/vllm/</link>
    <description>Recent updates to the Python Package Index for vllm</description>
    <language>en</language>
    <item>
      <title>0.21.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.21.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Fri, 15 May 2026 00:08:22 GMT</pubDate>
    </item>    <item>
      <title>0.20.2</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.20.2/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Sun, 10 May 2026 07:38:37 GMT</pubDate>
    </item>    <item>
      <title>0.20.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.20.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Sun, 03 May 2026 08:24:43 GMT</pubDate>
    </item>    <item>
      <title>0.20.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.20.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Mon, 27 Apr 2026 11:07:22 GMT</pubDate>
    </item>    <item>
      <title>0.19.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.19.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Sat, 18 Apr 2026 05:49:16 GMT</pubDate>
    </item>    <item>
      <title>0.19.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.19.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Fri, 03 Apr 2026 04:04:52 GMT</pubDate>
    </item>    <item>
      <title>0.18.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.18.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Tue, 31 Mar 2026 05:55:41 GMT</pubDate>
    </item>    <item>
      <title>0.18.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.18.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Fri, 20 Mar 2026 22:16:00 GMT</pubDate>
    </item>    <item>
      <title>0.17.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.17.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Wed, 11 Mar 2026 11:03:58 GMT</pubDate>
    </item>    <item>
      <title>0.17.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.17.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Sat, 07 Mar 2026 03:54:02 GMT</pubDate>
    </item>    <item>
      <title>0.16.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.16.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Thu, 26 Feb 2026 03:02:38 GMT</pubDate>
    </item>    <item>
      <title>0.15.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.15.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Thu, 05 Feb 2026 00:18:12 GMT</pubDate>
    </item>    <item>
      <title>0.15.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.15.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Thu, 29 Jan 2026 18:59:48 GMT</pubDate>
    </item>    <item>
      <title>0.14.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.14.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Sat, 24 Jan 2026 21:05:18 GMT</pubDate>
    </item>    <item>
      <title>0.14.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.14.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Tue, 20 Jan 2026 10:50:00 GMT</pubDate>
    </item>    <item>
      <title>0.13.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.13.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Fri, 19 Dec 2025 03:30:32 GMT</pubDate>
    </item>    <item>
      <title>0.12.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.12.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Wed, 03 Dec 2025 11:37:24 GMT</pubDate>
    </item>    <item>
      <title>0.11.2</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.11.2/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Thu, 20 Nov 2025 08:30:43 GMT</pubDate>
    </item>    <item>
      <title>0.11.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.11.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Wed, 19 Nov 2025 01:55:10 GMT</pubDate>
    </item>    <item>
      <title>0.11.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.11.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Sat, 04 Oct 2025 01:39:32 GMT</pubDate>
    </item>    <item>
      <title>0.10.2</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.10.2/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Sat, 13 Sep 2025 23:00:25 GMT</pubDate>
    </item>    <item>
      <title>0.10.1.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.10.1.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Wed, 20 Aug 2025 23:17:09 GMT</pubDate>
    </item>    <item>
      <title>0.10.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.10.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Tue, 19 Aug 2025 02:18:10 GMT</pubDate>
    </item>    <item>
      <title>0.10.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.10.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Fri, 25 Jul 2025 00:22:19 GMT</pubDate>
    </item>    <item>
      <title>0.9.2</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.9.2/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Tue, 08 Jul 2025 04:48:54 GMT</pubDate>
    </item>    <item>
      <title>0.9.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.9.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Tue, 10 Jun 2025 21:46:01 GMT</pubDate>
    </item>    <item>
      <title>0.9.0.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.9.0.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Fri, 30 May 2025 20:16:47 GMT</pubDate>
    </item>    <item>
      <title>0.9.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.9.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Wed, 28 May 2025 01:30:28 GMT</pubDate>
    </item>    <item>
      <title>0.8.5.post1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.8.5.post1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Fri, 02 May 2025 22:31:02 GMT</pubDate>
    </item>    <item>
      <title>0.8.5</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.8.5/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Mon, 28 Apr 2025 23:59:40 GMT</pubDate>
    </item>    <item>
      <title>0.8.4</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.8.4/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Tue, 15 Apr 2025 00:30:02 GMT</pubDate>
    </item>    <item>
      <title>0.8.3</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.8.3/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Sun, 06 Apr 2025 06:47:12 GMT</pubDate>
    </item>    <item>
      <title>0.8.2</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.8.2/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Tue, 25 Mar 2025 18:06:50 GMT</pubDate>
    </item>    <item>
      <title>0.8.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.8.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Wed, 19 Mar 2025 21:28:14 GMT</pubDate>
    </item>    <item>
      <title>0.8.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.8.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Tue, 18 Mar 2025 22:22:25 GMT</pubDate>
    </item>    <item>
      <title>0.7.3</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.7.3/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Thu, 20 Feb 2025 18:01:55 GMT</pubDate>
    </item>    <item>
      <title>0.7.2</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.7.2/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Thu, 06 Feb 2025 18:26:16 GMT</pubDate>
    </item>    <item>
      <title>0.7.1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.7.1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Sat, 01 Feb 2025 23:29:27 GMT</pubDate>
    </item>    <item>
      <title>0.7.0</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.7.0/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Mon, 27 Jan 2025 17:00:35 GMT</pubDate>
    </item>    <item>
      <title>0.6.6.post1</title>
      <link>https://pypi.tw.martin98.com/project/vllm/0.6.6.post1/</link>
      <description>A high-throughput and memory-efficient inference and serving engine for LLMs</description>
      <pubDate>Fri, 27 Dec 2024 07:09:03 GMT</pubDate>
    </item>
  </channel>
</rss>