<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
  <channel>
    <title>PyPI recent updates for llm-guard</title>
    <link>https://pypi.tw.martin98.com/project/llm-guard/</link>
    <description>Recent updates to the Python Package Index for llm-guard</description>
    <language>en</language>
    <item>
      <title>0.3.16</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.16/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>community@protectai.com</author>
      <pubDate>Mon, 19 May 2025 12:12:58 GMT</pubDate>
    </item>
    <item>
      <title>0.3.15</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.15/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>community@protectai.com</author>
      <pubDate>Thu, 22 Aug 2024 19:39:46 GMT</pubDate>
    </item>
    <item>
      <title>0.3.14</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.14/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>community@protectai.com</author>
      <pubDate>Mon, 17 Jun 2024 08:57:07 GMT</pubDate>
    </item>
    <item>
      <title>0.3.13</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.13/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>community@protectai.com</author>
      <pubDate>Fri, 10 May 2024 13:47:21 GMT</pubDate>
    </item>
    <item>
      <title>0.3.12</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.12/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>community@protectai.com</author>
      <pubDate>Tue, 23 Apr 2024 09:16:37 GMT</pubDate>
    </item>
    <item>
      <title>0.3.11</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.11/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>community@protectai.com</author>
      <pubDate>Tue, 23 Apr 2024 09:10:22 GMT</pubDate>
    </item>
    <item>
      <title>0.3.10</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.10/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>community@protectai.com</author>
      <pubDate>Thu, 14 Mar 2024 10:26:37 GMT</pubDate>
    </item>
    <item>
      <title>0.3.9</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.9/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>community@protectai.com</author>
      <pubDate>Thu, 08 Feb 2024 14:47:41 GMT</pubDate>
    </item>
    <item>
      <title>0.3.7</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.7/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Mon, 15 Jan 2024 09:19:01 GMT</pubDate>
    </item>
    <item>
      <title>0.3.5</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.5/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Sun, 14 Jan 2024 20:50:14 GMT</pubDate>
    </item>
    <item>
      <title>0.3.4</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.4/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Thu, 21 Dec 2023 14:54:58 GMT</pubDate>
    </item>
    <item>
      <title>0.3.3</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.3/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Sat, 25 Nov 2023 20:56:22 GMT</pubDate>
    </item>
    <item>
      <title>0.3.2</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.2/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Wed, 15 Nov 2023 10:31:18 GMT</pubDate>
    </item>
    <item>
      <title>0.3.1</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.1/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Thu, 09 Nov 2023 20:13:19 GMT</pubDate>
    </item>
    <item>
      <title>0.3.0</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.3.0/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Sat, 14 Oct 2023 08:58:42 GMT</pubDate>
    </item>
    <item>
      <title>0.2.4</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.2.4/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Sat, 07 Oct 2023 20:08:15 GMT</pubDate>
    </item>
    <item>
      <title>0.2.3</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.2.3/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Sat, 23 Sep 2023 19:57:31 GMT</pubDate>
    </item>
    <item>
      <title>0.2.2</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.2.2/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Thu, 21 Sep 2023 12:29:04 GMT</pubDate>
    </item>
    <item>
      <title>0.2.1</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.2.1/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Thu, 21 Sep 2023 11:52:37 GMT</pubDate>
    </item>
    <item>
      <title>0.2.0</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.2.0/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Fri, 15 Sep 2023 19:04:48 GMT</pubDate>
    </item>
    <item>
      <title>0.1.3</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.1.3/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Sat, 02 Sep 2023 21:04:39 GMT</pubDate>
    </item>
    <item>
      <title>0.1.2</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.1.2/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection and jailbreak attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Sat, 26 Aug 2023 21:10:46 GMT</pubDate>
    </item>
    <item>
      <title>0.1.1</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.1.1/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection and jailbreak attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Sun, 20 Aug 2023 10:16:50 GMT</pubDate>
    </item>
    <item>
      <title>0.1.0</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.1.0/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection and jailbreak attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Sat, 12 Aug 2023 15:43:32 GMT</pubDate>
    </item>
    <item>
      <title>0.0.3</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.0.3/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection and jailbreak attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Thu, 10 Aug 2023 21:13:56 GMT</pubDate>
    </item>
    <item>
      <title>0.0.2</title>
      <link>https://pypi.tw.martin98.com/project/llm-guard/0.0.2/</link>
      <description>LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By offering sanitization, detection of harmful language, prevention of data leakage, and resistance against prompt injection and jailbreak attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure.</description>
      <author>hello@laiyer.ai</author>
      <pubDate>Tue, 08 Aug 2023 21:07:42 GMT</pubDate>
    </item>
  </channel>
</rss>