From 86be14d6f0f17325786607e44c9d982a43e48f7b Mon Sep 17 00:00:00 2001 From: "John (Jet Token)" Date: Tue, 7 Feb 2023 00:35:39 -0800 Subject: [PATCH] Rename module to Alignment, add guards as subtopic --- docs/index.rst | 4 +- docs/modules/alignment.rst | 39 ++++++++++++++++++ docs/modules/alignment/guards.rst | 28 +++++++++++++ .../guards/ClassifierExample.png | Bin .../guards/getting_started.ipynb | 6 +-- .../{ => alignment}/guards/key_concepts.md | 0 docs/modules/guards.rst | 27 ------------ docs/reference/modules/guards.rst | 2 +- langchain/alignment/__init__.py | 9 ++++ langchain/{utilities => alignment}/guards.py | 0 tests/unit_tests/utilities/test_guards.py | 2 +- 11 files changed, 84 insertions(+), 33 deletions(-) create mode 100644 docs/modules/alignment.rst create mode 100644 docs/modules/alignment/guards.rst rename docs/modules/{ => alignment}/guards/ClassifierExample.png (100%) rename docs/modules/{ => alignment}/guards/getting_started.ipynb (98%) rename docs/modules/{ => alignment}/guards/key_concepts.md (100%) delete mode 100644 docs/modules/guards.rst create mode 100644 langchain/alignment/__init__.py rename langchain/{utilities => alignment}/guards.py (100%) diff --git a/docs/index.rst b/docs/index.rst index 8c05fd186e7..2621cea7cdd 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -59,6 +59,8 @@ These modules are, in increasing order of complexity: - `Memory <./modules/memory.html>`_: Memory is the concept of persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory. +- `Alignment <./modules/alignment.html>`_: Alignment in the context of LLMs is the process of steering an LLM towards a desired behavior or outcome. LangChain provides a set of tools to aid in this process. 
Currently, the primary tools for alignment are "guards" which aim to prevent unwanted output from reaching the user and unwanted user input from reaching the LLM. Alignment is an active area of research, and designing LLMs that are aligned at generation time, rather than through filtering after output, is an open problem. + .. toctree:: :maxdepth: 1 @@ -72,7 +74,7 @@ These modules are, in increasing order of complexity: ./modules/chains.md ./modules/agents.md ./modules/memory.md - ./modules/guards.md + ./modules/alignment.md Use Cases ---------- diff --git a/docs/modules/alignment.rst b/docs/modules/alignment.rst new file mode 100644 index 00000000000..b88203a234c --- /dev/null +++ b/docs/modules/alignment.rst @@ -0,0 +1,39 @@ +Alignment +========================== + +Alignment in the context of LLMs is the process of steering an LLM towards a desired behavior or outcome. Alignment tools and methods can be subdivided into three broad categories based on where in the process of user interaction to LLM response they fall: + +- Methods that aim to modify or block user input to ensure an aligned response +- Methods that aim to modify the LLM itself to ensure an aligned response +- Methods that aim to modify or block the LLM response to ensure an aligned response + +Currently, aligning the LLM itself (ensuring that the LLM always gives safe, accurate, and desired responses when prompted) is an open area of research. LangChain provides tools to assist in alignment via the other two methods, limiting user input and limiting LLM responses. + +--------------------- +Guards +--------------------- + +Guards are one way you can work on aligning your applications to prevent unwanted output or abuse. Guards are a set of directives that can be applied to chains, agents, tools, user inputs, and generally any function that outputs a string. 
Guards are used to prevent an LLM-reliant function from outputting text that violates some constraint and to prevent a user from inputting text that violates some constraint. For example, a guard can be used to prevent a chain from outputting text that includes profanity or is in the wrong language. + +One of the main goals of alignment is safety and security. Guards offer some protection in these areas against things like prompt leaking or users attempting to make agents output racist or otherwise offensive content. Guards can also be used for many other things, though. For example, if your application is specific to a certain industry you may add a guard to prevent agents from outputting irrelevant content. + + +- `Getting Started <./alignment/guards/getting_started.html>`_: An overview of different types of guards and how to use them. + +- `Key Concepts <./alignment/guards/key_concepts.html>`_: A conceptual guide going over the various concepts related to guards. + +.. TODO: Probably want to add how-to guides for sentiment model guards! .. - `How-To Guides <./alignment/llms/how_to_guides.html>`_: A collection of how-to guides. These highlight how to accomplish various objectives with our LLM class, as well as how to integrate with various LLM providers. + +- `Reference <../reference/modules/guards.html>`_: API reference documentation for all Guard classes. + + +.. toctree:: + :maxdepth: 1 + :name: Guards + :hidden: + + ./alignment/guards/getting_started.ipynb + ./alignment/guards/key_concepts.md + Reference<../reference/modules/guards.rst> + diff --git a/docs/modules/alignment/guards.rst b/docs/modules/alignment/guards.rst new file mode 100644 index 00000000000..48a1b5434e2 --- /dev/null +++ b/docs/modules/alignment/guards.rst @@ -0,0 +1,28 @@ +Guards +========================== + +Guards are one way you can work on aligning your applications to prevent unwanted output or abuse. 
Guards are a set of directives that can be applied to chains, agents, tools, user inputs, and generally any function that outputs a string. Guards are used to prevent an LLM-reliant function from outputting text that violates some constraint and to prevent a user from inputting text that violates some constraint. For example, a guard can be used to prevent a chain from outputting text that includes profanity or is in the wrong language. + +One of the main goals of alignment is safety and security. Guards offer some protection in these areas against things like prompt leaking or users attempting to make agents output racist or otherwise offensive content. Guards can also be used for many other things, though. For example, if your application is specific to a certain industry you may add a guard to prevent agents from outputting irrelevant content. + + +The following sections of documentation are provided: + +- `Getting Started <./guards/getting_started.html>`_: An overview of different types of guards and how to use them. + +- `Key Concepts <./guards/key_concepts.html>`_: A conceptual guide going over the various concepts related to guards. + +.. TODO: Probably want to add how-to guides for sentiment model guards! .. - `How-To Guides <./llms/how_to_guides.html>`_: A collection of how-to guides. These highlight how to accomplish various objectives with our LLM class, as well as how to integrate with various LLM providers. + +- `Reference <../reference/modules/guards.html>`_: API reference documentation for all Guard classes. + + +.. 
toctree:: + :maxdepth: 1 + :name: Guards + :hidden: + + ./guards/getting_started.ipynb + ./guards/key_concepts.md + Reference<../reference/modules/guards.rst> diff --git a/docs/modules/guards/ClassifierExample.png b/docs/modules/alignment/guards/ClassifierExample.png similarity index 100% rename from docs/modules/guards/ClassifierExample.png rename to docs/modules/alignment/guards/ClassifierExample.png diff --git a/docs/modules/guards/getting_started.ipynb b/docs/modules/alignment/guards/getting_started.ipynb similarity index 98% rename from docs/modules/guards/getting_started.ipynb rename to docs/modules/alignment/guards/getting_started.ipynb index 8c1af657c0d..35f4d480481 100644 --- a/docs/modules/guards/getting_started.ipynb +++ b/docs/modules/alignment/guards/getting_started.ipynb @@ -29,7 +29,7 @@ "outputs": [], "source": [ "from langchain.llms import OpenAI\n", - "from langchain.utilities.guards import RestrictionGuard\n", + "from langchain.alignment import RestrictionGuard\n", "\n", "llm = OpenAI(temperature=0.9)\n", "\n", @@ -105,7 +105,7 @@ "outputs": [], "source": [ "from langchain.prompts import PromptTemplate\n", - "from langchain.utilities.guards import StringGuard\n", + "from langchain.alignment import StringGuard\n", "from langchain.chains import LLMChain\n", "\n", "llm = OpenAI(temperature=0.9)\n", @@ -203,7 +203,7 @@ "outputs": [], "source": [ "from langchain import LLMChain, OpenAI, PromptTemplate\n", - "from langchain.utilities.guards import CustomGuard\n", + "from langchain.alignment import CustomGuard\n", "\n", "llm = OpenAI(temperature=0.9)\n", "\n", diff --git a/docs/modules/guards/key_concepts.md b/docs/modules/alignment/guards/key_concepts.md similarity index 100% rename from docs/modules/guards/key_concepts.md rename to docs/modules/alignment/guards/key_concepts.md diff --git a/docs/modules/guards.rst b/docs/modules/guards.rst deleted file mode 100644 index fedf510cfa6..00000000000 --- a/docs/modules/guards.rst +++ /dev/null @@ -1,27 +0,0 
@@ -Guards -========================== - -Guards are a set of directives that can be applies to chains, agents, tools, and generally any function that outputs a string. Guards are used to prevent a llm reliant function from outputting text that violates some constraint. For example, a guard can be used to prevent a chain from outputting text that includes profanity or which is in the wrong language. - -One of the main reasons to use a guard is for security. Guards offer some protection against things like prompt leaking or users attempting to make agents output racist or otherwise offensive content. Guards can be used for many other uses, though. For example, if your application is specific to a certain industry you may add a guard to prevent agents from outputting irrelevant content. - -The following sections of documentation are provided: - -- `Getting Started <./guards/getting_started.html>`_: An overview of different types of guards and how to use them. - -- `Key Concepts <./guards/key_concepts.html>`_: A conceptual guide going over the various concepts related to guards. - -.. TODO: Probably want to add how-to guides for sentiment model guards! -.. - `How-To Guides <./llms/how_to_guides.html>`_: A collection of how-to guides. These highlight how to accomplish various objectives with our LLM class, as well as how to integrate with various LLM providers. - -- `Reference <../reference/modules/guards.html>`_: API reference documentation for all Guard classes. - - -.. toctree:: - :maxdepth: 1 - :name: LLMs - :hidden: - - ./guards/getting_started.ipynb - ./guards/key_concepts.md - Reference<../reference/modules/guards.rst> diff --git a/docs/reference/modules/guards.rst b/docs/reference/modules/guards.rst index 2ee15bbe169..23915a3f2d8 100644 --- a/docs/reference/modules/guards.rst +++ b/docs/reference/modules/guards.rst @@ -1,7 +1,7 @@ Guards =============================== -.. automodule:: langchain.utilities.guards +.. 
automodule:: langchain.alignment.guards :members: :undoc-members: diff --git a/langchain/alignment/__init__.py b/langchain/alignment/__init__.py new file mode 100644 index 00000000000..f4e0b9d479d --- /dev/null +++ b/langchain/alignment/__init__.py @@ -0,0 +1,9 @@ +"""Alignment Module""" +from langchain.alignment.guards import ( + BaseGuard, + CustomGuard, + RestrictionGuard, + StringGuard, +) + +__all__ = ["BaseGuard", "CustomGuard", "RestrictionGuard", "StringGuard"] diff --git a/langchain/utilities/guards.py b/langchain/alignment/guards.py similarity index 100% rename from langchain/utilities/guards.py rename to langchain/alignment/guards.py diff --git a/tests/unit_tests/utilities/test_guards.py b/tests/unit_tests/utilities/test_guards.py index 31db4a37c31..904ac05bde0 100644 --- a/tests/unit_tests/utilities/test_guards.py +++ b/tests/unit_tests/utilities/test_guards.py @@ -1,6 +1,6 @@ import pytest -from langchain.utilities.guards import CustomGuard, RestrictionGuard, StringGuard +from langchain.alignment.guards import CustomGuard, RestrictionGuard, StringGuard from tests.unit_tests.llms.fake_llm import FakeLLM from typing import List