diff --git a/docs/extras/integrations/llms/promptguard.ipynb b/docs/extras/integrations/llms/promptguard.ipynb index 7cc447bbf46..f93244ecad0 100644 --- a/docs/extras/integrations/llms/promptguard.ipynb +++ b/docs/extras/integrations/llms/promptguard.ipynb @@ -39,7 +39,7 @@ "\n", "# Set API keys\n", "\n", - "os.environ['PROMPT_GUARD_API_KEY'] = \"\"\n", + "os.environ['PROMPTGUARD_API_KEY'] = \"\"\n", "os.environ['OPENAI_API_KEY'] = \"\"" ] }, @@ -47,9 +47,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Use PromptGuardLLMWrapper\n", + "# Use PromptGuard LLM Wrapper\n", "\n", - "Applying promptguard to your application could be as simple as wrapping your LLM using the PromptGuardLLMWrapper class by replace `llm=OpenAI()` with `llm=PromptGuardLLMWrapper(OpenAI())`." + "Applying promptguard to your application could be as simple as wrapping your LLM using the PromptGuard class by replacing `llm=OpenAI()` with `llm=PromptGuard(base_llm=OpenAI())`." ] }, { @@ -64,7 +64,7 @@ "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferWindowMemory\n", "\n", - "from langchain.llms import PromptGuardLLMWrapper\n", + "from langchain.llms import PromptGuard\n", "\n", "langchain.verbose = True\n", "langchain.debug = True\n", @@ -106,7 +106,7 @@ "\n", "chain = LLMChain(\n", " prompt=PromptTemplate.from_template(prompt_template),\n", - " llm=PromptGuardLLMWrapper(llm=OpenAI()),\n", + " llm=PromptGuard(base_llm=OpenAI()),\n", " memory=ConversationBufferWindowMemory(k=2),\n", " verbose=True,\n", ")\n", diff --git a/libs/langchain/langchain/llms/promptguard.py b/libs/langchain/langchain/llms/promptguard.py index a6e85974657..9f99ff40193 100644 --- a/libs/langchain/langchain/llms/promptguard.py +++ b/libs/langchain/langchain/llms/promptguard.py @@ -23,10 +23,10 @@ class PromptGuard(LLM): Example: .. 
code-block:: python - from langchain.llms import PromptGuardLLM + from langchain.llms import PromptGuard from langchain.chat_models import ChatOpenAI - prompt_guard_llm = PromptGuardLLM(base_llm=ChatOpenAI()) + prompt_guard_llm = PromptGuard(base_llm=ChatOpenAI()) """ base_llm: BaseLanguageModel