refactor: rename to OpaquePrompts (#10013)

Renamed PromptGuard to OpaquePrompts.

cc @baskaryan Thanks in advance!
Zizhong Zhang
2023-08-31 12:21:24 -07:00
committed by GitHub
parent 8d66b00c73
commit 641b71e2cd
5 changed files with 60 additions and 60 deletions
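For reviewers, the user-facing effect is only the public names. A minimal before/after sketch based on the docstring example in this diff (it assumes the `opaqueprompts` package is installed and `OPAQUEPROMPTS_API_KEY` is set):

```python
# Before this change (old naming, removed by this commit):
# from langchain.llms import PromptGuard
# llm = PromptGuard(base_llm=ChatOpenAI())

# After this change (new naming):
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpaquePrompts

llm = OpaquePrompts(base_llm=ChatOpenAI())
```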

@@ -62,6 +62,7 @@ from langchain.llms.mosaicml import MosaicML
 from langchain.llms.nlpcloud import NLPCloud
 from langchain.llms.octoai_endpoint import OctoAIEndpoint
 from langchain.llms.ollama import Ollama
+from langchain.llms.opaqueprompts import OpaquePrompts
 from langchain.llms.openai import AzureOpenAI, OpenAI, OpenAIChat
 from langchain.llms.openllm import OpenLLM
 from langchain.llms.openlm import OpenLM
@@ -69,7 +70,6 @@ from langchain.llms.petals import Petals
 from langchain.llms.pipelineai import PipelineAI
 from langchain.llms.predibase import Predibase
 from langchain.llms.predictionguard import PredictionGuard
-from langchain.llms.promptguard import PromptGuard
 from langchain.llms.promptlayer_openai import PromptLayerOpenAI, PromptLayerOpenAIChat
 from langchain.llms.replicate import Replicate
 from langchain.llms.rwkv import RWKV
@@ -142,7 +142,7 @@ __all__ = [
     "PredictionGuard",
     "PromptLayerOpenAI",
     "PromptLayerOpenAIChat",
-    "PromptGuard",
+    "OpaquePrompts",
     "RWKV",
     "Replicate",
     "SagemakerEndpoint",
@@ -207,7 +207,7 @@ type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
     "petals": Petals,
     "pipelineai": PipelineAI,
     "predibase": Predibase,
-    "promptguard": PromptGuard,
+    "opaqueprompts": OpaquePrompts,
     "replicate": Replicate,
     "rwkv": RWKV,
     "sagemaker_endpoint": SagemakerEndpoint,

@@ -10,23 +10,23 @@ from langchain.utils import get_from_dict_or_env
 logger = logging.getLogger(__name__)
 
 
-class PromptGuard(LLM):
-    """An LLM wrapper that uses PromptGuard to sanitize prompts.
+class OpaquePrompts(LLM):
+    """An LLM wrapper that uses OpaquePrompts to sanitize prompts.
 
     Wraps another LLM and sanitizes prompts before passing it to the LLM, then
     de-sanitizes the response.
 
-    To use, you should have the ``promptguard`` python package installed,
-    and the environment variable ``PROMPTGUARD_API_KEY`` set with
+    To use, you should have the ``opaqueprompts`` python package installed,
+    and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with
     your API key, or pass it as a named parameter to the constructor.
 
     Example:
         .. code-block:: python
 
-            from langchain.llms import PromptGuard
+            from langchain.llms import OpaquePrompts
             from langchain.chat_models import ChatOpenAI
 
-            prompt_guard_llm = PromptGuard(base_llm=ChatOpenAI())
+            op_llm = OpaquePrompts(base_llm=ChatOpenAI())
     """
 
     base_llm: BaseLanguageModel
@@ -39,29 +39,29 @@ class PromptGuard(LLM):
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
-        """Validates that the PromptGuard API key and the Python package exist."""
+        """Validates that the OpaquePrompts API key and the Python package exist."""
         try:
-            import promptguard as pg
+            import opaqueprompts as op
         except ImportError:
             raise ImportError(
-                "Could not import the `promptguard` Python package, "
-                "please install it with `pip install promptguard`."
+                "Could not import the `opaqueprompts` Python package, "
+                "please install it with `pip install opaqueprompts`."
             )
-        if pg.__package__ is None:
+        if op.__package__ is None:
             raise ValueError(
-                "Could not properly import `promptguard`, "
-                "promptguard.__package__ is None."
+                "Could not properly import `opaqueprompts`, "
+                "opaqueprompts.__package__ is None."
             )
         api_key = get_from_dict_or_env(
-            values, "promptguard_api_key", "PROMPTGUARD_API_KEY", default=""
+            values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default=""
         )
         if not api_key:
             raise ValueError(
-                "Could not find PROMPTGUARD_API_KEY in the environment. "
-                "Please set it to your PromptGuard API key."
-                "You can get it by creating an account on the PromptGuard website: "
-                "https://promptguard.opaque.co/ ."
+                "Could not find OPAQUEPROMPTS_API_KEY in the environment. "
+                "Please set it to your OpaquePrompts API key."
+                "You can get it by creating an account on the OpaquePrompts website: "
+                "https://opaqueprompts.opaque.co/ ."
             )
         return values
@@ -83,14 +83,14 @@ class PromptGuard(LLM):
         Example:
             .. code-block:: python
 
-                response = prompt_guard_llm("Tell me a joke.")
+                response = op_llm("Tell me a joke.")
         """
-        import promptguard as pg
+        import opaqueprompts as op
 
         _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
         # sanitize the prompt by replacing the sensitive information with a placeholder
-        sanitize_response: pg.SanitizeResponse = pg.sanitize([prompt])
+        sanitize_response: op.SanitizeResponse = op.sanitize([prompt])
         sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]
 
         # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
@@ -101,7 +101,7 @@ class PromptGuard(LLM):
         )
         # desanitize the response by restoring the original sensitive information
-        desanitize_response: pg.DesanitizeResponse = pg.desanitize(
+        desanitize_response: op.DesanitizeResponse = op.desanitize(
             llm_response,
             secure_context=sanitize_response.secure_context,
         )
@@ -113,4 +113,4 @@ class PromptGuard(LLM):
         This is an override of the base class method.
         """
-        return "promptguard"
+        return "opaqueprompts"

@@ -31,16 +31,16 @@ def sanitize(
     The `secure_context` needs to be passed to the `desanitize` function.
     """
     try:
-        import promptguard as pg
+        import opaqueprompts as op
     except ImportError:
         raise ImportError(
-            "Could not import the `promptguard` Python package, "
-            "please install it with `pip install promptguard`."
+            "Could not import the `opaqueprompts` Python package, "
+            "please install it with `pip install opaqueprompts`."
         )
 
     if isinstance(input, str):
         # the input could be a string, so we sanitize the string
-        sanitize_response: pg.SanitizeResponse = pg.sanitize([input])
+        sanitize_response: op.SanitizeResponse = op.sanitize([input])
         return {
             "sanitized_input": sanitize_response.sanitized_texts[0],
             "secure_context": sanitize_response.secure_context,
@@ -55,7 +55,7 @@ def sanitize(
         values.append(input[key])
 
     # sanitize the values
-    sanitize_values_response: pg.SanitizeResponse = pg.sanitize(values)
+    sanitize_values_response: op.SanitizeResponse = op.sanitize(values)
 
     # reconstruct the dict with the sanitized values
     sanitized_input_values = sanitize_values_response.sanitized_texts
@@ -85,13 +85,13 @@ def desanitize(sanitized_text: str, secure_context: bytes) -> str:
         De-sanitized text.
     """
     try:
-        import promptguard as pg
+        import opaqueprompts as op
     except ImportError:
         raise ImportError(
-            "Could not import the `promptguard` Python package, "
-            "please install it with `pip install promptguard`."
+            "Could not import the `opaqueprompts` Python package, "
+            "please install it with `pip install opaqueprompts`."
         )
-    desanitize_response: pg.DesanitizeResponse = pg.desanitize(
+    desanitize_response: op.DesanitizeResponse = op.desanitize(
         sanitized_text, secure_context
     )
     return desanitize_response.desanitized_text
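The renamed utility functions can also be called directly, outside a chain. A short sketch based on the signatures above, using string input and assuming the same `opaqueprompts` setup as the wrapper:

```python
import langchain.utilities.opaqueprompts as op

# sanitize() returns the redacted text plus a secure_context for later restoration
result = op.sanitize("My name is John Doe and my SSN is 123-45-6789.")
sanitized = result["sanitized_input"]

# desanitize() restores the original sensitive values using the secure_context
restored = op.desanitize(sanitized, result["secure_context"])
```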

@@ -1,7 +1,7 @@
-import langchain.utilities.promptguard as pgf
+import langchain.utilities.opaqueprompts as op
 from langchain import LLMChain, PromptTemplate
 from langchain.llms import OpenAI
-from langchain.llms.promptguard import PromptGuard
+from langchain.llms.opaqueprompts import OpaquePrompts
 from langchain.memory import ConversationBufferWindowMemory
 from langchain.schema.output_parser import StrOutputParser
 from langchain.schema.runnable import RunnableMap
@@ -42,10 +42,10 @@ Question: ```{question}```
 """
 
 
-def test_promptguard() -> None:
+def test_opaqueprompts() -> None:
     chain = LLMChain(
         prompt=PromptTemplate.from_template(prompt_template),
-        llm=PromptGuard(llm=OpenAI()),
+        llm=OpaquePrompts(llm=OpenAI()),
         memory=ConversationBufferWindowMemory(k=2),
     )
@@ -58,11 +58,11 @@ def test_promptguard() -> None:
     assert isinstance(output, str)
 
 
-def test_promptguard_functions() -> None:
+def test_opaqueprompts_functions() -> None:
     prompt = (PromptTemplate.from_template(prompt_template),)
     llm = OpenAI()
     pg_chain = (
-        pgf.sanitize
+        op.sanitize
         | RunnableMap(
             {
                 "response": (lambda x: x["sanitized_input"])  # type: ignore
@@ -72,7 +72,7 @@ def test_promptguard_functions() -> None:
                 "secure_context": lambda x: x["secure_context"],
             }
         )
-        | (lambda x: pgf.desanitize(x["response"], x["secure_context"]))
+        | (lambda x: op.desanitize(x["response"], x["secure_context"]))
     )
 
     pg_chain.invoke(