community[major], core[patch], langchain[patch], experimental[patch]: Create langchain-community (#14463)
Moved the following modules to the new package langchain-community in a backwards compatible fashion:

```
mv langchain/langchain/adapters community/langchain_community
mv langchain/langchain/callbacks community/langchain_community/callbacks
mv langchain/langchain/chat_loaders community/langchain_community
mv langchain/langchain/chat_models community/langchain_community
mv langchain/langchain/document_loaders community/langchain_community
mv langchain/langchain/docstore community/langchain_community
mv langchain/langchain/document_transformers community/langchain_community
mv langchain/langchain/embeddings community/langchain_community
mv langchain/langchain/graphs community/langchain_community
mv langchain/langchain/llms community/langchain_community
mv langchain/langchain/memory/chat_message_histories community/langchain_community
mv langchain/langchain/retrievers community/langchain_community
mv langchain/langchain/storage community/langchain_community
mv langchain/langchain/tools community/langchain_community
mv langchain/langchain/utilities community/langchain_community
mv langchain/langchain/vectorstores community/langchain_community
mv langchain/langchain/agents/agent_toolkits community/langchain_community
mv langchain/langchain/cache.py community/langchain_community
```

Moved the following to core:

```
mv langchain/langchain/utils/json_schema.py core/langchain_core/utils
mv langchain/langchain/utils/html.py core/langchain_core/utils
mv langchain/langchain/utils/strings.py core/langchain_core/utils
cat langchain/langchain/utils/env.py >> core/langchain_core/utils/env.py
rm langchain/langchain/utils/env.py
```

See .scripts/community_split/script_integrations.sh for all changes
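Since the split is described as backwards compatible, the old `langchain` import paths should keep working alongside the new package. A minimal sketch of what that means for user code, using the `OpaquePrompts` class from the diff below as the example (the identity check assumes the old path re-exports the same class, which is the usual shim pattern):

```python
# New canonical location in the langchain-community package:
from langchain_community.llms import OpaquePrompts

# Old location kept working for backwards compatibility:
from langchain.llms import OpaquePrompts as LegacyOpaquePrompts

# Assumption: the compatibility shim re-exports the identical class object.
assert OpaquePrompts is LegacyOpaquePrompts
```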
libs/community/langchain_community/llms/opaqueprompts.py (new file, 116 lines)
@@ -0,0 +1,116 @@
import logging
from typing import Any, Dict, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import LLM
from langchain_core.pydantic_v1 import Extra, root_validator
from langchain_core.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class OpaquePrompts(LLM):
    """An LLM wrapper that uses OpaquePrompts to sanitize prompts.

    Wraps another LLM and sanitizes prompts before passing them to the LLM,
    then de-sanitizes the response.

    To use, you should have the ``opaqueprompts`` python package installed,
    and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with
    your API key, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.llms import OpaquePrompts
            from langchain_community.chat_models import ChatOpenAI

            op_llm = OpaquePrompts(base_llm=ChatOpenAI())
    """

    base_llm: BaseLanguageModel
    """The base LLM to use."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validates that the OpaquePrompts API key and the Python package exist."""
        try:
            import opaqueprompts as op
        except ImportError:
            raise ImportError(
                "Could not import the `opaqueprompts` Python package, "
                "please install it with `pip install opaqueprompts`."
            )
        if op.__package__ is None:
            raise ValueError(
                "Could not properly import `opaqueprompts`, "
                "opaqueprompts.__package__ is None."
            )

        api_key = get_from_dict_or_env(
            values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default=""
        )
        if not api_key:
            raise ValueError(
                "Could not find OPAQUEPROMPTS_API_KEY in the environment. "
                "Please set it to your OpaquePrompts API key. "
                "You can get it by creating an account on the OpaquePrompts website: "
                "https://opaqueprompts.opaque.co/ ."
            )
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call base LLM with sanitization before and de-sanitization after.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = op_llm("Tell me a joke.")
        """
        import opaqueprompts as op

        _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()

        # sanitize the prompt by replacing the sensitive information with a placeholder
        sanitize_response: op.SanitizeResponse = op.sanitize([prompt])
        sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]

        # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
        # call the LLM with the sanitized prompt and get the response
        llm_response = self.base_llm.predict(
            sanitized_prompt_value_str,
            stop=stop,
        )

        # desanitize the response by restoring the original sensitive information
        desanitize_response: op.DesanitizeResponse = op.desanitize(
            llm_response,
            secure_context=sanitize_response.secure_context,
        )
        return desanitize_response.desanitized_text

    @property
    def _llm_type(self) -> str:
        """Return type of LLM.

        This is an override of the base class method.
        """
        return "opaqueprompts"
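For reference, a minimal end-to-end sketch of the wrapper above in use, mirroring the class docstring. It assumes ``opaqueprompts`` is installed and that ``OPAQUEPROMPTS_API_KEY`` and an OpenAI API key are set in the environment; the prompt text is illustrative:

```python
from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms import OpaquePrompts

# Wrap a base chat model: prompts are sanitized before the underlying
# call and responses are de-sanitized afterwards.
op_llm = OpaquePrompts(base_llm=ChatOpenAI())

# Sensitive details (hypothetical here) are replaced with placeholders
# before reaching the model, then restored in the returned text.
response = op_llm("Write a thank-you note to Jane Doe at jane.doe@example.com.")
print(response)
```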