Compare commits

...

4 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| Bagatur | 63d30069e4 | fmt | 2024-03-31 14:49:43 -07:00 |
| Bagatur | 70c12f1343 | fmt | 2024-03-31 14:49:22 -07:00 |
| Jamsheed Mistri | 147dfc36f1 | docs: Layerup Security docs | 2024-03-29 22:44:56 -07:00 |
| Jamsheed Mistri | 2fcf736dbd | feat: Layerup Security integration | 2024-03-29 22:44:51 -07:00 |
3 changed files with 225 additions and 0 deletions

View File

@@ -0,0 +1,85 @@
# Layerup Security
The [Layerup Security](https://uselayerup.com) integration allows you to secure your calls to any LangChain LLM, LLM chain, or LLM agent. The Layerup Security object wraps around your existing LLM object, adding a secure layer between your users and your LLMs.
While the Layerup Security object is designed as an LLM, it is not actually an LLM itself; it simply wraps around an LLM, exposing the same functionality as the underlying LLM.
## Setup
First, you'll need a Layerup Security account from the Layerup [website](https://uselayerup.com).
Next, create a project via the [dashboard](https://dashboard.uselayerup.com), and copy your API key. We recommend putting your API key in your project's environment.
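For example, you could export the key as an environment variable in your shell. This is only an illustration; the variable name `LAYERUP_API_KEY` is an assumption, and the integration itself takes the key as a constructor argument:
```bash
# Illustrative only: keep your Layerup API key out of source code
export LAYERUP_API_KEY="your-layerup-api-key"
```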
Install the Layerup Security SDK:
```bash
pip install LayerupSecurity
```
And install LangChain Community:
```bash
pip install langchain-community
```
And now you're ready to start protecting your LLM calls with Layerup Security!
```python
from datetime import datetime

from langchain_community.llms.layerup_security import LayerupSecurity
from langchain_openai import OpenAI

# Create an instance of your favorite LLM
openai = OpenAI(
    model_name="gpt-3.5-turbo",
    openai_api_key="OPENAI_API_KEY",
)

# Configure Layerup Security
layerup_security = LayerupSecurity(
    # Specify a LLM that Layerup Security will wrap around
    llm=openai,

    # Layerup API key, from the Layerup dashboard
    layerup_api_key="LAYERUP_API_KEY",

    # Custom base URL, if self hosting
    layerup_api_base_url="https://api.uselayerup.com/v1",

    # List of guardrails to run on prompts before the LLM is invoked
    prompt_guardrails=[],

    # List of guardrails to run on responses from the LLM
    response_guardrails=["layerup.hallucination"],

    # Whether or not to mask the prompt for PII & sensitive data before it is sent to the LLM
    mask=False,

    # Metadata for abuse tracking, customer tracking, and scope tracking
    metadata={"customer": "example@uselayerup.com"},

    # Handler for guardrail violations on the prompt guardrails
    handle_prompt_guardrail_violation=(
        lambda violation: {
            "role": "assistant",
            "content": (
                "There was sensitive data! I cannot respond. "
                "Here's a dynamic canned response. Current date: {}"
            ).format(datetime.now())
        }
        if violation["offending_guardrail"] == "layerup.sensitive_data"
        else None
    ),

    # Handler for guardrail violations on the response guardrails
    handle_response_guardrail_violation=(
        lambda violation: {
            "role": "assistant",
            "content": (
                "Custom canned response with dynamic data! "
                "The violation rule was {}."
            ).format(violation["offending_guardrail"])
        }
    ),
)

response = layerup_security.invoke(
    "Summarize this message: my name is Bob Dylan. My SSN is 123-45-6789."
)
```
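
Since hardcoding keys in source is generally discouraged, here is a minimal sketch that reads both keys from environment variables and relies on the class defaults for everything else. It assumes `OPENAI_API_KEY` and `LAYERUP_API_KEY` have been set beforehand; the variable names are illustrative:

```python
import os

from langchain_community.llms.layerup_security import LayerupSecurity
from langchain_openai import OpenAI

# Assumes OPENAI_API_KEY and LAYERUP_API_KEY were exported beforehand (names are illustrative)
openai = OpenAI(
    model_name="gpt-3.5-turbo",
    openai_api_key=os.environ["OPENAI_API_KEY"],
)

layerup_security = LayerupSecurity(
    llm=openai,
    layerup_api_key=os.environ["LAYERUP_API_KEY"],
    response_guardrails=["layerup.hallucination"],
)

print(layerup_security.invoke("Summarize this message: the meeting moved to Friday."))
```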

View File

@@ -0,0 +1,96 @@
import logging
from typing import Any, Callable, Dict, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.pydantic_v1 import root_validator

logger = logging.getLogger(__name__)


def default_guardrail_violation_handler(violation: dict) -> str:
    """Return the guardrail's canned response if one is provided, otherwise raise."""
    if violation.get("canned_response"):
        return violation["canned_response"]
    guardrail_name = (
        f"Guardrail {violation.get('offending_guardrail')}"
        if violation.get("offending_guardrail")
        else "A guardrail"
    )
    raise ValueError(
        f"{guardrail_name} was violated without a proper guardrail violation handler."
    )


class LayerupSecurity(LLM):
    llm: LLM
    layerup_api_key: str
    layerup_api_base_url: str = "https://api.uselayerup.com/v1"
    prompt_guardrails: Optional[List[str]] = []
    response_guardrails: Optional[List[str]] = []
    mask: bool = False
    metadata: Optional[Dict[str, Any]] = {}
    handle_prompt_guardrail_violation: Callable[
        [dict], str
    ] = default_guardrail_violation_handler
    handle_response_guardrail_violation: Callable[
        [dict], str
    ] = default_guardrail_violation_handler
    client: Any  #: :meta private:

    @root_validator(pre=True)
    def validate_layerup_sdk(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        try:
            from layerup_security import LayerupSecurity as LayerupSecuritySDK

            values["client"] = LayerupSecuritySDK(
                api_key=values["layerup_api_key"],
                base_url=values["layerup_api_base_url"],
            )
        except ImportError:
            raise ImportError(
                "Could not import LayerupSecurity SDK. "
                "Please install it with `pip install LayerupSecurity`."
            )
        return values

    @property
    def _llm_type(self) -> str:
        return "layerup_security"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        messages = [{"role": "user", "content": prompt}]
        unmask_response = None

        # Optionally mask PII & sensitive data before the prompt is sent anywhere.
        if self.mask:
            messages, unmask_response = self.client.mask_prompt(messages, self.metadata)

        # Run prompt guardrails before invoking the wrapped LLM.
        if self.prompt_guardrails:
            security_response = self.client.execute_guardrails(
                self.prompt_guardrails, messages, self.metadata
            )
            if not security_response["all_safe"]:
                return self.handle_prompt_guardrail_violation(security_response)

        result = self.llm._call(
            messages[0]["content"], run_manager=run_manager, **kwargs
        )

        # Restore any masked values in the wrapped LLM's response.
        if self.mask and unmask_response:
            result = unmask_response(result)

        messages.append({"role": "assistant", "content": result})

        # Run response guardrails on the wrapped LLM's output.
        if self.response_guardrails:
            security_response = self.client.execute_guardrails(
                self.response_guardrails, messages, self.metadata
            )
            if not security_response["all_safe"]:
                return self.handle_response_guardrail_violation(security_response)

        return result

View File

@@ -0,0 +1,44 @@
from typing import Any, List, Optional

import pytest
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM

from langchain_community.llms.layerup_security import LayerupSecurity


class MockLLM(LLM):
    @property
    def _llm_type(self) -> str:
        return "mock_llm"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        return "Hi Bob! How are you?"


def test_layerup_security_with_invalid_api_key() -> None:
    mock_llm = MockLLM()
    layerup_security = LayerupSecurity(
        llm=mock_llm,
        layerup_api_key="-- invalid API key --",
        layerup_api_base_url="https://api.uselayerup.com/v1",
        prompt_guardrails=[],
        response_guardrails=["layerup.hallucination"],
        mask=False,
        metadata={"customer": "example@uselayerup.com"},
        handle_response_guardrail_violation=(
            lambda violation: (
                "Custom canned response with dynamic data! "
                "The violation rule was {offending_guardrail}."
            ).format(offending_guardrail=violation["offending_guardrail"])
        ),
    )

    with pytest.raises(Exception):
        layerup_security.invoke("My name is Bob Dylan. My SSN is 123-45-6789.")