Mirror of https://github.com/hwchase17/langchain.git, synced 2025-07-14 00:47:27 +00:00
langchain[patch]: Mask API key for ForeFrontAI LLM (#14013)
- **Description:** Mask API key for ForeFrontAI LLM and associated unit tests
- **Issue:** https://github.com/langchain-ai/langchain/issues/12165
- **Dependencies:** N/A
- **Tag maintainer:** @eyurtsev
- **Twitter handle:** `__mmahmad__`

Note: I made the API key non-optional, since linting required adding validation for None and the key is required anyway per the documentation: https://python.langchain.com/docs/integrations/llms/forefrontai
parent a0e859df51, commit 1600ebe6c7
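As a quick orientation before the diff, here is a minimal sketch of the behavior this patch targets, using a dummy key and the same constructor arguments as the new unit tests; it is illustrative only and not part of the change:

# Illustrative sketch (dummy key): the key is now stored as a pydantic SecretStr.
from langchain.llms.forefrontai import ForefrontAI

llm = ForefrontAI(forefrontai_api_key="secret-api-key", temperature=0.2)

print(llm.forefrontai_api_key)                      # prints "**********" (masked str form)
print(llm.forefrontai_api_key.get_secret_value())   # prints "secret-api-key" (explicit unwrap)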
libs/langchain/langchain/llms/forefrontai.py

@@ -1,12 +1,12 @@
 from typing import Any, Dict, List, Mapping, Optional

 import requests
-from langchain_core.pydantic_v1 import Extra, root_validator
+from langchain_core.pydantic_v1 import Extra, SecretStr, root_validator

 from langchain.callbacks.manager import CallbackManagerForLLMRun
 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
-from langchain.utils import get_from_dict_or_env
+from langchain.utils import convert_to_secret_str, get_from_dict_or_env


 class ForefrontAI(LLM):
@@ -41,7 +41,7 @@ class ForefrontAI(LLM):
     repetition_penalty: int = 1
     """Penalizes repeated tokens according to frequency."""

-    forefrontai_api_key: Optional[str] = None
+    forefrontai_api_key: SecretStr = None

     base_url: Optional[str] = None
     """Base url to use, if None decides based on model name."""
@@ -54,10 +54,9 @@ class ForefrontAI(LLM):
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key exists in environment."""
-        forefrontai_api_key = get_from_dict_or_env(
-            values, "forefrontai_api_key", "FOREFRONTAI_API_KEY"
+        values["forefrontai_api_key"] = convert_to_secret_str(
+            get_from_dict_or_env(values, "forefrontai_api_key", "FOREFRONTAI_API_KEY")
         )
-        values["forefrontai_api_key"] = forefrontai_api_key
         return values

     @property
@@ -102,10 +101,11 @@ class ForefrontAI(LLM):

                 response = ForefrontAI("Tell me a joke.")
         """
+        auth_value = f"Bearer {self.forefrontai_api_key.get_secret_value()}"
         response = requests.post(
             url=self.endpoint_url,
             headers={
-                "Authorization": f"Bearer {self.forefrontai_api_key}",
+                "Authorization": auth_value,
                 "Content-Type": "application/json",
             },
             json={"text": prompt, **self._default_params, **kwargs},
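The validator change above leans on convert_to_secret_str from langchain.utils together with pydantic's SecretStr. A small illustrative snippet of that wrapping behavior outside the class, assuming the same imports as the diff:

# Illustrative only: what the SecretStr wrapping in validate_environment provides.
from langchain_core.pydantic_v1 import SecretStr
from langchain.utils import convert_to_secret_str

key = convert_to_secret_str("secret-api-key")        # plain str -> SecretStr
assert isinstance(key, SecretStr)
assert str(key) == "**********"                       # printing/logging stays masked
assert key.get_secret_value() == "secret-api-key"     # explicit unwrap, as used for the Authorization header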
libs/langchain/tests/unit_tests/llms/test_forefrontai.py (new file, 50 lines)

@@ -0,0 +1,50 @@
+"""Test ForeFrontAI LLM"""
+from typing import cast
+
+from langchain_core.pydantic_v1 import SecretStr
+from pytest import CaptureFixture, MonkeyPatch
+
+from langchain.llms.forefrontai import ForefrontAI
+
+
+def test_forefrontai_api_key_is_secret_string() -> None:
+    """Test that the API key is stored as a SecretStr."""
+    llm = ForefrontAI(forefrontai_api_key="secret-api-key", temperature=0.2)
+    assert isinstance(llm.forefrontai_api_key, SecretStr)
+
+
+def test_forefrontai_api_key_masked_when_passed_from_env(
+    monkeypatch: MonkeyPatch, capsys: CaptureFixture
+) -> None:
+    """Test that the API key is masked when passed from an environment variable."""
+    monkeypatch.setenv("FOREFRONTAI_API_KEY", "secret-api-key")
+    llm = ForefrontAI(temperature=0.2)
+    print(llm.forefrontai_api_key, end="")
+    captured = capsys.readouterr()
+
+    assert captured.out == "**********"
+
+
+def test_forefrontai_api_key_masked_when_passed_via_constructor(
+    capsys: CaptureFixture,
+) -> None:
+    """Test that the API key is masked when passed via the constructor."""
+    llm = ForefrontAI(
+        forefrontai_api_key="secret-api-key",
+        temperature=0.2,
+    )
+    print(llm.forefrontai_api_key, end="")
+    captured = capsys.readouterr()
+
+    assert captured.out == "**********"
+
+
+def test_forefrontai_uses_actual_secret_value_from_secretstr() -> None:
+    """Test that the actual secret value is correctly retrieved."""
+    llm = ForefrontAI(
+        forefrontai_api_key="secret-api-key",
+        temperature=0.2,
+    )
+    assert (
+        cast(SecretStr, llm.forefrontai_api_key).get_secret_value() == "secret-api-key"
+    )
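These tests only construct the model and inspect the stored key, so they never call the ForeFrontAI API; assuming the repo's standard pytest layout, they can be run on their own with something like `pytest tests/unit_tests/llms/test_forefrontai.py` from the `libs/langchain` directory.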