ibm: added partner package langchain_ibm, added LLM (#16512)

- **Description:** Added `langchain_ibm` as a LangChain partner
package for the IBM [watsonx.ai](https://www.ibm.com/products/watsonx-ai) LLM
provider (`WatsonxLLM`); see the usage sketch below
- **Dependencies:**
[ibm-watsonx-ai](https://pypi.org/project/ibm-watsonx-ai/)
- **Tag maintainer:**
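
A minimal usage sketch of the new class, mirroring the docstring example added in this PR (the API key and project id below are placeholders):

```python
from langchain_ibm import WatsonxLLM

# Placeholder credentials; supply your own IBM Cloud API key and project id.
watsonx_llm = WatsonxLLM(
    model_id="google/flan-ul2",
    url="https://us-south.ml.cloud.ibm.com",
    apikey="*****",
    project_id="*****",
)

print(watsonx_llm.invoke("What is a molecule?"))
```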
---------

Co-authored-by: Erick Friis <erick@langchain.dev>
Mateusz Szewczyk 2024-02-14 21:12:19 +01:00 committed by GitHub
parent f6d3a3546f
commit 916332ef5b
23 changed files with 1906 additions and 4 deletions


@@ -62,6 +62,8 @@ jobs:
GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
WATSONX_PROJECT_ID: ${{ secrets.WATSONX_PROJECT_ID }}
PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
PINECONE_ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
run: |


@@ -182,6 +182,8 @@ jobs:
GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
WATSONX_PROJECT_ID: ${{ secrets.WATSONX_PROJECT_ID }}
PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
PINECONE_ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
run: make integration_tests


@@ -109,7 +109,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms import WatsonxLLM\n",
"from langchain_ibm import WatsonxLLM\n",
"\n",
"watsonx_llm = WatsonxLLM(\n",
" model_id=\"google/flan-ul2\",\n",


@@ -2,6 +2,7 @@ import logging
import os
from typing import Any, Dict, Iterator, List, Mapping, Optional, Union
+from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
@@ -11,6 +12,9 @@ from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
logger = logging.getLogger(__name__)
+@deprecated(
+    since="0.0.18", removal="0.2", alternative_import="langchain_ibm.WatsonxLLM"
+)
class WatsonxLLM(BaseLLM):
"""
IBM watsonx.ai large language models.
@@ -293,9 +297,6 @@ class WatsonxLLM(BaseLLM):
generation_info=dict(
finish_reason=stream_response["results"][0].get("stop_reason", None),
llm_output={
"generated_token_count": stream_response["results"][0].get(
"generated_token_count", None
),
"model_id": self.model_id,
"deployment_id": self.deployment_id,
},
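
In short, this hunk removes the per-chunk `generated_token_count` bookkeeping, and the earlier hunks deprecate the community class in favor of the partner package. Migration is a one-line import change:

```python
# Before (deprecated since langchain-community 0.0.18, removal planned for 0.2;
# importing from here now emits a deprecation warning):
# from langchain_community.llms import WatsonxLLM

# After:
from langchain_ibm import WatsonxLLM
```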

libs/partners/ibm/.gitignore (vendored, new file, 1 line)

@@ -0,0 +1 @@
__pycache__

libs/partners/ibm/LICENSE (new file, 21 lines)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2023 LangChain, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -0,0 +1,58 @@
.PHONY: all format lint test tests integration_tests docker_tests help extended_tests
# Default target executed when no arguments are given to make.
all: help
# Define a variable for the test file path.
TEST_FILE ?= tests/unit_tests/
integration_test integration_tests: TEST_FILE=tests/integration_tests/
test tests integration_test integration_tests:
poetry run pytest $(TEST_FILE)
######################
# LINTING AND FORMATTING
######################
# Define a variable for Python and notebook files.
PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/partners/ibm --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langchain_ibm
lint_tests: PYTHON_FILES=tests
lint_tests: MYPY_CACHE=.mypy_cache_test
lint lint_diff lint_package lint_tests:
poetry run ruff .
poetry run ruff format $(PYTHON_FILES) --diff
poetry run ruff --select I $(PYTHON_FILES)
mkdir $(MYPY_CACHE); poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)
format format_diff:
poetry run ruff format $(PYTHON_FILES)
poetry run ruff --select I --fix $(PYTHON_FILES)
spell_check:
poetry run codespell --toml pyproject.toml
spell_fix:
poetry run codespell --toml pyproject.toml -w
check_imports: $(shell find langchain_ibm -name '*.py')
poetry run python ./scripts/check_imports.py $^
######################
# HELP
######################
help:
@echo '----'
@echo 'check_imports - check imports'
@echo 'format - run code formatters'
@echo 'lint - run linters'
@echo 'test - run unit tests'
@echo 'tests - run unit tests'
@echo 'test TEST_FILE=<test_file> - run all tests in file'


@@ -0,0 +1,18 @@
# langchain-ibm
This package contains the LangChain integrations for IBM watsonx.ai through their `ibm-watsonx-ai` SDK.
## Installation and Setup
- Install the LangChain partner package
```bash
pip install langchain-ibm
```
## LLM
See a [usage example](https://python.langchain.com/docs/integrations/llms/watsonxllm).
```python
from langchain_ibm import WatsonxLLM
```
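
Not shown in the README, but supported by the validators in the new `llms.py` (see `lc_secrets` further down): credentials can come from environment variables instead of constructor arguments. A hedged sketch with placeholder values:

```python
import os

from langchain_ibm import WatsonxLLM

# Placeholder values; WatsonxLLM reads these environment variables when the
# corresponding constructor arguments are omitted.
os.environ["WATSONX_URL"] = "https://us-south.ml.cloud.ibm.com"
os.environ["WATSONX_APIKEY"] = "*****"

llm = WatsonxLLM(model_id="google/flan-ul2", project_id="*****")
```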


@@ -0,0 +1,3 @@
from langchain_ibm.llms import WatsonxLLM
__all__ = ["WatsonxLLM"]


@@ -0,0 +1,387 @@
import logging
import os
from typing import Any, Dict, Iterator, List, Mapping, Optional, Union
from ibm_watsonx_ai.foundation_models import ModelInference # type: ignore
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.pydantic_v1 import Extra, SecretStr, root_validator
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
logger = logging.getLogger(__name__)
class WatsonxLLM(BaseLLM):
"""
IBM watsonx.ai large language models.
To use, you should have ``ibm_watsonx_ai`` python package installed,
and the environment variable ``WATSONX_APIKEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from ibm_watsonx_ai.metanames import GenTextParamsMetaNames
parameters = {
GenTextParamsMetaNames.DECODING_METHOD: "sample",
GenTextParamsMetaNames.MAX_NEW_TOKENS: 100,
GenTextParamsMetaNames.MIN_NEW_TOKENS: 1,
GenTextParamsMetaNames.TEMPERATURE: 0.5,
GenTextParamsMetaNames.TOP_K: 50,
GenTextParamsMetaNames.TOP_P: 1,
}
from langchain_ibm import WatsonxLLM
watsonx_llm = WatsonxLLM(
model_id="google/flan-ul2",
url="https://us-south.ml.cloud.ibm.com",
apikey="*****",
project_id="*****",
params=parameters,
)
"""
model_id: str = ""
"""Type of model to use."""
deployment_id: str = ""
"""Type of deployed model to use."""
project_id: str = ""
"""ID of the Watson Studio project."""
space_id: str = ""
"""ID of the Watson Studio space."""
url: Optional[SecretStr] = None
"""Url to Watson Machine Learning instance"""
apikey: Optional[SecretStr] = None
"""Apikey to Watson Machine Learning instance"""
token: Optional[SecretStr] = None
"""Token to Watson Machine Learning instance"""
password: Optional[SecretStr] = None
"""Password to Watson Machine Learning instance"""
username: Optional[SecretStr] = None
"""Username to Watson Machine Learning instance"""
instance_id: Optional[SecretStr] = None
"""Instance_id of Watson Machine Learning instance"""
version: Optional[SecretStr] = None
"""Version of Watson Machine Learning instance"""
params: Optional[dict] = None
"""Model parameters to use during generate requests."""
verify: Union[str, bool] = ""
"""User can pass as verify one of following:
the path to a CA_BUNDLE file
the path of directory with certificates of trusted CAs
True - default path to truststore will be taken
False - no verification will be made"""
streaming: bool = False
""" Whether to stream the results or not. """
watsonx_model: Any
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"url": "WATSONX_URL",
"apikey": "WATSONX_APIKEY",
"token": "WATSONX_TOKEN",
"password": "WATSONX_PASSWORD",
"username": "WATSONX_USERNAME",
"instance_id": "WATSONX_INSTANCE_ID",
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that credentials and python package exists in environment."""
values["url"] = convert_to_secret_str(
get_from_dict_or_env(values, "url", "WATSONX_URL")
)
if "cloud.ibm.com" in values.get("url", "").get_secret_value():
values["apikey"] = convert_to_secret_str(
get_from_dict_or_env(values, "apikey", "WATSONX_APIKEY")
)
else:
if (
not values["token"]
and "WATSONX_TOKEN" not in os.environ
and not values["password"]
and "WATSONX_PASSWORD" not in os.environ
and not values["apikey"]
and "WATSONX_APIKEY" not in os.environ
):
raise ValueError(
"Did not find 'token', 'password' or 'apikey',"
" please add an environment variable"
" `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or 'WATSONX_APIKEY' "
"which contains it,"
" or pass 'token', 'password' or 'apikey'"
" as a named parameter."
)
elif values["token"] or "WATSONX_TOKEN" in os.environ:
values["token"] = convert_to_secret_str(
get_from_dict_or_env(values, "token", "WATSONX_TOKEN")
)
elif values["password"] or "WATSONX_PASSWORD" in os.environ:
values["password"] = convert_to_secret_str(
get_from_dict_or_env(values, "password", "WATSONX_PASSWORD")
)
values["username"] = convert_to_secret_str(
get_from_dict_or_env(values, "username", "WATSONX_USERNAME")
)
elif values["apikey"] or "WATSONX_APIKEY" in os.environ:
values["apikey"] = convert_to_secret_str(
get_from_dict_or_env(values, "apikey", "WATSONX_APIKEY")
)
values["username"] = convert_to_secret_str(
get_from_dict_or_env(values, "username", "WATSONX_USERNAME")
)
if not values["instance_id"] or "WATSONX_INSTANCE_ID" not in os.environ:
values["instance_id"] = convert_to_secret_str(
get_from_dict_or_env(values, "instance_id", "WATSONX_INSTANCE_ID")
)
credentials = {
"url": values["url"].get_secret_value() if values["url"] else None,
"apikey": values["apikey"].get_secret_value() if values["apikey"] else None,
"token": values["token"].get_secret_value() if values["token"] else None,
"password": values["password"].get_secret_value()
if values["password"]
else None,
"username": values["username"].get_secret_value()
if values["username"]
else None,
"instance_id": values["instance_id"].get_secret_value()
if values["instance_id"]
else None,
"version": values["version"].get_secret_value()
if values["version"]
else None,
}
credentials_without_none_value = {
key: value for key, value in credentials.items() if value is not None
}
watsonx_model = ModelInference(
model_id=values["model_id"],
deployment_id=values["deployment_id"],
credentials=credentials_without_none_value,
params=values["params"],
project_id=values["project_id"],
space_id=values["space_id"],
verify=values["verify"],
)
values["watsonx_model"] = watsonx_model
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"deployment_id": self.deployment_id,
"params": self.params,
"project_id": self.project_id,
"space_id": self.space_id,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "IBM watsonx.ai"
@staticmethod
def _extract_token_usage(
response: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
if response is None:
return {"generated_token_count": 0, "input_token_count": 0}
input_token_count = 0
generated_token_count = 0
def get_count_value(key: str, result: Dict[str, Any]) -> int:
return result.get(key, 0) or 0
for res in response:
results = res.get("results")
if results:
input_token_count += get_count_value("input_token_count", results[0])
generated_token_count += get_count_value(
"generated_token_count", results[0]
)
return {
"generated_token_count": generated_token_count,
"input_token_count": input_token_count,
}
def _get_chat_params(
self, stop: Optional[List[str]] = None
) -> Optional[Dict[str, Any]]:
params: Optional[Dict[str, Any]] = {**self.params} if self.params else None
if stop is not None:
params = (params or {}) | {"stop_sequences": stop}
return params
def _create_llm_result(self, response: List[dict]) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for res in response:
results = res.get("results")
if results:
finish_reason = results[0].get("stop_reason")
gen = Generation(
text=results[0].get("generated_text"),
generation_info={"finish_reason": finish_reason},
)
generations.append([gen])
final_token_usage = self._extract_token_usage(response)
llm_output = {
"token_usage": final_token_usage,
"model_id": self.model_id,
"deployment_id": self.deployment_id,
}
return LLMResult(generations=generations, llm_output=llm_output)
def _stream_response_to_generation_chunk(
self,
stream_response: Dict[str, Any],
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
if not stream_response["results"]:
return GenerationChunk(text="")
return GenerationChunk(
text=stream_response["results"][0]["generated_text"],
generation_info=dict(
finish_reason=stream_response["results"][0].get("stop_reason", None),
llm_output={
"model_id": self.model_id,
"deployment_id": self.deployment_id,
},
),
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the IBM watsonx.ai inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = watsonx_llm("What is a molecule")
"""
result = self._generate(
prompts=[prompt], stop=stop, run_manager=run_manager, **kwargs
)
return result.generations[0][0].text
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> LLMResult:
"""Call the IBM watsonx.ai inference endpoint which then generate the response.
Args:
prompts: List of strings (prompts) to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The full LLMResult output.
Example:
.. code-block:: python
response = watsonx_llm.generate(["What is a molecule"])
"""
params = self._get_chat_params(stop=stop)
should_stream = stream if stream is not None else self.streaming
if should_stream:
if len(prompts) > 1:
raise ValueError(
f"WatsonxLLM currently only supports single prompt, got {prompts}"
)
generation = GenerationChunk(text="")
stream_iter = self._stream(
prompts[0], stop=stop, run_manager=run_manager, **kwargs
)
for chunk in stream_iter:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
if isinstance(generation.generation_info, dict):
llm_output = generation.generation_info.pop("llm_output")
return LLMResult(generations=[[generation]], llm_output=llm_output)
return LLMResult(generations=[[generation]])
else:
response = self.watsonx_model.generate(prompt=prompts, params=params)
return self._create_llm_result(response)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call the IBM watsonx.ai inference endpoint which then streams the response.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The iterator which yields generation chunks.
Example:
.. code-block:: python
response = watsonx_llm.stream("What is a molecule")
for chunk in response:
print(chunk, end='')
"""
params = self._get_chat_params(stop=stop)
for stream_resp in self.watsonx_model.generate_text_stream(
prompt=prompt, raw_response=True, params=params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
chunk = self._stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
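
A short sketch of the streaming path implemented by `_stream` above, following its docstring example (instantiation details are placeholders):

```python
from langchain_ibm import WatsonxLLM

# Placeholder configuration; see the class docstring for the full option set.
watsonx_llm = WatsonxLLM(
    model_id="google/flan-ul2",
    url="https://us-south.ml.cloud.ibm.com",
    apikey="*****",
    project_id="*****",
)

# Chunks are yielded as they arrive; each is also forwarded to the callback
# manager via on_llm_new_token.
for chunk in watsonx_llm.stream("What is a molecule"):
    print(chunk, end="")
```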


libs/partners/ibm/poetry.lock (generated, new file, 1095 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,96 @@
[tool.poetry]
name = "langchain-ibm"
version = "0.0.1"
description = "An integration package connecting IBM watsonx.ai and LangChain"
authors = ["IBM"]
readme = "README.md"
repository = "https://github.com/langchain-ai/langchain"
license = "MIT"
[tool.poetry.urls]
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/ibm"
[tool.poetry.dependencies]
python = ">=3.10,<4.0"
langchain-core = ">=0.1.22,<0.2"
ibm-watsonx-ai = "^0.1.4"
[tool.poetry.group.test]
optional = true
[tool.poetry.group.test.dependencies]
pytest = "^7.3.0"
freezegun = "^1.2.2"
pytest-mock = "^3.10.0"
syrupy = "^4.0.2"
pytest-watcher = "^0.3.4"
pytest-asyncio = "^0.21.1"
langchain-core = {path = "../../core", develop = true}
[tool.poetry.group.codespell]
optional = true
[tool.poetry.group.codespell.dependencies]
codespell = "^2.2.0"
[tool.poetry.group.test_integration]
optional = true
[tool.poetry.group.test_integration.dependencies]
[tool.poetry.group.lint]
optional = true
[tool.poetry.group.lint.dependencies]
ruff = "^0.1.5"
[tool.poetry.group.typing.dependencies]
mypy = "^0.991"
langchain-core = {path = "../../core", develop = true}
types-requests = "^2"
[tool.poetry.group.dev]
optional = true
[tool.poetry.group.dev.dependencies]
langchain-core = {path = "../../core", develop = true}
[tool.ruff]
select = [
"E", # pycodestyle
"F", # pyflakes
"I", # isort
]
[tool.mypy]
disallow_untyped_defs = "True"
[tool.coverage.run]
omit = [
"tests/*",
]
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.pytest.ini_options]
# --strict-markers will raise errors on unknown marks.
# https://docs.pytest.org/en/7.1.x/how-to/mark.html#raising-errors-on-unknown-marks
#
# https://docs.pytest.org/en/7.1.x/reference/reference.html
# --strict-config any warnings encountered while parsing the `pytest`
# section of the configuration file raise errors.
#
# https://github.com/tophat/syrupy
# --snapshot-warn-unused Prints a warning on unused snapshots rather than fail the test suite.
addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
# Registering custom markers.
# https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers
markers = [
"requires: mark tests as requiring a specific library",
"asyncio: mark tests as requiring asyncio",
"compile: mark placeholder test used to compile integration tests without running them",
"scheduled: mark tests to run in scheduled testing",
]
asyncio_mode = "auto"


@@ -0,0 +1,17 @@
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file)
traceback.print_exc()
print()
sys.exit(1 if has_failure else 0)


@@ -0,0 +1,27 @@
#!/bin/bash
#
# This script searches for lines starting with "import pydantic" or "from pydantic"
# in tracked files within a Git repository.
#
# Usage: ./scripts/check_pydantic.sh /path/to/repository
# Check if a path argument is provided
if [ $# -ne 1 ]; then
echo "Usage: $0 /path/to/repository"
exit 1
fi
repository_path="$1"
# Search for lines matching the pattern within the specified repository
result=$(git -C "$repository_path" grep -E '^import pydantic|^from pydantic')
# Check if any matching lines were found
if [ -n "$result" ]; then
echo "ERROR: The following lines need to be updated:"
echo "$result"
echo "Please replace the code with an import from langchain_core.pydantic_v1."
echo "For example, replace 'from pydantic import BaseModel'"
echo "with 'from langchain_core.pydantic_v1 import BaseModel'"
exit 1
fi
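
The replacement the script asks for, spelled out (taken directly from its error message):

```python
# Disallowed in partner packages:
# from pydantic import BaseModel

# Required instead: import through the langchain_core shim.
from langchain_core.pydantic_v1 import BaseModel
```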


@@ -0,0 +1,17 @@
#!/bin/bash
set -eu
# Initialize a variable to keep track of errors
errors=0
# make sure not importing from langchain or langchain_experimental
git --no-pager grep '^from langchain\.' . && errors=$((errors+1))
git --no-pager grep '^from langchain_experimental\.' . && errors=$((errors+1))
# Decide on an exit status based on the errors
if [ "$errors" -gt 0 ]; then
exit 1
else
exit 0
fi


@@ -0,0 +1,7 @@
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass


@@ -0,0 +1,83 @@
"""Test WatsonxLLM API wrapper.
You'll need to set WATSONX_APIKEY and WATSONX_PROJECT_ID environment variables.
"""
import os
from langchain_core.outputs import LLMResult
from langchain_ibm import WatsonxLLM
PROJECT_ID = os.environ.get("WATSONX_PROJECT_ID", "")
def test_watsonxllm_invoke() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
url="https://us-south.ml.cloud.ibm.com",
project_id=PROJECT_ID,
)
response = watsonxllm.invoke("What color sunflower is?")
assert isinstance(response, str)
assert len(response) > 0
def test_watsonxllm_generate() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
url="https://us-south.ml.cloud.ibm.com",
project_id=PROJECT_ID,
)
response = watsonxllm.generate(["What color sunflower is?"])
response_text = response.generations[0][0].text
assert isinstance(response, LLMResult)
assert len(response_text) > 0
def test_watsonxllm_generate_with_multiple_prompts() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
url="https://us-south.ml.cloud.ibm.com",
project_id=PROJECT_ID,
)
response = watsonxllm.generate(
["What color sunflower is?", "What color turtle is?"]
)
response_text = response.generations[0][0].text
assert isinstance(response, LLMResult)
assert len(response_text) > 0
def test_watsonxllm_generate_stream() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
url="https://us-south.ml.cloud.ibm.com",
project_id=PROJECT_ID,
)
response = watsonxllm.generate(["What color sunflower is?"], stream=True)
response_text = response.generations[0][0].text
assert isinstance(response, LLMResult)
assert len(response_text) > 0
def test_watsonxllm_stream() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
url="https://us-south.ml.cloud.ibm.com",
project_id=PROJECT_ID,
)
response = watsonxllm.invoke("What color sunflower is?")
stream_response = watsonxllm.stream("What color sunflower is?")
linked_text_stream = ""
for chunk in stream_response:
assert isinstance(
chunk, str
), f"chunk expect type '{str}', actual '{type(chunk)}'"
linked_text_stream += chunk
assert (
response == linked_text_stream
), "Linked text stream are not the same as generated text"


@@ -0,0 +1,7 @@
from langchain_ibm import __all__
EXPECTED_ALL = ["WatsonxLLM"]
def test_all_imports() -> None:
assert sorted(EXPECTED_ALL) == sorted(__all__)


@@ -0,0 +1,60 @@
"""Test WatsonxLLM API wrapper."""
import os
from langchain_ibm import WatsonxLLM
os.environ.pop("WATSONX_APIKEY", None)
os.environ.pop("WATSONX_PROJECT_ID", None)
def test_initialize_watsonxllm_bad_path_without_url() -> None:
try:
WatsonxLLM(
model_id="google/flan-ul2",
)
except ValueError as e:
assert "WATSONX_URL" in e.__str__()
def test_initialize_watsonxllm_cloud_bad_path() -> None:
try:
WatsonxLLM(model_id="google/flan-ul2", url="https://us-south.ml.cloud.ibm.com")
except ValueError as e:
assert "WATSONX_APIKEY" in e.__str__()
def test_initialize_watsonxllm_cpd_bad_path_without_all() -> None:
try:
WatsonxLLM(
model_id="google/flan-ul2",
url="https://cpd-zen.apps.cpd48.cp.fyre.ibm.com",
)
except ValueError as e:
assert (
"WATSONX_APIKEY" in e.__str__()
and "WATSONX_PASSWORD" in e.__str__()
and "WATSONX_TOKEN" in e.__str__()
)
def test_initialize_watsonxllm_cpd_bad_path_password_without_username() -> None:
try:
WatsonxLLM(
model_id="google/flan-ul2",
url="https://cpd-zen.apps.cpd48.cp.fyre.ibm.com",
password="test_password",
)
except ValueError as e:
assert "WATSONX_USERNAME" in e.__str__()
def test_initialize_watsonxllm_cpd_bad_path_apikey_without_username() -> None:
try:
WatsonxLLM(
model_id="google/flan-ul2",
url="https://cpd-zen.apps.cpd48.cp.fyre.ibm.com",
apikey="test_apikey",
)
except ValueError as e:
assert "WATSONX_USERNAME" in e.__str__()