This commit is contained in: parent 4b641f87ae, commit 796da053c6
@@ -1,155 +0,0 @@
"""__ModuleName__ large language models."""

from typing import (
    Any,
    List,
    Optional,
)

from langchain_core.callbacks import (
    CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseLLM
from langchain_core.outputs import LLMResult


class __ModuleName__LLM(BaseLLM):
    """__ModuleName__ completion model integration.

    # TODO: Replace with relevant packages, env vars.
    Setup:
        Install ``__package_name__`` and set environment variable ``__MODULE_NAME___API_KEY``.

        .. code-block:: bash

            pip install -U __package_name__
            export __MODULE_NAME___API_KEY="your-api-key"

    # TODO: Populate with relevant params.
    Key init args — completion params:
        model: str
            Name of __ModuleName__ model to use.
        temperature: float
            Sampling temperature.
        max_tokens: Optional[int]
            Max number of tokens to generate.

    # TODO: Populate with relevant params.
    Key init args — client params:
        timeout: Optional[float]
            Timeout for requests.
        max_retries: int
            Max number of retries.
        api_key: Optional[str]
            __ModuleName__ API key. If not passed in will be read from env var __MODULE_NAME___API_KEY.

    See full list of supported init args and their descriptions in the params section.

    # TODO: Replace with relevant init params.
    Instantiate:
        .. code-block:: python

            from __module_name__ import __ModuleName__LLM

            llm = __ModuleName__LLM(
                model="...",
                temperature=0,
                max_tokens=None,
                timeout=None,
                max_retries=2,
                # api_key="...",
                # other params...
            )

    Invoke:
        .. code-block:: python

            input_text = "The meaning of life is "
            llm.invoke(input_text)

        .. code-block:: python

            # TODO: Example output.

    # TODO: Delete if token-level streaming isn't supported.
    Stream:
        .. code-block:: python

            for chunk in llm.stream(input_text):
                print(chunk)

        .. code-block:: python

            # TODO: Example output.

        .. code-block:: python

            ''.join(llm.stream(input_text))

        .. code-block:: python

            # TODO: Example output.

    # TODO: Delete if native async isn't supported.
    Async:
        .. code-block:: python

            await llm.ainvoke(input_text)

            # stream:
            # async for chunk in llm.astream(input_text):

            # batch:
            # await llm.abatch([input_text])

        .. code-block:: python

            # TODO: Example output.
    """

    # TODO: This method must be implemented to generate text completions.
    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        raise NotImplementedError

    # TODO: Implement if __ModuleName__LLM supports async generation. Otherwise
    # delete method.
    # async def _agenerate(
    #     self,
    #     prompts: List[str],
    #     stop: Optional[List[str]] = None,
    #     run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    #     **kwargs: Any,
    # ) -> LLMResult:
    #     raise NotImplementedError

    # TODO: Implement if __ModuleName__LLM supports streaming. Otherwise delete method.
    # def _stream(
    #     self,
    #     prompt: str,
    #     stop: Optional[List[str]] = None,
    #     run_manager: Optional[CallbackManagerForLLMRun] = None,
    #     **kwargs: Any,
    # ) -> Iterator[GenerationChunk]:
    #     raise NotImplementedError

    # TODO: Implement if __ModuleName__LLM supports async streaming. Otherwise delete
    # method.
    # async def _astream(
    #     self,
    #     prompt: str,
    #     stop: Optional[List[str]] = None,
    #     run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    #     **kwargs: Any,
    # ) -> AsyncIterator[GenerationChunk]:
    #     raise NotImplementedError

    @property
    def _llm_type(self) -> str:
        """Return type of LLM."""
        return "__package_name_short__-llm"
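
For orientation, here is a minimal, self-contained sketch of what a filled-in _generate can look like. The ExampleLLM class and its echo-style "completion" are illustrative assumptions, not part of the template; a real integration would call its provider SDK where the comment indicates.

from typing import Any, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseLLM
from langchain_core.outputs import Generation, LLMResult


class ExampleLLM(BaseLLM):
    """Toy subclass showing the shape of a _generate implementation."""

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        # Build one list of Generation objects per input prompt.
        generations = []
        for prompt in prompts:
            # A real integration would call its client here, e.g.
            # text = self._client.complete(prompt=prompt, stop=stop, **kwargs)
            # (hypothetical client API). The echo below is a stand-in so the
            # sketch runs without a backend.
            text = prompt
            generations.append([Generation(text=text)])
        return LLMResult(generations=generations)

    @property
    def _llm_type(self) -> str:
        return "example-llm"

With this in place, ExampleLLM().invoke("The meaning of life is ") returns the prompt text unchanged, and stream/batch work through the inherited BaseLLM fallbacks.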
@@ -1,5 +1,5 @@
 [build-system]
-requires = [ "poetry-core>=1.0.0",]
+requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"

 [tool.poetry]
@@ -23,14 +23,16 @@ python = ">=3.9,<4.0"
 langchain-core = "^0.3.15"

 [tool.ruff.lint]
-select = [ "E", "F", "I", "T201",]
+select = ["E", "F", "I", "T201"]

 [tool.coverage.run]
-omit = [ "tests/*",]
+omit = ["tests/*"]

 [tool.pytest.ini_options]
 addopts = "--strict-markers --strict-config --durations=5"
-markers = [ "compile: mark placeholder test used to compile integration tests without running them",]
+markers = [
+    "compile: mark placeholder test used to compile integration tests without running them",
+]
 asyncio_mode = "auto"

 [tool.poetry.group.test]
@@ -53,6 +55,7 @@ pytest = "^7.4.3"
 pytest-asyncio = "^0.23.2"
 pytest-socket = "^0.7.0"
 pytest-watcher = "^0.3.4"
+langchain-tests = "0.3.0"

 [tool.poetry.group.codespell.dependencies]
 codespell = "^2.2.6"
@@ -65,14 +68,4 @@ ruff = "^0.5"
 [tool.poetry.group.typing.dependencies]
 mypy = "^1.10"

-[tool.poetry.group.test.dependencies.langchain-core]
-path = "../../core"
-develop = true
-
-[tool.poetry.group.dev.dependencies.langchain-core]
-path = "../../core"
-develop = true
-
-[tool.poetry.group.typing.dependencies.langchain-core]
-path = "../../core"
-develop = true
+[tool.poetry.group.dev.dependencies]
@@ -1,64 +0,0 @@
"""Test __ModuleName__LLM llm."""

from __module_name__.llms import __ModuleName__LLM


def test_stream() -> None:
    """Test streaming tokens from __ModuleName__LLM."""
    llm = __ModuleName__LLM()

    for token in llm.stream("I'm Pickle Rick"):
        assert isinstance(token, str)


async def test_astream() -> None:
    """Test streaming tokens from __ModuleName__LLM."""
    llm = __ModuleName__LLM()

    async for token in llm.astream("I'm Pickle Rick"):
        assert isinstance(token, str)


async def test_abatch() -> None:
    """Test batch tokens from __ModuleName__LLM."""
    llm = __ModuleName__LLM()

    result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token, str)


async def test_abatch_tags() -> None:
    """Test batch tokens from __ModuleName__LLM."""
    llm = __ModuleName__LLM()

    result = await llm.abatch(
        ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
    )
    for token in result:
        assert isinstance(token, str)


def test_batch() -> None:
    """Test batch tokens from __ModuleName__LLM."""
    llm = __ModuleName__LLM()

    result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token, str)


async def test_ainvoke() -> None:
    """Test invoke tokens from __ModuleName__LLM."""
    llm = __ModuleName__LLM()

    result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
    assert isinstance(result, str)


def test_invoke() -> None:
    """Test invoke tokens from __ModuleName__LLM."""
    llm = __ModuleName__LLM()

    result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
    assert isinstance(result, str)
@@ -1,8 +1,23 @@
 """Test chat model integration."""

+from typing import Type
+
+from langchain_core.language_models import BaseChatModel
+from langchain_standard_tests.unit_tests import ChatModelUnitTests
+
 from __module_name__.chat_models import Chat__ModuleName__


-def test_initialization() -> None:
-    """Test chat model initialization."""
-    Chat__ModuleName__()
+class TestChat__ModuleName__StandardUnitTests(ChatModelUnitTests):
+    """Standard LangChain interface tests, applied to __ModuleName__."""
+
+    @property
+    def chat_model_class(self) -> Type[BaseChatModel]:
+        """Return chat model class."""
+        return Chat__ModuleName__
+
+    @property
+    def chat_model_params(self) -> dict:
+        # TODO: Update with chat model parameters
+        # e.g. return {"model": "claude-3-haiku-20240307"} for ChatAnthropic
+        return {}
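
For context on the pattern above: ChatModelUnitTests ships the actual test_* methods, and pytest collects them through the subclass via the two properties. Below is a minimal sketch built around a toy model; the ChatParrot class and its "model" parameter are invented for illustration, and whether every standard test passes depends on the suite's requirements.

from typing import Any, List, Optional, Type

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_standard_tests.unit_tests import ChatModelUnitTests


class ChatParrot(BaseChatModel):
    """Toy chat model that echoes the last input message back."""

    model: str = "parrot-1"

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        # Echo the last message so the model works without a backend.
        reply = AIMessage(content=str(messages[-1].content))
        return ChatResult(generations=[ChatGeneration(message=reply)])

    @property
    def _llm_type(self) -> str:
        return "parrot-chat"


class TestChatParrotStandard(ChatModelUnitTests):
    @property
    def chat_model_class(self) -> Type[BaseChatModel]:
        return ChatParrot

    @property
    def chat_model_params(self) -> dict:
        # Constructor kwargs the suite uses when instantiating the model.
        return {"model": "parrot-1"}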
@@ -1,8 +1,43 @@
 """Test embedding model integration."""

+from typing import Tuple, Type
+
+from langchain_core.embeddings import Embeddings
+from langchain_standard_tests.unit_tests.embeddings import EmbeddingsUnitTests
+
 from __module_name__.embeddings import __ModuleName__Embeddings


+class Test__ModuleName__Standard(EmbeddingsUnitTests):
+    @property
+    def embeddings_class(self) -> Type[Embeddings]:
+        return __ModuleName__Embeddings
+
+    @property
+    def embeddings_params(self) -> dict:
+        return {"api_key": "test api key"}
+
+    @property
+    def init_from_env_params(self) -> Tuple[dict, dict, dict]:
+        """Return env vars, init args, and expected instance attrs for initializing
+        from env vars.
+
+        This powers tests for initializing from environment variables."""
+        return (
+            # env vars
+            {
+                "__MODULE_NAME___API_KEY": "test api key",
+            },
+            # init vars - only pass things that are required and CAN'T be set
+            # via env vars
+            {},
+            # expected attributes once the object has been constructed
+            {
+                "api_key": "test api key",
+            },
+        )
+
+
 def test_initialization() -> None:
     """Test embedding model initialization."""
     __ModuleName__Embeddings()
@@ -1,8 +0,0 @@
"""Test __ModuleName__ Chat API wrapper."""

from __module_name__ import __ModuleName__LLM


def test_initialization() -> None:
    """Test integration initialization."""
    __ModuleName__LLM()