together: mv to external repo (#25863)

Erick Friis 2024-08-29 16:42:59 -07:00 committed by GitHub
parent e7c856c298
commit 1640872059
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
28 changed files with 2 additions and 3676 deletions

libs/partners/together/.gitignore

@@ -1 +0,0 @@
__pycache__

libs/partners/together/LICENSE

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2024 LangChain, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

libs/partners/together/Makefile

@@ -1,56 +0,0 @@
.PHONY: all format lint test tests integration_tests docker_tests help extended_tests
# Default target executed when no arguments are given to make.
all: help
# Define a variable for the test file path.
TEST_FILE ?= tests/unit_tests/
integration_test integration_tests: TEST_FILE=tests/integration_tests/
test tests integration_test integration_tests:
poetry run pytest $(TEST_FILE)
######################
# LINTING AND FORMATTING
######################
# Define a variable for Python and notebook files.
PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/partners/together --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langchain_together
lint_tests: PYTHON_FILES=tests
lint_tests: MYPY_CACHE=.mypy_cache_test
lint lint_diff lint_package lint_tests:
[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check $(PYTHON_FILES)
[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff
[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)
format format_diff:
[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES)
[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I --fix $(PYTHON_FILES)
spell_check:
poetry run codespell --toml pyproject.toml
spell_fix:
poetry run codespell --toml pyproject.toml -w
check_imports: $(shell find langchain_together -name '*.py')
poetry run python ./scripts/check_imports.py $^
######################
# HELP
######################
help:
@echo '----'
@echo 'check_imports - check imports'
@echo 'format - run code formatters'
@echo 'lint - run linters'
@echo 'test - run unit tests'
@echo 'tests - run unit tests'
@echo 'test TEST_FILE=<test_file> - run all tests in file'
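
As a usage sketch, these targets are assumed to be run from the package directory (`libs/partners/together`):

```bash
make lint                                             # ruff check + ruff format --diff + mypy
make test TEST_FILE=tests/unit_tests/test_imports.py  # run a single test file
```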

libs/partners/together/README.md

@@ -1,28 +1,3 @@
# langchain-together
This package has moved!
This package contains the LangChain integrations for [Together AI](https://www.together.ai/) through their [APIs](https://docs.together.ai/).
## Installation and Setup
- Install the LangChain partner package
```bash
pip install -U langchain-together
```
- Get your Together AI API key from the [Together Dashboard](https://api.together.ai/settings/api-keys) and set it as an environment variable (`TOGETHER_API_KEY`)
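
For example (substituting your actual key for the placeholder):

```bash
export TOGETHER_API_KEY="your-api-key"
```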
## Chat Completions
This package contains the `ChatTogether` class, which is the recommended way to interface with Together AI chat models.
(Note: the image and completions endpoints are not yet covered in this README.)
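
Below is a minimal sketch, assuming `TOGETHER_API_KEY` is set in your environment; the model name is illustrative and any Together chat model works:

```python
from langchain_together import ChatTogether

llm = ChatTogether(
    model="meta-llama/Llama-3-70b-chat-hf",  # illustrative model choice
    temperature=0,
)

# invoke() accepts a string or a list of messages and returns an AIMessage.
response = llm.invoke("Translate to French: I love programming.")
print(response.content)
```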
## Embeddings
See a [usage example](https://python.langchain.com/docs/integrations/text_embedding/together/)
`togethercomputer/m2-bert-80M-8k-retrieval` is the default model for embeddings.
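
A minimal sketch using the default model, again assuming `TOGETHER_API_KEY` is set:

```python
from langchain_together import TogetherEmbeddings

embed = TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval")

# Embed a single query string and a small batch of documents.
query_vector = embed.embed_query("The meaning of life is 42")
doc_vectors = embed.embed_documents(["Document 1...", "Document 2..."])
print(len(query_vector), len(doc_vectors))
```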
The package now lives at https://github.com/langchain-ai/langchain-together/tree/main/libs/together

libs/partners/together/langchain_together/__init__.py

@@ -1,7 +0,0 @@
"""This package provides the Together integration for LangChain."""
from langchain_together.chat_models import ChatTogether
from langchain_together.embeddings import TogetherEmbeddings
from langchain_together.llms import Together
__all__ = ["ChatTogether", "Together", "TogetherEmbeddings"]

libs/partners/together/langchain_together/chat_models.py

@@ -1,364 +0,0 @@
"""Wrapper around Together AI's Chat Completions API."""
from typing import (
Any,
Dict,
List,
Optional,
)
import openai
from langchain_core.language_models.chat_models import LangSmithParams
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import (
from_env,
secret_from_env,
)
from langchain_openai.chat_models.base import BaseChatOpenAI
class ChatTogether(BaseChatOpenAI):
r"""ChatTogether chat model.
Setup:
Install ``langchain-together`` and set environment variable ``TOGETHER_API_KEY``.
.. code-block:: bash
pip install -U langchain-together
export TOGETHER_API_KEY="your-api-key"
Key init args - completion params:
model: str
Name of model to use.
temperature: float
Sampling temperature.
max_tokens: Optional[int]
Max number of tokens to generate.
logprobs: Optional[bool]
Whether to return logprobs.
Key init args - client params:
timeout: Union[float, Tuple[float, float], Any, None]
Timeout for requests.
max_retries: int
Max number of retries.
api_key: Optional[str]
Together API key. If not passed in, will be read from env var TOGETHER_API_KEY.
Instantiate:
.. code-block:: python
from langchain_together import ChatTogether
llm = ChatTogether(
model="meta-llama/Llama-3-70b-chat-hf",
temperature=0,
max_tokens=None,
timeout=None,
max_retries=2,
# api_key="...",
# other params...
)
Invoke:
.. code-block:: python
messages = [
(
"system",
"You are a helpful translator. Translate the user sentence to French.",
),
("human", "I love programming."),
]
llm.invoke(messages)
.. code-block:: python
AIMessage(
content="J'adore la programmation.",
response_metadata={
'token_usage': {'completion_tokens': 9, 'prompt_tokens': 32, 'total_tokens': 41},
'model_name': 'meta-llama/Llama-3-70b-chat-hf',
'system_fingerprint': None,
'finish_reason': 'stop',
'logprobs': None
},
id='run-168dceca-3b8b-4283-94e3-4c739dbc1525-0',
usage_metadata={'input_tokens': 32, 'output_tokens': 9, 'total_tokens': 41})
Stream:
.. code-block:: python
for chunk in llm.stream(messages):
print(chunk)
.. code-block:: python
content='J' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content="'" id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content='ad' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content='ore' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content=' la' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content=' programm' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content='ation' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content='.' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content='' response_metadata={'finish_reason': 'stop', 'model_name': 'meta-llama/Llama-3-70b-chat-hf'} id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
Async:
.. code-block:: python
await llm.ainvoke(messages)
# stream:
# async for chunk in llm.astream(messages)
# batch:
# await llm.abatch([messages])
.. code-block:: python
AIMessage(
content="J'adore la programmation.",
response_metadata={
'token_usage': {'completion_tokens': 9, 'prompt_tokens': 32, 'total_tokens': 41},
'model_name': 'meta-llama/Llama-3-70b-chat-hf',
'system_fingerprint': None,
'finish_reason': 'stop',
'logprobs': None
},
id='run-09371a11-7f72-4c53-8e7c-9de5c238b34c-0',
usage_metadata={'input_tokens': 32, 'output_tokens': 9, 'total_tokens': 41})
Tool calling:
.. code-block:: python
from langchain_core.pydantic_v1 import BaseModel, Field
# Only certain models support tool calling; check the Together AI website to confirm compatibility
llm = ChatTogether(model="mistralai/Mixtral-8x7B-Instruct-v0.1")
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
ai_msg = llm_with_tools.invoke(
"Which city is bigger: LA or NY?"
)
ai_msg.tool_calls
.. code-block:: python
[
{
'name': 'GetPopulation',
'args': {'location': 'NY'},
'id': 'call_m5tstyn2004pre9bfuxvom8x',
'type': 'tool_call'
},
{
'name': 'GetPopulation',
'args': {'location': 'LA'},
'id': 'call_0vjgq455gq1av5sp9eb1pw6a',
'type': 'tool_call'
}
]
Structured output:
.. code-block:: python
from typing import Optional
from langchain_core.pydantic_v1 import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
structured_llm = llm.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats")
.. code-block:: python
Joke(
setup='Why was the cat sitting on the computer?',
punchline='To keep an eye on the mouse!',
rating=7
)
JSON mode:
.. code-block:: python
json_llm = llm.bind(response_format={"type": "json_object"})
ai_msg = json_llm.invoke(
"Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]"
)
ai_msg.content
.. code-block:: python
' {\\n"random_ints": [\\n13,\\n54,\\n78,\\n45,\\n67,\\n90,\\n11,\\n29,\\n84,\\n33\\n]\\n}'
Token usage:
.. code-block:: python
ai_msg = llm.invoke(messages)
ai_msg.usage_metadata
.. code-block:: python
{'input_tokens': 37, 'output_tokens': 6, 'total_tokens': 43}
Logprobs:
.. code-block:: python
logprobs_llm = llm.bind(logprobs=True)
messages = [("human", "Say Hello World! Do not return anything else.")]
ai_msg = logprobs_llm.invoke(messages)
ai_msg.response_metadata["logprobs"]
.. code-block:: python
{
'content': None,
'token_ids': [22557, 3304, 28808, 2],
'tokens': [' Hello', ' World', '!', '</s>'],
'token_logprobs': [-4.7683716e-06, -5.9604645e-07, 0, -0.057373047]
}
Response metadata:
.. code-block:: python
ai_msg = llm.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
{
'token_usage': {
'completion_tokens': 4,
'prompt_tokens': 19,
'total_tokens': 23
},
'model_name': 'mistralai/Mixtral-8x7B-Instruct-v0.1',
'system_fingerprint': None,
'finish_reason': 'eos',
'logprobs': None
}
""" # noqa: E501
@property
def lc_secrets(self) -> Dict[str, str]:
"""A map of constructor argument names to secret ids.
For example,
{"together_api_key": "TOGETHER_API_KEY"}
"""
return {"together_api_key": "TOGETHER_API_KEY"}
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "together"]
@property
def lc_attributes(self) -> Dict[str, Any]:
"""List of attribute names that should be included in the serialized kwargs.
These attributes must be accepted by the constructor.
"""
attributes: Dict[str, Any] = {}
if self.together_api_base:
attributes["together_api_base"] = self.together_api_base
return attributes
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "together-chat"
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get the parameters used to invoke the model."""
params = super()._get_ls_params(stop=stop, **kwargs)
params["ls_provider"] = "together"
return params
model_name: str = Field(default="meta-llama/Llama-3-8b-chat-hf", alias="model")
"""Model name to use."""
together_api_key: Optional[SecretStr] = Field(
alias="api_key",
default_factory=secret_from_env("TOGETHER_API_KEY", default=None),
)
"""Together AI API key.
Automatically read from env variable `TOGETHER_API_KEY` if not provided.
"""
together_api_base: str = Field(
default_factory=from_env(
"TOGETHER_API_BASE", default="https://api.together.xyz/v1/"
),
alias="base_url",
)
class Config:
"""Pydantic config."""
allow_population_by_field_name = True
@root_validator(pre=False, skip_on_failure=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
client_params = {
"api_key": (
values["together_api_key"].get_secret_value()
if values["together_api_key"]
else None
),
"base_url": values["together_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
}
if not values.get("client"):
sync_specific = {"http_client": values["http_client"]}
values["client"] = openai.OpenAI(
**client_params, **sync_specific
).chat.completions
if not values.get("async_client"):
async_specific = {"http_client": values["http_async_client"]}
values["async_client"] = openai.AsyncOpenAI(
**client_params, **async_specific
).chat.completions
return values

libs/partners/together/langchain_together/embeddings.py

@@ -1,325 +0,0 @@
"""Wrapper around Together AI's Embeddings API."""
import logging
import warnings
from typing import (
Any,
Dict,
List,
Literal,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import openai
from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import (
BaseModel,
Field,
SecretStr,
root_validator,
)
from langchain_core.utils import (
from_env,
get_pydantic_field_names,
secret_from_env,
)
logger = logging.getLogger(__name__)
class TogetherEmbeddings(BaseModel, Embeddings):
"""Together embedding model integration.
Setup:
Install ``langchain-together`` and set environment variable
``TOGETHER_API_KEY``.
.. code-block:: bash
pip install -U langchain-together
export TOGETHER_API_KEY="your-api-key"
Key init args - completion params:
model: str
Name of Together model to use.
Key init args - client params:
api_key: Optional[SecretStr]
Together API key. Automatically read from env var ``TOGETHER_API_KEY`` if not provided.
See the full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_together import TogetherEmbeddings
embed = TogetherEmbeddings(
model="togethercomputer/m2-bert-80M-8k-retrieval",
# api_key="...",
# other params...
)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
Embed multiple texts:
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
Async:
.. code-block:: python
vector = await embed.aembed_query(input_text)
print(vector[:3])
# multiple:
# await embed.aembed_documents(input_texts)
.. code-block:: python
[-0.009100092574954033, 0.005071679595857859, -0.0029193938244134188]
"""
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model: str = "togethercomputer/m2-bert-80M-8k-retrieval"
"""Embeddings model name to use.
For example, 'togethercomputer/m2-bert-80M-8k-retrieval'.
"""
dimensions: Optional[int] = None
"""The number of dimensions the resulting output embeddings should have.
Not yet supported.
"""
together_api_key: Optional[SecretStr] = Field(
alias="api_key",
default_factory=secret_from_env("TOGETHER_API_KEY", default=None),
)
"""Together AI API key.
Automatically read from env variable `TOGETHER_API_KEY` if not provided.
"""
together_api_base: str = Field(
default_factory=from_env(
"TOGETHER_API_BASE", default="https://api.together.xyz/v1/"
),
alias="base_url",
)
"""Endpoint URL to use."""
embedding_ctx_length: int = 4096
"""The maximum number of tokens to embed at once.
Not yet supported.
"""
allowed_special: Union[Literal["all"], Set[str]] = set()
"""Not yet supported."""
disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
"""Not yet supported."""
chunk_size: int = 1000
"""Maximum number of texts to embed in each batch.
Not yet supported.
"""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
request_timeout: Optional[Union[float, Tuple[float, float], Any]] = Field(
default=None, alias="timeout"
)
"""Timeout for requests to Together embedding API. Can be float, httpx.Timeout or
None."""
show_progress_bar: bool = False
"""Whether to show a progress bar when embedding.
Not yet supported.
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
skip_empty: bool = False
"""Whether to skip empty strings when embedding or raise an error.
Defaults to not skipping.
Not yet supported."""
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: Union[Any, None] = None
"""Optional httpx.Client. Only used for sync invocations. Must specify
http_async_client as well if you'd like a custom client for async invocations.
"""
http_async_client: Union[Any, None] = None
"""Optional httpx.AsyncClient. Only used for async invocations. Must specify
http_client as well if you'd like a custom client for sync invocations."""
class Config:
"""Configuration for this pydantic object."""
extra = "forbid"
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
warnings.warn(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator(pre=False, skip_on_failure=True)
def post_init(cls, values: Dict) -> Dict:
"""Logic that will post Pydantic initialization."""
client_params = {
"api_key": (
values["together_api_key"].get_secret_value()
if values["together_api_key"]
else None
),
"base_url": values["together_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
}
if not values.get("client"):
sync_specific = (
{"http_client": values["http_client"]} if values["http_client"] else {}
)
values["client"] = openai.OpenAI(
**client_params, **sync_specific
).embeddings
if not values.get("async_client"):
async_specific = (
{"http_client": values["http_async_client"]}
if values["http_async_client"]
else {}
)
values["async_client"] = openai.AsyncOpenAI(
**client_params, **async_specific
).embeddings
return values
@property
def _invocation_params(self) -> Dict[str, Any]:
params: Dict = {"model": self.model, **self.model_kwargs}
if self.dimensions is not None:
params["dimensions"] = self.dimensions
return params
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of document texts using passage model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = []
params = self._invocation_params
params["model"] = params["model"]
for text in texts:
response = self.client.create(input=text, **params)
if not isinstance(response, dict):
response = response.model_dump()
embeddings.extend([i["embedding"] for i in response["data"]])
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed query text using query model.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
params = self._invocation_params
params["model"] = params["model"]
response = self.client.create(input=text, **params)
if not isinstance(response, dict):
response = response.model_dump()
return response["data"][0]["embedding"]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of document texts using passage model asynchronously.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = []
params = self._invocation_params
params["model"] = params["model"]
for text in texts:
response = await self.async_client.create(input=text, **params)
if not isinstance(response, dict):
response = response.model_dump()
embeddings.extend([i["embedding"] for i in response["data"]])
return embeddings
async def aembed_query(self, text: str) -> List[float]:
"""Asynchronous Embed query text using query model.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
params = self._invocation_params
params["model"] = params["model"]
response = await self.async_client.create(input=text, **params)
if not isinstance(response, dict):
response = response.model_dump()
return response["data"][0]["embedding"]

libs/partners/together/langchain_together/llms.py

@@ -1,224 +0,0 @@
"""Wrapper around Together AI's Completion API."""
import logging
import warnings
from typing import Any, Dict, List, Optional
import requests
from aiohttp import ClientSession
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import (
secret_from_env,
)
logger = logging.getLogger(__name__)
class Together(LLM):
"""LLM models from `Together`.
To use, you'll need an API key which you can find here:
https://api.together.ai/settings/api-keys. This can be passed in as init param
``together_api_key`` or set as environment variable ``TOGETHER_API_KEY``.
Together AI API reference: https://docs.together.ai/reference/completions
Example:
.. code-block:: python
from langchain_together import Together
model = Together(model="mistralai/Mixtral-8x7B-Instruct-v0.1")
"""
base_url: str = "https://api.together.xyz/v1/completions"
"""Base completions API URL."""
together_api_key: SecretStr = Field(
alias="api_key",
default_factory=secret_from_env("TOGETHER_API_KEY"),
)
"""Together AI API key.
Automatically read from env variable `TOGETHER_API_KEY` if not provided.
"""
model: str
"""Model name. Available models listed here:
Base Models: https://docs.together.ai/docs/inference-models#language-models
Chat Models: https://docs.together.ai/docs/inference-models#chat-models
"""
temperature: Optional[float] = None
"""Model temperature."""
top_p: Optional[float] = None
"""Used to dynamically adjust the number of choices for each predicted token based
on the cumulative probabilities. A value of 1 will always yield the same
output. A temperature less than 1 favors more correctness and is appropriate
for question answering or summarization. A value greater than 1 introduces more
randomness in the output.
"""
top_k: Optional[int] = None
"""Used to limit the number of choices for the next predicted word or token. It
specifies the maximum number of tokens to consider at each step, based on their
probability of occurrence. This technique helps to speed up the generation
process and can improve the quality of the generated text by focusing on the
most likely options.
"""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
repetition_penalty: Optional[float] = None
"""A number that controls the diversity of generated text by reducing the
likelihood of repeated sequences. Higher values decrease repetition.
"""
logprobs: Optional[int] = None
"""An integer that specifies how many top token log probabilities are included in
the response for each token generation step.
"""
class Config:
"""Configuration for this pydantic object."""
extra = "forbid"
allow_population_by_field_name = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
if values.get("max_tokens") is None:
warnings.warn(
"The completions endpoint, has 'max_tokens' as required argument. "
"The default value is being set to 200 "
"Consider setting this value, when initializing LLM"
)
values["max_tokens"] = 200 # Default Value
return values
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "together"
def _format_output(self, output: dict) -> str:
return output["choices"][0]["text"]
@property
def default_params(self) -> Dict[str, Any]:
"""Return the default parameters for the Together model.
Returns:
A dictionary containing the default parameters.
"""
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"max_tokens": self.max_tokens,
"repetition_penalty": self.repetition_penalty,
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Together's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: The CallbackManager for the LLM run; not used at the moment.
kwargs: Additional parameters to pass to the model.
Returns:
The string generated by the model.
"""
headers = {
"Authorization": f"Bearer {self.together_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
stop_to_use = stop[0] if stop and len(stop) == 1 else stop
payload: Dict[str, Any] = {
**self.default_params,
"prompt": prompt,
"stop": stop_to_use,
**kwargs,
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
response = requests.post(url=self.base_url, json=payload, headers=headers)
if response.status_code >= 500:
raise Exception(f"Together Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"Together received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"Together returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
data = response.json()
output = self._format_output(data)
return output
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call Together model to get predictions based on the prompt.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: The CallbackManager for the LLM run; not used at the moment.
kwargs: Additional parameters to pass to the model.
Returns:
The string generated by the model.
"""
headers = {
"Authorization": f"Bearer {self.together_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
stop_to_use = stop[0] if stop and len(stop) == 1 else stop
payload: Dict[str, Any] = {
**self.default_params,
"prompt": prompt,
"stop": stop_to_use,
**kwargs,
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
async with ClientSession() as session:
async with session.post(
self.base_url, json=payload, headers=headers
) as response:
if response.status >= 500:
raise Exception(f"Together Server: Error {response.status}")
elif response.status >= 400:
raise ValueError(
f"Together received an invalid payload: {response.text}"
)
elif response.status != 200:
raise Exception(
f"Together returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
output = self._format_output(response_json)
return output

libs/partners/together/poetry.lock (file diff suppressed because it is too large)

libs/partners/together/pyproject.toml

@@ -1,109 +0,0 @@
[build-system]
requires = [ "poetry-core>=1.0.0",]
build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "langchain-together"
version = "0.1.6"
description = "An integration package connecting Together AI and LangChain"
authors = []
readme = "README.md"
repository = "https://github.com/langchain-ai/langchain"
license = "MIT"
[tool.mypy]
disallow_untyped_defs = "True"
[tool.poetry.urls]
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/together"
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-together%3D%3D0%22&expanded=true"
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = "^0.2.35"
langchain-openai = "^0.1.16"
requests = "^2"
aiohttp = "^3.9.1"
[tool.ruff.lint]
select = [ "E", "F", "I", "D",]
[tool.coverage.run]
omit = [ "tests/*",]
[tool.pytest.ini_options]
addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
markers = [ "requires: mark tests as requiring a specific library", "asyncio: mark tests as requiring asyncio", "compile: mark placeholder test used to compile integration tests without running them",]
asyncio_mode = "auto"
[tool.poetry.group.test]
optional = true
[tool.poetry.group.codespell]
optional = true
[tool.poetry.group.test_integration]
optional = true
[tool.poetry.group.lint]
optional = true
[tool.poetry.group.typing]
optional = true
[tool.poetry.group.dev]
optional = true
[tool.ruff.lint.pydocstyle]
convention = "google"
[tool.ruff.lint.per-file-ignores]
"tests/**" = [ "D",]
[tool.poetry.group.test.dependencies]
pytest = "^7.3.0"
freezegun = "^1.2.2"
pytest-mock = "^3.10.0"
syrupy = "^4.0.2"
pytest-watcher = "^0.3.4"
pytest-asyncio = "^0.21.1"
docarray = "^0.32.1"
[tool.poetry.group.codespell.dependencies]
codespell = "^2.2.0"
[tool.poetry.group.test_integration.dependencies]
[[tool.poetry.group.test_integration.dependencies.numpy]]
version = "^1"
python = "<3.12"
[[tool.poetry.group.test_integration.dependencies.numpy]]
version = "^1.26.0"
python = ">=3.12"
[tool.poetry.group.lint.dependencies]
ruff = "^0.5"
[tool.poetry.group.typing.dependencies]
mypy = "^1.10"
types-requests = "^2"
[tool.poetry.group.test.dependencies.langchain-openai]
path = "../openai"
develop = true
[tool.poetry.group.test.dependencies.langchain-core]
path = "../../core"
develop = true
[tool.poetry.group.test.dependencies.langchain-standard-tests]
path = "../../standard-tests"
develop = true
[tool.poetry.group.typing.dependencies.langchain-core]
path = "../../core"
develop = true
[tool.poetry.group.dev.dependencies.langchain-core]
path = "../../core"
develop = true

libs/partners/together/scripts/check_imports.py

@@ -1,19 +0,0 @@
"""This module checks if the given python files can be imported without error."""
import sys
import traceback
from importlib.util import module_from_spec, spec_from_file_location
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file)
traceback.print_exc()
print()
sys.exit(1 if has_failure else 0)

libs/partners/together/scripts/check_pydantic.sh

@@ -1,27 +0,0 @@
#!/bin/bash
#
# This script searches for lines starting with "import pydantic" or "from pydantic"
# in tracked files within a Git repository.
#
# Usage: ./scripts/check_pydantic.sh /path/to/repository
# Check if a path argument is provided
if [ $# -ne 1 ]; then
echo "Usage: $0 /path/to/repository"
exit 1
fi
repository_path="$1"
# Search for lines matching the pattern within the specified repository
result=$(git -C "$repository_path" grep -E '^import pydantic|^from pydantic')
# Check if any matching lines were found
if [ -n "$result" ]; then
echo "ERROR: The following lines need to be updated:"
echo "$result"
echo "Please replace the code with an import from langchain_core.pydantic_v1."
echo "For example, replace 'from pydantic import BaseModel'"
echo "with 'from langchain_core.pydantic_v1 import BaseModel'"
exit 1
fi

libs/partners/together/scripts/lint_imports.sh

@@ -1,17 +0,0 @@
#!/bin/bash
set -eu
# Initialize a variable to keep track of errors
errors=0
# make sure not importing from langchain or langchain_experimental
git --no-pager grep '^from langchain\.' . && errors=$((errors+1))
git --no-pager grep '^from langchain_experimental\.' . && errors=$((errors+1))
# Decide on an exit status based on the errors
if [ "$errors" -gt 0 ]; then
exit 1
else
exit 0
fi

libs/partners/together/tests/integration_tests/test_chat_models.py

@@ -1,136 +0,0 @@
import pytest # type: ignore[import-not-found]
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_together import ChatTogether
def test_chat_together_model() -> None:
"""Test ChatTogether wrapper handles model_name."""
chat = ChatTogether(model="foo")
assert chat.model_name == "foo"
chat = ChatTogether(model_name="bar") # type: ignore[call-arg]
assert chat.model_name == "bar"
def test_chat_together_system_message() -> None:
"""Test ChatOpenAI wrapper with system message."""
chat = ChatTogether(max_tokens=10)
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat.invoke([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_chat_together_llm_output_contains_model_name() -> None:
"""Test llm_output contains model_name."""
chat = ChatTogether(max_tokens=10)
message = HumanMessage(content="Hello")
llm_result = chat.generate([[message]])
assert llm_result.llm_output is not None
assert llm_result.llm_output["model_name"] == chat.model_name
def test_chat_together_streaming_llm_output_contains_model_name() -> None:
"""Test llm_output contains model_name."""
chat = ChatTogether(max_tokens=10, streaming=True)
message = HumanMessage(content="Hello")
llm_result = chat.generate([[message]])
assert llm_result.llm_output is not None
assert llm_result.llm_output["model_name"] == chat.model_name
def test_chat_together_invalid_streaming_params() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
with pytest.raises(ValueError):
ChatTogether(
max_tokens=10,
streaming=True,
temperature=0,
n=5,
)
def test_chat_together_extra_kwargs() -> None:
"""Test extra kwargs to chat together."""
# Check that foo is saved in extra_kwargs.
llm = ChatTogether(foo=3, max_tokens=10) # type: ignore[call-arg]
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
# Test that if extra_kwargs are provided, they are added to it.
llm = ChatTogether(foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
ChatTogether(foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg]
# Test that if explicit param is specified in kwargs it errors
with pytest.raises(ValueError):
ChatTogether(model_kwargs={"temperature": 0.2})
# Test that "model" cannot be specified in kwargs
with pytest.raises(ValueError):
ChatTogether(model_kwargs={"model": "meta-llama/Llama-3-8b-chat-hf"})
def test_stream() -> None:
"""Test streaming tokens from Together AI."""
llm = ChatTogether()
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
async def test_astream() -> None:
"""Test streaming tokens from Together AI."""
llm = ChatTogether()
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token.content, str)
async def test_abatch() -> None:
"""Test streaming tokens from ChatTogether."""
llm = ChatTogether()
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
async def test_abatch_tags() -> None:
"""Test batch tokens from ChatTogether."""
llm = ChatTogether()
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token.content, str)
def test_batch() -> None:
"""Test batch tokens from ChatTogether."""
llm = ChatTogether()
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
async def test_ainvoke() -> None:
"""Test invoke tokens from ChatTogether."""
llm = ChatTogether()
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result.content, str)
def test_invoke() -> None:
"""Test invoke tokens from ChatTogether."""
llm = ChatTogether()
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result.content, str)

libs/partners/together/tests/integration_tests/test_chat_models_standard.py

@@ -1,38 +0,0 @@
"""Standard LangChain interface tests"""
from typing import Optional, Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_standard_tests.integration_tests import ChatModelIntegrationTests
from langchain_together import ChatTogether
# Initialize the rate limiter in global scope, so it can be re-used
# across tests.
rate_limiter = InMemoryRateLimiter(
requests_per_second=0.5,
)
class TestTogetherStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatTogether
@property
def chat_model_params(self) -> dict:
return {
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"rate_limiter": rate_limiter,
}
@property
def tool_choice_value(self) -> Optional[str]:
"""Value to use for tool choice when used in tests."""
return "tool_name"
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)

libs/partners/together/tests/integration_tests/test_compile.py

@@ -1,7 +0,0 @@
import pytest # type: ignore[import-not-found]
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass

libs/partners/together/tests/integration_tests/test_embeddings.py

@@ -1,37 +0,0 @@
"""Test Together AI embeddings."""
from langchain_together import TogetherEmbeddings
def test_langchain_together_embed_documents() -> None:
"""Test Together AI embeddings."""
documents = ["foo bar", "bar foo"]
embedding = TogetherEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) > 0
def test_langchain_together_embed_query() -> None:
"""Test Together AI embeddings."""
query = "foo bar"
embedding = TogetherEmbeddings()
output = embedding.embed_query(query)
assert len(output) > 0
async def test_langchain_together_aembed_documents() -> None:
"""Test Together AI embeddings asynchronous."""
documents = ["foo bar", "bar foo"]
embedding = TogetherEmbeddings()
output = await embedding.aembed_documents(documents)
assert len(output) == 2
assert len(output[0]) > 0
async def test_langchain_together_aembed_query() -> None:
"""Test Together AI embeddings asynchronous."""
query = "foo bar"
embedding = TogetherEmbeddings()
output = await embedding.aembed_query(query)
assert len(output) > 0

libs/partners/together/tests/integration_tests/test_llms.py

@@ -1,39 +0,0 @@
"""Test Together API wrapper.
In order to run this test, you need a Together API key.
You can get one by registering for free at https://api.together.ai/.
A test key can be found at https://api.together.xyz/settings/api-keys.
You'll then need to set the TOGETHER_API_KEY environment variable to your API key.
"""
import pytest  # type: ignore[import-not-found]
from langchain_together import Together
def test_together_call() -> None:
"""Test simple call to together."""
llm = Together( # type: ignore[call-arg]
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
output = llm.invoke("Say foo:")
assert llm._llm_type == "together"
assert isinstance(output, str)
assert len(output) > 0
async def test_together_acall() -> None:
"""Test simple call to together."""
llm = Together( # type: ignore[call-arg]
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
output = await llm.agenerate(["Say foo:"], stop=["bar"])
assert llm._llm_type == "together"
output_text = output.generations[0][0].text
assert isinstance(output_text, str)
assert output_text.count("bar") <= 1

libs/partners/together/tests/unit_tests/test_chat_models.py

@@ -1,122 +0,0 @@
import json
import pytest # type: ignore[import-not-found]
from langchain_core.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_openai.chat_models.base import (
_convert_dict_to_message,
_convert_message_to_dict,
)
from langchain_together import ChatTogether
def test_initialization() -> None:
"""Test chat model initialization."""
ChatTogether()
def test_together_model_param() -> None:
llm = ChatTogether(model="foo")
assert llm.model_name == "foo"
llm = ChatTogether(model_name="foo") # type: ignore[call-arg]
assert llm.model_name == "foo"
ls_params = llm._get_ls_params()
assert ls_params["ls_provider"] == "together"
def test_function_dict_to_message_function_message() -> None:
content = json.dumps({"result": "Example #1"})
name = "test_function"
result = _convert_dict_to_message(
{
"role": "function",
"name": name,
"content": content,
}
)
assert isinstance(result, FunctionMessage)
assert result.name == name
assert result.content == content
def test_convert_dict_to_message_human() -> None:
message = {"role": "user", "content": "foo"}
result = _convert_dict_to_message(message)
expected_output = HumanMessage(content="foo")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test__convert_dict_to_message_human_with_name() -> None:
message = {"role": "user", "content": "foo", "name": "test"}
result = _convert_dict_to_message(message)
expected_output = HumanMessage(content="foo", name="test")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_ai() -> None:
message = {"role": "assistant", "content": "foo"}
result = _convert_dict_to_message(message)
expected_output = AIMessage(content="foo")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_ai_with_name() -> None:
message = {"role": "assistant", "content": "foo", "name": "test"}
result = _convert_dict_to_message(message)
expected_output = AIMessage(content="foo", name="test")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_system() -> None:
message = {"role": "system", "content": "foo"}
result = _convert_dict_to_message(message)
expected_output = SystemMessage(content="foo")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_system_with_name() -> None:
message = {"role": "system", "content": "foo", "name": "test"}
result = _convert_dict_to_message(message)
expected_output = SystemMessage(content="foo", name="test")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_tool() -> None:
message = {"role": "tool", "content": "foo", "tool_call_id": "bar"}
result = _convert_dict_to_message(message)
expected_output = ToolMessage(content="foo", tool_call_id="bar")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
@pytest.fixture
def mock_completion() -> dict:
return {
"id": "chatcmpl-7fcZavknQda3SQ",
"object": "chat.completion",
"created": 1689989000,
"model": "meta-llama/Llama-3-8b-chat-hf",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Bab",
"name": "KimSolar",
},
"finish_reason": "stop",
}
],
}

libs/partners/together/tests/unit_tests/test_chat_models_standard.py

@@ -1,20 +0,0 @@
"""Standard LangChain interface tests"""
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_standard_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_together import ChatTogether
class TestTogetherStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatTogether
@property
def chat_model_params(self) -> dict:
return {"model": "meta-llama/Llama-3-8b-chat-hf"}

libs/partners/together/tests/unit_tests/test_embeddings.py

@@ -1,25 +0,0 @@
"""Test embedding model integration."""
import os
import pytest # type: ignore[import-not-found]
from langchain_together import TogetherEmbeddings
os.environ["TOGETHER_API_KEY"] = "foo"
def test_initialization() -> None:
"""Test embedding model initialization."""
TogetherEmbeddings()
def test_together_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
TogetherEmbeddings(model_kwargs={"model": "foo"})
def test_together_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = TogetherEmbeddings(foo="bar") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}

libs/partners/together/tests/unit_tests/test_imports.py

@@ -1,7 +0,0 @@
from langchain_together import __all__
EXPECTED_ALL = ["ChatTogether", "TogetherEmbeddings", "Together"]
def test_all_imports() -> None:
assert sorted(EXPECTED_ALL) == sorted(__all__)

libs/partners/together/tests/unit_tests/test_llms.py

@@ -1,101 +0,0 @@
from typing import cast
from langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture, MonkeyPatch # type: ignore[import-not-found]
from langchain_together import Together
def test_together_api_key_is_secret_string() -> None:
"""Test that the API key is stored as a SecretStr."""
llm = Together(
together_api_key="secret-api-key", # type: ignore[call-arg]
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
assert isinstance(llm.together_api_key, SecretStr)
def test_together_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test that the API key is masked when passed from an environment variable."""
monkeypatch.setenv("TOGETHER_API_KEY", "secret-api-key")
llm = Together( # type: ignore[call-arg]
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
print(llm.together_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_together_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test that the API key is masked when passed via the constructor."""
llm = Together(
together_api_key="secret-api-key", # type: ignore[call-arg]
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
print(llm.together_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_together_uses_actual_secret_value_from_secretstr() -> None:
"""Test that the actual secret value is correctly retrieved."""
llm = Together(
together_api_key="secret-api-key", # type: ignore[call-arg]
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
assert cast(SecretStr, llm.together_api_key).get_secret_value() == "secret-api-key"
def test_together_uses_actual_secret_value_from_secretstr_api_key() -> None:
"""Test that the actual secret value is correctly retrieved."""
llm = Together(
api_key="secret-api-key", # type: ignore[arg-type]
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
assert cast(SecretStr, llm.together_api_key).get_secret_value() == "secret-api-key"
def test_together_model_params() -> None:
# Test standard tracing params
llm = Together(
api_key="secret-api-key", # type: ignore[arg-type]
model="foo",
)
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "together",
"ls_model_type": "llm",
"ls_model_name": "foo",
"ls_max_tokens": 200,
}
llm = Together(
api_key="secret-api-key", # type: ignore[arg-type]
model="foo",
temperature=0.2,
max_tokens=250,
)
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "together",
"ls_model_type": "llm",
"ls_model_name": "foo",
"ls_max_tokens": 250,
"ls_temperature": 0.2,
}

libs/partners/together/tests/unit_tests/test_secrets.py

@@ -1,13 +0,0 @@
from langchain_together import ChatTogether, TogetherEmbeddings
def test_chat_together_secrets() -> None:
o = ChatTogether(together_api_key="foo") # type: ignore[call-arg]
s = str(o)
assert "foo" not in s
def test_together_embeddings_secrets() -> None:
o = TogetherEmbeddings(together_api_key="foo") # type: ignore[call-arg]
s = str(o)
assert "foo" not in s