langchain[patch], core[patch]: Make common utils public (#13932)

- rename `langchain_core.language_models.chat_models._generate_from_stream` -> `generate_from_stream`
- rename `langchain_core.language_models.chat_models._agenerate_from_stream` -> `agenerate_from_stream`
- export `langchain_core.utils.utils.build_extra_kwargs` from `langchain_core.utils`
Bagatur 2023-11-27 15:34:46 -08:00 committed by GitHub
parent c0277d06e8
commit 10a6e7cbb6
16 changed files with 45 additions and 42 deletions
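
For downstream code, the change amounts to an import rename (a sketch; the underscore-prefixed helpers were private, so anything importing them should move to the public names):

# Before this commit: private helpers, no stability guarantees
# from langchain.chat_models.base import _agenerate_from_stream, _generate_from_stream

# After this commit: public names in core, re-exported by langchain.chat_models.base
from langchain_core.language_models.chat_models import (
    agenerate_from_stream,
    generate_from_stream,
)
from langchain_core.utils import build_extra_kwargs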

@@ -55,7 +55,7 @@ def _get_verbosity() -> bool:
     return get_verbose()


-def _generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
+def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
     generation: Optional[ChatGenerationChunk] = None
     for chunk in stream:
         if generation is None:
@@ -66,7 +66,7 @@ def _generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
     return ChatResult(generations=[generation])


-async def _agenerate_from_stream(
+async def agenerate_from_stream(
     stream: AsyncIterator[ChatGenerationChunk],
 ) -> ChatResult:
     generation: Optional[ChatGenerationChunk] = None
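
A minimal sketch of using the now-public helper directly (the chunks here are fabricated for illustration; a real iterator would come from a chat model's _stream() implementation):

from langchain_core.language_models.chat_models import generate_from_stream
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

chunks = iter(
    [
        ChatGenerationChunk(message=AIMessageChunk(content="Hello")),
        ChatGenerationChunk(message=AIMessageChunk(content=", world")),
    ]
)
result = generate_from_stream(chunks)  # concatenates the chunks into one ChatResult
print(result.generations[0].message.content)  # -> "Hello, world"

agenerate_from_stream is the same operation over an AsyncIterator.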

@@ -13,6 +13,7 @@ from langchain_core.utils.input import (
 )
 from langchain_core.utils.loading import try_load_from_hub
 from langchain_core.utils.utils import (
+    build_extra_kwargs,
     check_package_version,
     convert_to_secret_str,
     get_pydantic_field_names,
@@ -37,4 +38,5 @@ __all__ = [
     "raise_for_status_with_text",
     "xor_args",
     "try_load_from_hub",
+    "build_extra_kwargs",
 ]
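
build_extra_kwargs is the helper several integrations call in their root validators to sweep undeclared constructor arguments into model_kwargs. A hedged sketch of standalone use (the field names and values are illustrative):

import warnings

from langchain_core.utils import build_extra_kwargs

declared = {"model", "temperature"}  # fields the model class actually declares
values = {"model": "my-model", "temperature": 0.1, "top_p": 0.9}

with warnings.catch_warnings():
    warnings.simplefilter("ignore")  # it warns once per transferred field
    extra = build_extra_kwargs(
        extra_kwargs={},  # pre-existing model_kwargs, if any
        values=values,
        all_required_field_names=declared,
    )

assert extra == {"top_p": 0.9}  # undeclared fields are moved into extra...
assert "top_p" not in values  # ...and popped from values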

@@ -15,6 +15,7 @@ EXPECTED_ALL = [
     "raise_for_status_with_text",
     "xor_args",
     "try_load_from_hub",
+    "build_extra_kwargs",
 ]

@@ -17,8 +17,8 @@ from langchain.callbacks.manager import (
 )
 from langchain.chat_models.base import (
     BaseChatModel,
-    _agenerate_from_stream,
-    _generate_from_stream,
+    agenerate_from_stream,
+    generate_from_stream,
 )
 from langchain.llms.anthropic import _AnthropicCommon
@@ -171,7 +171,7 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
             stream_iter = self._stream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)
         prompt = self._convert_messages_to_prompt(
             messages,
         )
@@ -198,7 +198,7 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
             stream_iter = self._astream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return await _agenerate_from_stream(stream_iter)
+            return await agenerate_from_stream(stream_iter)
         prompt = self._convert_messages_to_prompt(
             messages,
         )
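
The integration diffs below all repeat this same mechanical rename, so the shared pattern is worth spelling out once: when streaming is enabled, _generate/_agenerate build the chunk iterator and hand it to the helper instead of merging chunks by hand. A toy sketch of that pattern (EchoChat is hypothetical, not part of this commit):

from typing import Any, Iterator, List, Optional

from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import (
    BaseChatModel,
    generate_from_stream,
)
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult


class EchoChat(BaseChatModel):
    """Hypothetical model that echoes the last message back, word by word."""

    streaming: bool = False

    @property
    def _llm_type(self) -> str:
        return "echo-chat"

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        for word in str(messages[-1].content).split():
            yield ChatGenerationChunk(message=AIMessageChunk(content=word + " "))

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        if self.streaming:
            # The delegation pattern used by every integration in this commit.
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)
        message = AIMessage(content=str(messages[-1].content))
        return ChatResult(generations=[ChatGeneration(message=message)])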

@@ -23,7 +23,7 @@ from langchain_core.utils import (
 )

 from langchain.callbacks.manager import CallbackManagerForLLMRun
-from langchain.chat_models.base import BaseChatModel, _generate_from_stream
+from langchain.chat_models.base import BaseChatModel, generate_from_stream
 from langchain.utils import get_from_dict_or_env

 logger = logging.getLogger(__name__)
@@ -198,7 +198,7 @@ class ChatBaichuan(BaseChatModel):
             stream_iter = self._stream(
                 messages=messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)

         res = self._chat(messages, **kwargs)

@@ -1,15 +1,15 @@
 from langchain_core.language_models.chat_models import (
     BaseChatModel,
     SimpleChatModel,
-    _agenerate_from_stream,
-    _generate_from_stream,
     _get_verbosity,
+    agenerate_from_stream,
+    generate_from_stream,
 )

 __all__ = [
     "BaseChatModel",
     "SimpleChatModel",
-    "_generate_from_stream",
-    "_agenerate_from_stream",
+    "generate_from_stream",
+    "agenerate_from_stream",
     "_get_verbosity",
 ]
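
Because langchain.chat_models.base is now a pure re-export of the core module, both import paths resolve to the same objects (a quick sanity check, assuming both packages are installed):

from langchain.chat_models.base import generate_from_stream as lc_gfs
from langchain_core.language_models.chat_models import generate_from_stream as core_gfs

assert lc_gfs is core_gfs  # the shim re-exports rather than copies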

@@ -16,8 +16,8 @@ from langchain.callbacks.manager import (
 )
 from langchain.chat_models.base import (
     BaseChatModel,
-    _agenerate_from_stream,
-    _generate_from_stream,
+    agenerate_from_stream,
+    generate_from_stream,
 )
 from langchain.llms.cohere import BaseCohere
@@ -188,7 +188,7 @@ class ChatCohere(BaseChatModel, BaseCohere):
             stream_iter = self._stream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)

         request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
         response = self.client.chat(**request)
@@ -214,7 +214,7 @@
             stream_iter = self._astream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return await _agenerate_from_stream(stream_iter)
+            return await agenerate_from_stream(stream_iter)

         request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
         response = self.client.chat(**request, stream=False)

@@ -17,8 +17,8 @@ from langchain.callbacks.manager import (
 )
 from langchain.chat_models.base import (
     BaseChatModel,
-    _agenerate_from_stream,
-    _generate_from_stream,
+    agenerate_from_stream,
+    generate_from_stream,
 )
 from langchain.llms.gigachat import _BaseGigaChat
@@ -115,7 +115,7 @@ class GigaChat(_BaseGigaChat, BaseChatModel):
             stream_iter = self._stream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)

         payload = self._build_payload(messages)
         response = self._client.chat(payload)
@@ -135,7 +135,7 @@ class GigaChat(_BaseGigaChat, BaseChatModel):
             stream_iter = self._astream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return await _agenerate_from_stream(stream_iter)
+            return await agenerate_from_stream(stream_iter)

         payload = self._build_payload(messages)
         response = await self._client.achat(payload)

@@ -26,7 +26,7 @@ from langchain_core.utils import (
 )

 from langchain.callbacks.manager import CallbackManagerForLLMRun
-from langchain.chat_models.base import BaseChatModel, _generate_from_stream
+from langchain.chat_models.base import BaseChatModel, generate_from_stream
 from langchain.utils import get_from_dict_or_env

 logger = logging.getLogger(__name__)
@@ -242,7 +242,7 @@ class ChatHunyuan(BaseChatModel):
             stream_iter = self._stream(
                 messages=messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)

         res = self._chat(messages, **kwargs)

@@ -46,8 +46,8 @@ from langchain.callbacks.manager import (
 )
 from langchain.chat_models.base import (
     BaseChatModel,
-    _agenerate_from_stream,
-    _generate_from_stream,
+    agenerate_from_stream,
+    generate_from_stream,
 )
 from langchain.utils import get_from_dict_or_env
@@ -325,7 +325,7 @@ class JinaChat(BaseChatModel):
             stream_iter = self._stream(
                 messages=messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}
@@ -384,7 +384,7 @@ class JinaChat(BaseChatModel):
             stream_iter = self._astream(
                 messages=messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return await _agenerate_from_stream(stream_iter)
+            return await agenerate_from_stream(stream_iter)

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}

@@ -24,7 +24,7 @@ from langchain.adapters.openai import convert_dict_to_message, convert_message_t
 from langchain.callbacks.manager import (
     CallbackManagerForLLMRun,
 )
-from langchain.chat_models.base import BaseChatModel, _generate_from_stream
+from langchain.chat_models.base import BaseChatModel, generate_from_stream
 from langchain.chat_models.openai import _convert_delta_to_message_chunk
 from langchain.utils import get_from_dict_or_env
@@ -229,7 +229,7 @@ class ChatKonko(BaseChatModel):
             stream_iter = self._stream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}

@@ -43,8 +43,8 @@ from langchain.callbacks.manager import (
 )
 from langchain.chat_models.base import (
     BaseChatModel,
-    _agenerate_from_stream,
-    _generate_from_stream,
+    agenerate_from_stream,
+    generate_from_stream,
 )
 from langchain.llms.base import create_base_retry_decorator
 from langchain.utils import get_from_dict_or_env
@@ -300,7 +300,7 @@ class ChatLiteLLM(BaseChatModel):
             stream_iter = self._stream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}
@@ -395,7 +395,7 @@ class ChatLiteLLM(BaseChatModel):
             stream_iter = self._astream(
                 messages=messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return await _agenerate_from_stream(stream_iter)
+            return await agenerate_from_stream(stream_iter)

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}

@@ -45,8 +45,8 @@ from langchain.callbacks.manager import (
 )
 from langchain.chat_models.base import (
     BaseChatModel,
-    _agenerate_from_stream,
-    _generate_from_stream,
+    agenerate_from_stream,
+    generate_from_stream,
 )
 from langchain.llms.base import create_base_retry_decorator
 from langchain.utils import get_from_dict_or_env
@@ -416,7 +416,7 @@ class ChatOpenAI(BaseChatModel):
             stream_iter = self._stream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}
         response = self.completion_with_retry(
@@ -499,7 +499,7 @@ class ChatOpenAI(BaseChatModel):
             stream_iter = self._astream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return await _agenerate_from_stream(stream_iter)
+            return await agenerate_from_stream(stream_iter)

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}

@@ -46,7 +46,7 @@ from tenacity import (

 from langchain.callbacks.manager import CallbackManagerForLLMRun
 from langchain.chat_models.base import (
     BaseChatModel,
-    _generate_from_stream,
+    generate_from_stream,
 )
 from langchain.utils import get_from_dict_or_env
@@ -321,7 +321,7 @@ class ChatTongyi(BaseChatModel):
             stream_iter = self._stream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)

         if not messages:
             raise ValueError("No messages provided.")

@@ -19,7 +19,7 @@ from langchain.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
-from langchain.chat_models.base import BaseChatModel, _generate_from_stream
+from langchain.chat_models.base import BaseChatModel, generate_from_stream
 from langchain.llms.vertexai import _VertexAICommon, is_codey_model
 from langchain.utilities.vertexai import raise_vertex_import_error
@@ -172,7 +172,7 @@ class ChatVertexAI(_VertexAICommon, BaseChatModel):
             stream_iter = self._stream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
             )
-            return _generate_from_stream(stream_iter)
+            return generate_from_stream(stream_iter)

         question = _get_question(messages)
         history = _parse_chat_history(messages[:-1])

@@ -3,8 +3,8 @@ from langchain.chat_models.base import __all__
 EXPECTED_ALL = [
     "BaseChatModel",
     "SimpleChatModel",
-    "_agenerate_from_stream",
-    "_generate_from_stream",
+    "agenerate_from_stream",
+    "generate_from_stream",
     "_get_verbosity",
 ]