Mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-03 18:24:10 +00:00)
Commit 61e329637b (parent b8fed06409): lint
@@ -740,7 +740,7 @@ class BaseChatModelV1(BaseLanguageModel[AIMessageV1], ABC):
         *,
         tool_choice: Optional[Union[str]] = None,
         **kwargs: Any,
-    ) -> Runnable[LanguageModelInput, BaseMessage]:
+    ) -> Runnable[LanguageModelInput, AIMessageV1]:
         """Bind tools to the model.
 
         Args:
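Note: a minimal sketch of what the narrowed return type gives callers. The model object and tool below are illustrative stand-ins, not part of this commit; it assumes any BaseChatModelV1 subclass:

    from langchain_core.tools import tool

    @tool
    def add(a: int, b: int) -> int:
        """Add two integers."""
        return a + b

    # `model` is any BaseChatModelV1 subclass (illustrative stand-in).
    model_with_tools = model.bind_tools([add])
    # The result is now statically typed as AIMessageV1 rather than the
    # looser BaseMessage, so v1-specific access needs no cast.
    result = model_with_tools.invoke("What is 2 + 2?")
    print(result.tool_calls)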
@@ -9,7 +9,7 @@ from typing import Annotated, Any, Optional
 from pydantic import SkipValidation, ValidationError
 
 from langchain_core.exceptions import OutputParserException
-from langchain_core.messages import AIMessage, InvalidToolCall
+from langchain_core.messages import AIMessage, InvalidToolCall, ToolCall
 from langchain_core.messages.tool import invalid_tool_call
 from langchain_core.messages.tool import tool_call as create_tool_call
 from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
@@ -26,7 +26,7 @@ def parse_tool_call(
     partial: bool = False,
     strict: bool = False,
     return_id: bool = True,
-) -> Optional[dict[str, Any]]:
+) -> Optional[ToolCall]:
     """Parse a single tool call.
 
     Args:
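Note: ToolCall is a TypedDict (name/args/id), so the new return type gives call sites keyed, type-checked access instead of an untyped dict. A small sketch with an illustrative OpenAI-style payload:

    from langchain_core.output_parsers.openai_tools import parse_tool_call

    raw_tool_call = {
        "id": "call_abc123",
        "function": {"name": "add", "arguments": '{"a": 2, "b": 2}'},
    }
    tool_call = parse_tool_call(raw_tool_call, return_id=True)
    if tool_call is not None:  # None is possible, e.g. under partial=True
        print(tool_call["name"], tool_call["args"], tool_call["id"])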
@@ -37,9 +37,9 @@ from langchain_core.callbacks import (
     CallbackManagerForLLMRun,
 )
 from langchain_core.language_models import LanguageModelInput
-from langchain_core.language_models.chat_models import (
-    BaseChatModel,
-    LangSmithParams,
+from langchain_core.language_models.chat_models import LangSmithParams
+from langchain_core.language_models.v1.chat_models import (
+    BaseChatModelV1,
     agenerate_from_stream,
     generate_from_stream,
 )
@@ -103,6 +103,7 @@ from langchain_openai.chat_models._compat import (
 )
 
 if TYPE_CHECKING:
+    from langchain_core.messages import content_blocks as types
     from openai.types.responses import Response
 
 logger = logging.getLogger(__name__)
@@ -138,13 +139,17 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> MessageV1:
     elif role == "assistant":
         # Fix for azure
         # Also OpenAI returns None for tool invocations
-        content = [{"type": "text", "text": _dict.get("content", "") or ""}]
+        content: list[types.ContentBlock] = [
+            {"type": "text", "text": _dict.get("content", "") or ""}
+        ]
         tool_calls = []
         invalid_tool_calls = []
         if raw_tool_calls := _dict.get("tool_calls"):
             for raw_tool_call in raw_tool_calls:
                 try:
-                    tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
+                    tool_call = parse_tool_call(raw_tool_call, return_id=True)
+                    if tool_call:
+                        tool_calls.append(tool_call)
                 except Exception as e:
                     invalid_tool_calls.append(
                         make_invalid_tool_call(raw_tool_call, str(e))
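Note: the explicit `list[types.ContentBlock]` annotation is what lets the later appends type-check; without it, the checker infers the element type from the text-block literal alone and rejects other block shapes. A reduced, self-contained sketch of the pattern (the block types here are illustrative, not the real content_blocks definitions):

    from typing import TypedDict, Union

    class TextBlock(TypedDict):
        type: str
        text: str

    class AudioBlock(TypedDict):
        type: str
        audio: dict

    ContentBlock = Union[TextBlock, AudioBlock]

    # Annotated wide up front: both block shapes may be appended later.
    content: list[ContentBlock] = [{"type": "text", "text": ""}]
    content.append({"type": "audio", "audio": {}})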
@@ -152,7 +157,9 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> MessageV1:
         content.extend(tool_calls)
         if audio := _dict.get("audio"):
             # TODO: populate standard fields
-            content.append({"type": "audio", "audio": audio})
+            content.append(
+                cast(types.AudioContentBlock, {"type": "audio", "audio": audio})
+            )
         return AIMessageV1(
             content=content,
             name=name,
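Note: `cast` is a static-only assertion; the dict is unchanged at runtime. A reduced sketch of the idiom used above (the TypedDict is an illustrative stand-in for the real AudioContentBlock):

    from typing import TypedDict, cast

    class AudioContentBlock(TypedDict):
        type: str
        audio: dict

    raw = {"type": "audio", "audio": {"format": "wav"}}
    block = cast(AudioContentBlock, raw)  # no runtime conversion or check
    assert block is raw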
@@ -368,7 +375,7 @@ class _AllReturnType(TypedDict):
     parsing_error: Optional[BaseException]
 
 
-class BaseChatOpenAIV1(BaseChatModel):
+class BaseChatOpenAIV1(BaseChatModelV1):
     client: Any = Field(default=None, exclude=True)  #: :meta private:
     async_client: Any = Field(default=None, exclude=True)  #: :meta private:
     root_client: Any = Field(default=None, exclude=True)  #: :meta private:
@@ -822,7 +829,7 @@ class BaseChatOpenAIV1(BaseChatModel):
             if generation_chunk:
                 if run_manager:
                     run_manager.on_llm_new_token(
-                        generation_chunk.text, chunk=generation_chunk
+                        generation_chunk.text or "", chunk=generation_chunk
                     )
                 is_first_chunk = False
                 yield generation_chunk
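Note: this and the remaining callback hunks are one repeated fix: a v1 chunk's `.text` can be empty or None when a delta carries no text (an inference from this diff, not stated in it), while `on_llm_new_token` expects `str`, so the value is coalesced with `or ""`. A minimal sketch of the failure being avoided:

    from typing import Callable, Optional

    def forward_token(text: Optional[str], on_token: Callable[[str], None]) -> None:
        # Coalesce missing text to "" so the str-typed callback never sees None.
        on_token(text or "")

    forward_token(None, print)   # prints an empty line instead of passing None
    forward_token("hi", print)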
@@ -873,7 +880,7 @@ class BaseChatOpenAIV1(BaseChatModel):
             if generation_chunk:
                 if run_manager:
                     await run_manager.on_llm_new_token(
-                        generation_chunk.text, chunk=generation_chunk
+                        generation_chunk.text or "", chunk=generation_chunk
                    )
                 is_first_chunk = False
                 yield generation_chunk
@@ -944,7 +951,9 @@ class BaseChatOpenAIV1(BaseChatModel):
                 logprobs = message_chunk.response_metadata.get("logprobs")
                 if run_manager:
                     run_manager.on_llm_new_token(
-                        message_chunk.text, chunk=message_chunk, logprobs=logprobs
+                        message_chunk.text or "",
+                        chunk=message_chunk,
+                        logprobs=logprobs,
                     )
                 is_first_chunk = False
                 yield message_chunk
@@ -954,7 +963,9 @@ class BaseChatOpenAIV1(BaseChatModel):
             final_completion = response.get_final_completion()
             message_chunk = self._get_message_chunk_from_completion(final_completion)
             if run_manager:
-                run_manager.on_llm_new_token(message_chunk.text, chunk=message_chunk)
+                run_manager.on_llm_new_token(
+                    message_chunk.text or "", chunk=message_chunk
+                )
             yield message_chunk
 
     def _generate(
@@ -1029,7 +1040,7 @@ class BaseChatOpenAIV1(BaseChatModel):
         stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> dict:
-        messages = self._convert_input(input_).to_messages(output_version="v1")
+        messages = self._convert_input(input_)
         if stop is not None:
             kwargs["stop"] = stop
 
@@ -1168,7 +1179,9 @@ class BaseChatOpenAIV1(BaseChatModel):
                 logprobs = message_chunk.response_metadata.get("logprobs")
                 if run_manager:
                     await run_manager.on_llm_new_token(
-                        message_chunk.text, chunk=message_chunk, logprobs=logprobs
+                        message_chunk.text or "",
+                        chunk=message_chunk,
+                        logprobs=logprobs,
                     )
                 is_first_chunk = False
                 yield message_chunk
@@ -1179,7 +1192,7 @@ class BaseChatOpenAIV1(BaseChatModel):
             message_chunk = self._get_message_chunk_from_completion(final_completion)
             if run_manager:
                 await run_manager.on_llm_new_token(
-                    message_chunk.text, chunk=message_chunk
+                    message_chunk.text or "", chunk=message_chunk
                 )
             yield message_chunk
 
@@ -1420,7 +1433,7 @@ class BaseChatOpenAIV1(BaseChatModel):
         strict: Optional[bool] = None,
         parallel_tool_calls: Optional[bool] = None,
         **kwargs: Any,
-    ) -> Runnable[LanguageModelInput, MessageV1]:
+    ) -> Runnable[LanguageModelInput, AIMessageV1]:
         """Bind tool-like objects to this chat model.
 
         Assumes model is compatible with OpenAI tool-calling API.
@@ -1614,7 +1627,7 @@ class BaseChatOpenAIV1(BaseChatModel):
             kwargs: Additional keyword args are passed through to the model.
 
         Returns:
-            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.
+            A Runnable that takes same inputs as a :class:`from langchain_core.language_models.v1.chat_models import BaseChatModelV1`.
 
             | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
 
@@ -2629,7 +2642,7 @@ class ChatOpenAI(BaseChatOpenAIV1):  # type: ignore[override]
             kwargs: Additional keyword args are passed through to the model.
 
         Returns:
-            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.
+            A Runnable that takes same inputs as a :class:`from langchain_core.language_models.v1.chat_models import BaseChatModelV1`.
 
             | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
 
|