anthropic: emit informative error message if there are only system messages in a prompt (#30822)
**PR message**: Not sure if I put the check in the right spot, but throwing the error before the loop made sense to me.

**Description:** Checks whether a prompt sent to the ChatAnthropic model contains only system messages, and emits an informative warning before re-raising the API error if so. See the linked issue for more details.

**Issue:** #30764

---------

Co-authored-by: Chester Curme <chester.curme@gmail.com>
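A minimal repro sketch of the behavior this PR targets. The model name is a placeholder, and it assumes `ANTHROPIC_API_KEY` is set in the environment and a langchain-anthropic build that includes this change:

```python
# Hypothetical repro; model name and environment setup are placeholders.
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import SystemMessage

llm = ChatAnthropic(model="claude-3-5-sonnet-latest")

# A prompt that contains only system messages. The Anthropic API rejects it
# with "messages: at least one message is required"; with this change the
# client warns that only system message(s) were received before re-raising
# the anthropic.BadRequestError.
llm.invoke([SystemMessage(content="You are a helpful assistant.")])
```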
commit e1af509966
parent eb25d7472d
@@ -282,7 +282,6 @@ def _format_messages(
     """
     system: Union[str, list[dict], None] = None
     formatted_messages: list[dict] = []
-
     merged_messages = _merge_messages(messages)
     for i, message in enumerate(merged_messages):
         if message.type == "system":
@@ -410,6 +409,16 @@ def _format_messages(
     return system, formatted_messages
 
 
+def _handle_anthropic_bad_request(e: anthropic.BadRequestError) -> None:
+    """Handle Anthropic BadRequestError."""
+    if ("messages: at least one message is required") in e.message:
+        message = "Received only system message(s). "
+        warnings.warn(message)
+        raise e
+    else:
+        raise
+
+
 class ChatAnthropic(BaseChatModel):
     """Anthropic chat models.
 
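One detail worth noting about the helper: the bare `raise` in the `else:` branch only works while an exception is actively being handled, so `_handle_anthropic_bad_request` must be called from inside an `except` block, as the try/except additions below all do. A runnable sketch of that control flow, using a stand-in exception class rather than the real `anthropic.BadRequestError`:

```python
import warnings


class FakeBadRequestError(Exception):
    """Stand-in for anthropic.BadRequestError, for illustration only."""

    def __init__(self, message: str) -> None:
        super().__init__(message)
        self.message = message


def _handle_bad_request(e: FakeBadRequestError) -> None:
    # Mirrors the helper above: warn on the known error text, then
    # re-raise. Unknown errors propagate via bare `raise`, which
    # re-raises the exception currently being handled.
    if "messages: at least one message is required" in e.message:
        warnings.warn("Received only system message(s). ")
        raise e
    else:
        raise


try:
    raise FakeBadRequestError("messages: at least one message is required")
except FakeBadRequestError as e:
    _handle_bad_request(e)  # emits the warning, then re-raises the error
```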
@@ -1111,23 +1120,26 @@ class ChatAnthropic(BaseChatModel):
         stream_usage = self.stream_usage
         kwargs["stream"] = True
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        stream = self._client.messages.create(**payload)
-        coerce_content_to_string = (
-            not _tools_in_params(payload)
-            and not _documents_in_params(payload)
-            and not _thinking_in_params(payload)
-        )
-        for event in stream:
-            msg = _make_message_chunk_from_anthropic_event(
-                event,
-                stream_usage=stream_usage,
-                coerce_content_to_string=coerce_content_to_string,
-            )
-            if msg is not None:
-                chunk = ChatGenerationChunk(message=msg)
-                if run_manager and isinstance(msg.content, str):
-                    run_manager.on_llm_new_token(msg.content, chunk=chunk)
-                yield chunk
+        try:
+            stream = self._client.messages.create(**payload)
+            coerce_content_to_string = (
+                not _tools_in_params(payload)
+                and not _documents_in_params(payload)
+                and not _thinking_in_params(payload)
+            )
+            for event in stream:
+                msg = _make_message_chunk_from_anthropic_event(
+                    event,
+                    stream_usage=stream_usage,
+                    coerce_content_to_string=coerce_content_to_string,
+                )
+                if msg is not None:
+                    chunk = ChatGenerationChunk(message=msg)
+                    if run_manager and isinstance(msg.content, str):
+                        run_manager.on_llm_new_token(msg.content, chunk=chunk)
+                    yield chunk
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)
 
     async def _astream(
         self,
@@ -1142,23 +1154,26 @@ class ChatAnthropic(BaseChatModel):
         stream_usage = self.stream_usage
         kwargs["stream"] = True
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        stream = await self._async_client.messages.create(**payload)
-        coerce_content_to_string = (
-            not _tools_in_params(payload)
-            and not _documents_in_params(payload)
-            and not _thinking_in_params(payload)
-        )
-        async for event in stream:
-            msg = _make_message_chunk_from_anthropic_event(
-                event,
-                stream_usage=stream_usage,
-                coerce_content_to_string=coerce_content_to_string,
-            )
-            if msg is not None:
-                chunk = ChatGenerationChunk(message=msg)
-                if run_manager and isinstance(msg.content, str):
-                    await run_manager.on_llm_new_token(msg.content, chunk=chunk)
-                yield chunk
+        try:
+            stream = await self._async_client.messages.create(**payload)
+            coerce_content_to_string = (
+                not _tools_in_params(payload)
+                and not _documents_in_params(payload)
+                and not _thinking_in_params(payload)
+            )
+            async for event in stream:
+                msg = _make_message_chunk_from_anthropic_event(
+                    event,
+                    stream_usage=stream_usage,
+                    coerce_content_to_string=coerce_content_to_string,
+                )
+                if msg is not None:
+                    chunk = ChatGenerationChunk(message=msg)
+                    if run_manager and isinstance(msg.content, str):
+                        await run_manager.on_llm_new_token(msg.content, chunk=chunk)
+                    yield chunk
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)
 
     def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
         data_dict = data.model_dump()
@@ -1218,7 +1233,10 @@ class ChatAnthropic(BaseChatModel):
             )
             return generate_from_stream(stream_iter)
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        data = self._client.messages.create(**payload)
+        try:
+            data = self._client.messages.create(**payload)
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)
         return self._format_output(data, **kwargs)
 
     async def _agenerate(
@@ -1234,7 +1252,10 @@ class ChatAnthropic(BaseChatModel):
             )
             return await agenerate_from_stream(stream_iter)
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        data = await self._async_client.messages.create(**payload)
+        try:
+            data = await self._async_client.messages.create(**payload)
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)
         return self._format_output(data, **kwargs)
 
     def _get_llm_for_structured_output_when_thinking_is_enabled(
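The guard now covers all four entry points (`_generate`/`_agenerate` and `_stream`/`_astream`), so streaming calls surface the same warning. A hypothetical async usage sketch (the model name is a placeholder):

```python
import asyncio

from langchain_anthropic import ChatAnthropic
from langchain_core.messages import SystemMessage


async def main() -> None:
    llm = ChatAnthropic(model="claude-3-5-sonnet-latest")  # placeholder model
    # Streaming a system-only prompt now warns about the system-only
    # input and then raises anthropic.BadRequestError, instead of
    # failing with only the raw API error.
    async for chunk in llm.astream([SystemMessage(content="System only.")]):
        print(chunk.content)


asyncio.run(main())
```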