Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-25 21:37:20 +00:00
core: Add ruff rules PLR (#30696)
Add ruff rules [PLR](https://docs.astral.sh/ruff/rules/#refactor-plr), except PLR09xxx and PLR2004. Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
committed by GitHub
parent 68361f9c2d
commit 4cc7bc6c93
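For orientation before the diff: the bulk of the changes below come from two PLR refactors, collapsible-else-if (PLR5501) and repeated-equality-comparison (PLR1714), per the ruff rule docs. A minimal sketch of both follows; the function names are illustrative only and do not appear in the codebase.

# Hypothetical before/after pairs; names are illustrative, not from langchain-core.

# PLR5501 (collapsible-else-if): an `if` that is the only statement in an
# `else` block collapses into `elif`, removing one indentation level.
def describe_before(cleanup):
    if cleanup == "full":
        return "full"
    else:
        if cleanup is None:  # sole statement of the else block -> flagged
            return "none"
        else:
            return "other"


def describe_after(cleanup):
    if cleanup == "full":
        return "full"
    elif cleanup is None:  # same logic, one level shallower
        return "none"
    else:
        return "other"


# PLR1714 (repeated-equality-comparison): repeated `==` checks against the
# same variable joined by `or` become a single set-membership test.
def needs_source_id_before(cleanup):
    return cleanup == "incremental" or cleanup == "scoped_full"


def needs_source_id_after(cleanup):
    return cleanup in {"incremental", "scoped_full"}

The remaining one-liners below follow the same spirit: `response.status_code == 200` becomes `requests.codes.ok`, and a named `MILLISECONDS_IN_SECOND` constant replaces a bare `1000` in the elapsed-time formatter.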
@@ -53,11 +53,10 @@ class FileCallbackHandler(BaseCallbackHandler):
         """
         if "name" in kwargs:
             name = kwargs["name"]
-        else:
-            if serialized:
-                name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
-            else:
-                name = "<unknown>"
+        elif serialized:
+            name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
+        else:
+            name = "<unknown>"
         print_text(
             f"\n\n\033[1m> Entering new {name} chain...\033[0m",
             end="\n",
@@ -364,19 +364,16 @@ async def _ahandle_event_for_handler(
         event = getattr(handler, event_name)
         if asyncio.iscoroutinefunction(event):
             await event(*args, **kwargs)
-        else:
-            if handler.run_inline:
-                event(*args, **kwargs)
-            else:
-                await asyncio.get_event_loop().run_in_executor(
-                    None,
-                    cast(
-                        "Callable",
-                        functools.partial(
-                            copy_context().run, event, *args, **kwargs
-                        ),
-                    ),
-                )
+        elif handler.run_inline:
+            event(*args, **kwargs)
+        else:
+            await asyncio.get_event_loop().run_in_executor(
+                None,
+                cast(
+                    "Callable",
+                    functools.partial(copy_context().run, event, *args, **kwargs),
+                ),
+            )
     except NotImplementedError as e:
         if event_name == "on_chat_model_start":
             message_strings = [get_buffer_string(m) for m in args[1]]
@@ -2426,12 +2423,11 @@ def _configure(
                     for handler in callback_manager.handlers
                 ):
                     callback_manager.add_handler(var_handler, inheritable)
-            else:
-                if not any(
-                    isinstance(handler, handler_class)
-                    for handler in callback_manager.handlers
-                ):
-                    callback_manager.add_handler(var_handler, inheritable)
+            elif not any(
+                isinstance(handler, handler_class)
+                for handler in callback_manager.handlers
+            ):
+                callback_manager.add_handler(var_handler, inheritable)
     return callback_manager
 
 
@@ -37,11 +37,10 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         """
         if "name" in kwargs:
             name = kwargs["name"]
-        else:
-            if serialized:
-                name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
-            else:
-                name = "<unknown>"
+        elif serialized:
+            name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
+        else:
+            name = "<unknown>"
         print(f"\n\n\033[1m> Entering new {name} chain...\033[0m")  # noqa: T201
 
     @override
@@ -316,7 +316,7 @@ def index(
         )
         raise ValueError(msg)
 
-    if (cleanup == "incremental" or cleanup == "scoped_full") and source_id_key is None:
+    if (cleanup in {"incremental", "scoped_full"}) and source_id_key is None:
         msg = (
             "Source id key is required when cleanup mode is incremental or scoped_full."
         )
@@ -379,7 +379,7 @@ def index(
             source_id_assigner(doc) for doc in hashed_docs
         ]
 
-        if cleanup == "incremental" or cleanup == "scoped_full":
+        if cleanup in {"incremental", "scoped_full"}:
             # source ids are required.
             for source_id, hashed_doc in zip(source_ids, hashed_docs):
                 if source_id is None:
@@ -622,7 +622,7 @@ async def aindex(
        )
        raise ValueError(msg)
 
-    if (cleanup == "incremental" or cleanup == "scoped_full") and source_id_key is None:
+    if (cleanup in {"incremental", "scoped_full"}) and source_id_key is None:
         msg = (
             "Source id key is required when cleanup mode is incremental or scoped_full."
         )
@@ -667,11 +667,10 @@ async def aindex(
         # In such a case, we use the load method and convert it to an async
         # iterator.
         async_doc_iterator = _to_async_iterator(docs_source.load())
-    else:
-        if hasattr(docs_source, "__aiter__"):
-            async_doc_iterator = docs_source  # type: ignore[assignment]
-        else:
-            async_doc_iterator = _to_async_iterator(docs_source)
+    elif hasattr(docs_source, "__aiter__"):
+        async_doc_iterator = docs_source  # type: ignore[assignment]
+    else:
+        async_doc_iterator = _to_async_iterator(docs_source)
 
     source_id_assigner = _get_source_id_assigner(source_id_key)
@@ -694,7 +693,7 @@ async def aindex(
             source_id_assigner(doc) for doc in hashed_docs
         ]
 
-        if cleanup == "incremental" or cleanup == "scoped_full":
+        if cleanup in {"incremental", "scoped_full"}:
             # If the cleanup mode is incremental, source ids are required.
             for source_id, hashed_doc in zip(source_ids, hashed_docs):
                 if source_id is None:
@@ -955,13 +955,12 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                 )
                 chunks.append(chunk)
             result = generate_from_stream(iter(chunks))
-        else:
-            if inspect.signature(self._generate).parameters.get("run_manager"):
-                result = self._generate(
-                    messages, stop=stop, run_manager=run_manager, **kwargs
-                )
-            else:
-                result = self._generate(messages, stop=stop, **kwargs)
+        elif inspect.signature(self._generate).parameters.get("run_manager"):
+            result = self._generate(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
+        else:
+            result = self._generate(messages, stop=stop, **kwargs)
 
         # Add response metadata to each generation
         for idx, generation in enumerate(result.generations):
@@ -1028,13 +1027,12 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                 )
                 chunks.append(chunk)
             result = generate_from_stream(iter(chunks))
-        else:
-            if inspect.signature(self._agenerate).parameters.get("run_manager"):
-                result = await self._agenerate(
-                    messages, stop=stop, run_manager=run_manager, **kwargs
-                )
-            else:
-                result = await self._agenerate(messages, stop=stop, **kwargs)
+        elif inspect.signature(self._agenerate).parameters.get("run_manager"):
+            result = await self._agenerate(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
+        else:
+            result = await self._agenerate(messages, stop=stop, **kwargs)
 
         # Add response metadata to each generation
         for idx, generation in enumerate(result.generations):
@@ -170,17 +170,16 @@ def merge_content(
             # If both are lists
             merged = merge_lists(cast("list", merged), content)  # type: ignore
         # If the first content is a list, and the second content is a string
+        # If the last element of the first content is a string
+        # Add the second content to the last element
+        elif merged and isinstance(merged[-1], str):
+            merged[-1] += content
+        # If second content is an empty string, treat as a no-op
+        elif content == "":
+            pass
         else:
-            # If the last element of the first content is a string
-            # Add the second content to the last element
-            if merged and isinstance(merged[-1], str):
-                merged[-1] += content
-            # If second content is an empty string, treat as a no-op
-            elif content == "":
-                pass
-            else:
-                # Otherwise, add the second content as a new element of the list
-                merged.append(content)
+            # Otherwise, add the second content as a new element of the list
+            merged.append(content)
     return merged
 
 
@@ -1030,231 +1030,222 @@ def convert_to_openai_messages(
                 content = message.content
             else:
                 content = [{"type": "text", "text": message.content}]
-        else:
-            if text_format == "string" and all(
-                isinstance(block, str) or block.get("type") == "text"
-                for block in message.content
-            ):
-                content = "\n".join(
-                    block if isinstance(block, str) else block["text"]
-                    for block in message.content
-                )
-            else:
-                content = []
-                for j, block in enumerate(message.content):
-                    # OpenAI format
-                    if isinstance(block, str):
-                        content.append({"type": "text", "text": block})
-                    elif block.get("type") == "text":
-                        if missing := [k for k in ("text",) if k not in block]:
-                            err = (
-                                f"Unrecognized content block at "
-                                f"messages[{i}].content[{j}] has 'type': 'text' "
-                                f"but is missing expected key(s) "
-                                f"{missing}. Full content block:\n\n{block}"
-                            )
-                            raise ValueError(err)
-                        content.append({"type": block["type"], "text": block["text"]})
-                    elif block.get("type") == "image_url":
-                        if missing := [k for k in ("image_url",) if k not in block]:
-                            err = (
-                                f"Unrecognized content block at "
-                                f"messages[{i}].content[{j}] has 'type': 'image_url' "
-                                f"but is missing expected key(s) "
-                                f"{missing}. Full content block:\n\n{block}"
-                            )
-                            raise ValueError(err)
-                        content.append(
-                            {
-                                "type": "image_url",
-                                "image_url": block["image_url"],
-                            }
-                        )
-                    # Anthropic and Bedrock converse format
-                    elif (block.get("type") == "image") or "image" in block:
-                        # Anthropic
-                        if source := block.get("source"):
-                            if missing := [
-                                k
-                                for k in ("media_type", "type", "data")
-                                if k not in source
-                            ]:
-                                err = (
-                                    f"Unrecognized content block at "
-                                    f"messages[{i}].content[{j}] has 'type': 'image' "
-                                    f"but 'source' is missing expected key(s) "
-                                    f"{missing}. Full content block:\n\n{block}"
-                                )
-                                raise ValueError(err)
-                            content.append(
-                                {
-                                    "type": "image_url",
-                                    "image_url": {
-                                        "url": (
-                                            f"data:{source['media_type']};"
-                                            f"{source['type']},{source['data']}"
-                                        )
-                                    },
-                                }
-                            )
-                        # Bedrock converse
-                        elif image := block.get("image"):
-                            if missing := [
-                                k for k in ("source", "format") if k not in image
-                            ]:
-                                err = (
-                                    f"Unrecognized content block at "
-                                    f"messages[{i}].content[{j}] has key 'image', "
-                                    f"but 'image' is missing expected key(s) "
-                                    f"{missing}. Full content block:\n\n{block}"
-                                )
-                                raise ValueError(err)
-                            b64_image = _bytes_to_b64_str(image["source"]["bytes"])
-                            content.append(
-                                {
-                                    "type": "image_url",
-                                    "image_url": {
-                                        "url": (
-                                            f"data:image/{image['format']};"
-                                            f"base64,{b64_image}"
-                                        )
-                                    },
-                                }
-                            )
-                        else:
-                            err = (
-                                f"Unrecognized content block at "
-                                f"messages[{i}].content[{j}] has 'type': 'image' "
-                                f"but does not have a 'source' or 'image' key. Full "
-                                f"content block:\n\n{block}"
-                            )
-                            raise ValueError(err)
-                    elif block.get("type") == "tool_use":
-                        if missing := [
-                            k for k in ("id", "name", "input") if k not in block
-                        ]:
-                            err = (
-                                f"Unrecognized content block at "
-                                f"messages[{i}].content[{j}] has 'type': "
-                                f"'tool_use', but is missing expected key(s) "
-                                f"{missing}. Full content block:\n\n{block}"
-                            )
-                            raise ValueError(err)
-                        if not any(
-                            tool_call["id"] == block["id"]
-                            for tool_call in cast("AIMessage", message).tool_calls
-                        ):
-                            oai_msg["tool_calls"] = oai_msg.get("tool_calls", [])
-                            oai_msg["tool_calls"].append(
-                                {
-                                    "type": "function",
-                                    "id": block["id"],
-                                    "function": {
-                                        "name": block["name"],
-                                        "arguments": json.dumps(block["input"]),
-                                    },
-                                }
-                            )
-                    elif block.get("type") == "tool_result":
-                        if missing := [
-                            k for k in ("content", "tool_use_id") if k not in block
-                        ]:
-                            msg = (
-                                f"Unrecognized content block at "
-                                f"messages[{i}].content[{j}] has 'type': "
-                                f"'tool_result', but is missing expected key(s) "
-                                f"{missing}. Full content block:\n\n{block}"
-                            )
-                            raise ValueError(msg)
-                        tool_message = ToolMessage(
-                            block["content"],
-                            tool_call_id=block["tool_use_id"],
-                            status="error" if block.get("is_error") else "success",
-                        )
-                        # Recurse to make sure tool message contents are OpenAI format.
-                        tool_messages.extend(
-                            convert_to_openai_messages(
-                                [tool_message], text_format=text_format
-                            )
-                        )
-                    elif (block.get("type") == "json") or "json" in block:
-                        if "json" not in block:
-                            msg = (
-                                f"Unrecognized content block at "
-                                f"messages[{i}].content[{j}] has 'type': 'json' "
-                                f"but does not have a 'json' key. Full "
-                                f"content block:\n\n{block}"
-                            )
-                            raise ValueError(msg)
-                        content.append(
-                            {
-                                "type": "text",
-                                "text": json.dumps(block["json"]),
-                            }
-                        )
-                    elif (
-                        block.get("type") == "guard_content"
-                    ) or "guard_content" in block:
-                        if (
-                            "guard_content" not in block
-                            or "text" not in block["guard_content"]
-                        ):
-                            msg = (
-                                f"Unrecognized content block at "
-                                f"messages[{i}].content[{j}] has 'type': "
-                                f"'guard_content' but does not have a "
-                                f"messages[{i}].content[{j}]['guard_content']['text'] "
-                                f"key. Full content block:\n\n{block}"
-                            )
-                            raise ValueError(msg)
-                        text = block["guard_content"]["text"]
-                        if isinstance(text, dict):
-                            text = text["text"]
-                        content.append({"type": "text", "text": text})
-                    # VertexAI format
-                    elif block.get("type") == "media":
-                        if missing := [
-                            k for k in ("mime_type", "data") if k not in block
-                        ]:
-                            err = (
-                                f"Unrecognized content block at "
-                                f"messages[{i}].content[{j}] has 'type': "
-                                f"'media' but does not have key(s) {missing}. Full "
-                                f"content block:\n\n{block}"
-                            )
-                            raise ValueError(err)
-                        if "image" not in block["mime_type"]:
-                            err = (
-                                f"OpenAI messages can only support text and image data."
-                                f" Received content block with media of type:"
-                                f" {block['mime_type']}"
-                            )
-                            raise ValueError(err)
-                        b64_image = _bytes_to_b64_str(block["data"])
-                        content.append(
-                            {
-                                "type": "image_url",
-                                "image_url": {
-                                    "url": (
-                                        f"data:{block['mime_type']};base64,{b64_image}"
-                                    )
-                                },
-                            }
-                        )
-                    elif block.get("type") == "thinking":
-                        content.append(block)
-                    else:
-                        err = (
-                            f"Unrecognized content block at "
-                            f"messages[{i}].content[{j}] does not match OpenAI, "
-                            f"Anthropic, Bedrock Converse, or VertexAI format. Full "
-                            f"content block:\n\n{block}"
-                        )
-                        raise ValueError(err)
-                if text_format == "string" and not any(
-                    block["type"] != "text" for block in content
-                ):
-                    content = "\n".join(block["text"] for block in content)
+        elif text_format == "string" and all(
+            isinstance(block, str) or block.get("type") == "text"
+            for block in message.content
+        ):
+            content = "\n".join(
+                block if isinstance(block, str) else block["text"]
+                for block in message.content
+            )
+        else:
+            content = []
+            for j, block in enumerate(message.content):
+                # OpenAI format
+                if isinstance(block, str):
+                    content.append({"type": "text", "text": block})
+                elif block.get("type") == "text":
+                    if missing := [k for k in ("text",) if k not in block]:
+                        err = (
+                            f"Unrecognized content block at "
+                            f"messages[{i}].content[{j}] has 'type': 'text' "
+                            f"but is missing expected key(s) "
+                            f"{missing}. Full content block:\n\n{block}"
+                        )
+                        raise ValueError(err)
+                    content.append({"type": block["type"], "text": block["text"]})
+                elif block.get("type") == "image_url":
+                    if missing := [k for k in ("image_url",) if k not in block]:
+                        err = (
+                            f"Unrecognized content block at "
+                            f"messages[{i}].content[{j}] has 'type': 'image_url' "
+                            f"but is missing expected key(s) "
+                            f"{missing}. Full content block:\n\n{block}"
+                        )
+                        raise ValueError(err)
+                    content.append(
+                        {
+                            "type": "image_url",
+                            "image_url": block["image_url"],
+                        }
+                    )
+                # Anthropic and Bedrock converse format
+                elif (block.get("type") == "image") or "image" in block:
+                    # Anthropic
+                    if source := block.get("source"):
+                        if missing := [
+                            k for k in ("media_type", "type", "data") if k not in source
+                        ]:
+                            err = (
+                                f"Unrecognized content block at "
+                                f"messages[{i}].content[{j}] has 'type': 'image' "
+                                f"but 'source' is missing expected key(s) "
+                                f"{missing}. Full content block:\n\n{block}"
+                            )
+                            raise ValueError(err)
+                        content.append(
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": (
+                                        f"data:{source['media_type']};"
+                                        f"{source['type']},{source['data']}"
+                                    )
+                                },
+                            }
+                        )
+                    # Bedrock converse
+                    elif image := block.get("image"):
+                        if missing := [
+                            k for k in ("source", "format") if k not in image
+                        ]:
+                            err = (
+                                f"Unrecognized content block at "
+                                f"messages[{i}].content[{j}] has key 'image', "
+                                f"but 'image' is missing expected key(s) "
+                                f"{missing}. Full content block:\n\n{block}"
+                            )
+                            raise ValueError(err)
+                        b64_image = _bytes_to_b64_str(image["source"]["bytes"])
+                        content.append(
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": (
+                                        f"data:image/{image['format']};"
+                                        f"base64,{b64_image}"
+                                    )
+                                },
+                            }
+                        )
+                    else:
+                        err = (
+                            f"Unrecognized content block at "
+                            f"messages[{i}].content[{j}] has 'type': 'image' "
+                            f"but does not have a 'source' or 'image' key. Full "
+                            f"content block:\n\n{block}"
+                        )
+                        raise ValueError(err)
+                elif block.get("type") == "tool_use":
+                    if missing := [
+                        k for k in ("id", "name", "input") if k not in block
+                    ]:
+                        err = (
+                            f"Unrecognized content block at "
+                            f"messages[{i}].content[{j}] has 'type': "
+                            f"'tool_use', but is missing expected key(s) "
+                            f"{missing}. Full content block:\n\n{block}"
+                        )
+                        raise ValueError(err)
+                    if not any(
+                        tool_call["id"] == block["id"]
+                        for tool_call in cast("AIMessage", message).tool_calls
+                    ):
+                        oai_msg["tool_calls"] = oai_msg.get("tool_calls", [])
+                        oai_msg["tool_calls"].append(
+                            {
+                                "type": "function",
+                                "id": block["id"],
+                                "function": {
+                                    "name": block["name"],
+                                    "arguments": json.dumps(block["input"]),
+                                },
+                            }
+                        )
+                elif block.get("type") == "tool_result":
+                    if missing := [
+                        k for k in ("content", "tool_use_id") if k not in block
+                    ]:
+                        msg = (
+                            f"Unrecognized content block at "
+                            f"messages[{i}].content[{j}] has 'type': "
+                            f"'tool_result', but is missing expected key(s) "
+                            f"{missing}. Full content block:\n\n{block}"
+                        )
+                        raise ValueError(msg)
+                    tool_message = ToolMessage(
+                        block["content"],
+                        tool_call_id=block["tool_use_id"],
+                        status="error" if block.get("is_error") else "success",
+                    )
+                    # Recurse to make sure tool message contents are OpenAI format.
+                    tool_messages.extend(
+                        convert_to_openai_messages(
+                            [tool_message], text_format=text_format
+                        )
+                    )
+                elif (block.get("type") == "json") or "json" in block:
+                    if "json" not in block:
+                        msg = (
+                            f"Unrecognized content block at "
+                            f"messages[{i}].content[{j}] has 'type': 'json' "
+                            f"but does not have a 'json' key. Full "
+                            f"content block:\n\n{block}"
+                        )
+                        raise ValueError(msg)
+                    content.append(
+                        {
+                            "type": "text",
+                            "text": json.dumps(block["json"]),
+                        }
+                    )
+                elif (block.get("type") == "guard_content") or "guard_content" in block:
+                    if (
+                        "guard_content" not in block
+                        or "text" not in block["guard_content"]
+                    ):
+                        msg = (
+                            f"Unrecognized content block at "
+                            f"messages[{i}].content[{j}] has 'type': "
+                            f"'guard_content' but does not have a "
+                            f"messages[{i}].content[{j}]['guard_content']['text'] "
+                            f"key. Full content block:\n\n{block}"
+                        )
+                        raise ValueError(msg)
+                    text = block["guard_content"]["text"]
+                    if isinstance(text, dict):
+                        text = text["text"]
+                    content.append({"type": "text", "text": text})
+                # VertexAI format
+                elif block.get("type") == "media":
+                    if missing := [k for k in ("mime_type", "data") if k not in block]:
+                        err = (
+                            f"Unrecognized content block at "
+                            f"messages[{i}].content[{j}] has 'type': "
+                            f"'media' but does not have key(s) {missing}. Full "
+                            f"content block:\n\n{block}"
+                        )
+                        raise ValueError(err)
+                    if "image" not in block["mime_type"]:
+                        err = (
+                            f"OpenAI messages can only support text and image data."
+                            f" Received content block with media of type:"
+                            f" {block['mime_type']}"
+                        )
+                        raise ValueError(err)
+                    b64_image = _bytes_to_b64_str(block["data"])
+                    content.append(
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": (f"data:{block['mime_type']};base64,{b64_image}")
+                            },
+                        }
+                    )
+                elif block.get("type") == "thinking":
+                    content.append(block)
+                else:
+                    err = (
+                        f"Unrecognized content block at "
+                        f"messages[{i}].content[{j}] does not match OpenAI, "
+                        f"Anthropic, Bedrock Converse, or VertexAI format. Full "
+                        f"content block:\n\n{block}"
+                    )
+                    raise ValueError(err)
+            if text_format == "string" and not any(
+                block["type"] != "text" for block in content
+            ):
+                content = "\n".join(block["text"] for block in content)
         oai_msg["content"] = content
         if message.content and not oai_msg["content"] and tool_messages:
             oai_messages.extend(tool_messages)
@@ -118,26 +118,23 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
                     }
                 except json.JSONDecodeError:
                     return None
-            else:
-                if self.args_only:
-                    try:
-                        return json.loads(
-                            function_call["arguments"], strict=self.strict
-                        )
-                    except (json.JSONDecodeError, TypeError) as exc:
-                        msg = f"Could not parse function call data: {exc}"
-                        raise OutputParserException(msg) from exc
-                else:
-                    try:
-                        return {
-                            **function_call,
-                            "arguments": json.loads(
-                                function_call["arguments"], strict=self.strict
-                            ),
-                        }
-                    except (json.JSONDecodeError, TypeError) as exc:
-                        msg = f"Could not parse function call data: {exc}"
-                        raise OutputParserException(msg) from exc
+            elif self.args_only:
+                try:
+                    return json.loads(function_call["arguments"], strict=self.strict)
+                except (json.JSONDecodeError, TypeError) as exc:
+                    msg = f"Could not parse function call data: {exc}"
+                    raise OutputParserException(msg) from exc
+            else:
+                try:
+                    return {
+                        **function_call,
+                        "arguments": json.loads(
+                            function_call["arguments"], strict=self.strict
+                        ),
+                    }
+                except (json.JSONDecodeError, TypeError) as exc:
+                    msg = f"Could not parse function call data: {exc}"
+                    raise OutputParserException(msg) from exc
         except KeyError:
             return None
@@ -9,10 +9,9 @@ from typing import Any, Callable, Literal
 
 from pydantic import BaseModel, create_model
 
-import langchain_core.utils.mustache as mustache
 from langchain_core.prompt_values import PromptValue, StringPromptValue
 from langchain_core.prompts.base import BasePromptTemplate
-from langchain_core.utils import get_colored_text
+from langchain_core.utils import get_colored_text, mustache
 from langchain_core.utils.formatting import formatter
 from langchain_core.utils.interactive_env import is_interactive_env
@@ -351,9 +351,7 @@ class Graph:
         """
         self.nodes.pop(node.id)
         self.edges = [
-            edge
-            for edge in self.edges
-            if edge.source != node.id and edge.target != node.id
+            edge for edge in self.edges if node.id not in (edge.source, edge.target)
         ]
 
     def add_edge(
@@ -401,7 +401,7 @@ def _render_mermaid_using_api(
         f"?type={file_type}&bgColor={background_color}"
     )
     response = requests.get(image_url, timeout=10)
-    if response.status_code == 200:
+    if response.status_code == requests.codes.ok:
         img_bytes = response.content
         if output_file_path is not None:
             Path(output_file_path).write_bytes(response.content)
@@ -79,9 +79,8 @@ class RootListenersTracer(BaseTracer):
         if run.error is None:
             if self._arg_on_end is not None:
                 call_func_with_variable_args(self._arg_on_end, run, self.config)
-        else:
-            if self._arg_on_error is not None:
-                call_func_with_variable_args(self._arg_on_error, run, self.config)
+        elif self._arg_on_error is not None:
+            call_func_with_variable_args(self._arg_on_error, run, self.config)
 
 
 class AsyncRootListenersTracer(AsyncBaseTracer):
@@ -143,8 +142,5 @@ class AsyncRootListenersTracer(AsyncBaseTracer):
         if run.error is None:
             if self._arg_on_end is not None:
                 await acall_func_with_variable_args(self._arg_on_end, run, self.config)
-        else:
-            if self._arg_on_error is not None:
-                await acall_func_with_variable_args(
-                    self._arg_on_error, run, self.config
-                )
+        elif self._arg_on_error is not None:
+            await acall_func_with_variable_args(self._arg_on_error, run, self.config)
@@ -7,6 +7,8 @@ from langchain_core.tracers.base import BaseTracer
 from langchain_core.tracers.schemas import Run
 from langchain_core.utils.input import get_bolded_text, get_colored_text
 
+MILLISECONDS_IN_SECOND = 1000
+
 
 def try_json_stringify(obj: Any, fallback: str) -> str:
     """Try to stringify an object to JSON.
@@ -36,10 +38,10 @@ def elapsed(run: Any) -> str:
 
     """
     elapsed_time = run.end_time - run.start_time
-    milliseconds = elapsed_time.total_seconds() * 1000
-    if milliseconds < 1000:
-        return f"{milliseconds:.0f}ms"
-    return f"{(milliseconds / 1000):.2f}s"
+    seconds = elapsed_time.total_seconds()
+    if seconds < 1:
+        return f"{seconds * MILLISECONDS_IN_SECOND:.0f}ms"
+    return f"{seconds:.2f}s"
 
 
 class FunctionCallbackHandler(BaseTracer):
@@ -85,7 +85,7 @@ def extract_sub_links(
         try:
             parsed_link = urlparse(link)
             # Some may be absolute links like https://to/path
-            if parsed_link.scheme == "http" or parsed_link.scheme == "https":
+            if parsed_link.scheme in {"http", "https"}:
                 absolute_path = link
             # Some may have omitted the protocol like //to/path
             elif link.startswith("//"):
@@ -78,20 +78,19 @@ def parse_partial_json(s: str, *, strict: bool = False) -> Any:
                 escaped = not escaped
             else:
                 escaped = False
-        else:
-            if char == '"':
-                is_inside_string = True
-                escaped = False
-            elif char == "{":
-                stack.append("}")
-            elif char == "[":
-                stack.append("]")
-            elif char == "}" or char == "]":
-                if stack and stack[-1] == char:
-                    stack.pop()
-                else:
-                    # Mismatched closing character; the input is malformed.
-                    return None
+        elif char == '"':
+            is_inside_string = True
+            escaped = False
+        elif char == "{":
+            stack.append("}")
+        elif char == "[":
+            stack.append("]")
+        elif char in {"}", "]"}:
+            if stack and stack[-1] == char:
+                stack.pop()
+            else:
+                # Mismatched closing character; the input is malformed.
+                return None
 
         # Append the processed character to the new string.
         new_chars.append(new_char)
@@ -459,12 +459,11 @@ def render(
         # Then we don't need to tokenize it
         # But it does need to be a generator
         tokens: Iterator[tuple[str, str]] = (token for token in template)
+    elif template in g_token_cache:
+        tokens = (token for token in g_token_cache[template])
     else:
-        if template in g_token_cache:
-            tokens = (token for token in g_token_cache[template])
-        else:
-            # Otherwise make a generator
-            tokens = tokenize(template, def_ldel, def_rdel)
+        # Otherwise make a generator
+        tokens = tokenize(template, def_ldel, def_rdel)
 
     output = ""
 
@@ -103,7 +103,7 @@ ignore = [
     "FBT001",
    "FBT002",
     "PGH003",
-    "PLR",
+    "PLR2004",
     "RUF",
     "SLF",
 ]
@@ -112,11 +112,8 @@ def pytest_collection_modifyitems(
                     pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
                 )
                 break
-        else:
-            if only_extended:
-                item.add_marker(
-                    pytest.mark.skip(reason="Skipping not an extended test.")
-                )
+        elif only_extended:
+            item.add_marker(pytest.mark.skip(reason="Skipping not an extended test."))
 
 
 @pytest.fixture
@@ -1,4 +1,4 @@
-import langchain_core.tracers.schemas as schemas
+from langchain_core.tracers import schemas
 from langchain_core.tracers.schemas import __all__ as schemas_all
 
 