From 616cdb82e9167970a84d0caedfb8bf04925b27f6 Mon Sep 17 00:00:00 2001
From: Mason Daugherty
Date: Sun, 8 Feb 2026 23:53:19 -0500
Subject: [PATCH] fix formatting

---
 .../langchain_model_profiles/cli.py        |  5 +-
 .../langchain_deepseek/data/_profiles.py   |  7 +--
 .../langchain_perplexity/data/_profiles.py | 11 +++--
 .../xai/langchain_xai/data/_profiles.py    | 47 ++++++++++---------
 4 files changed, 38 insertions(+), 32 deletions(-)

diff --git a/libs/model-profiles/langchain_model_profiles/cli.py b/libs/model-profiles/langchain_model_profiles/cli.py
index a7d4ef59622..946d9d82e5e 100644
--- a/libs/model-profiles/langchain_model_profiles/cli.py
+++ b/libs/model-profiles/langchain_model_profiles/cli.py
@@ -2,6 +2,7 @@
 
 import argparse
 import json
+import re
 import sys
 import tempfile
 from pathlib import Path
@@ -307,7 +308,7 @@ def refresh(provider: str, data_dir: Path) -> None:  # noqa: C901, PLR0915
     # Write as Python module
     output_file = data_dir / "_profiles.py"
     print(f"Writing to {output_file}...")
-    module_content = [f'"""{MODULE_ADMONITION}"""\n', "from typing import Any\n\n"]
+    module_content = [f'"""{MODULE_ADMONITION}"""\n\n', "from typing import Any\n\n"]
     module_content.append("_PROFILES: dict[str, dict[str, Any]] = ")
     json_str = json.dumps(profiles, indent=4)
     json_str = (
@@ -315,6 +316,8 @@ def refresh(provider: str, data_dir: Path) -> None:  # noqa: C901, PLR0915
         .replace("false", "False")
         .replace("null", "None")
     )
+    # Add trailing commas for ruff format compliance
+    json_str = re.sub(r"([^\s,{\[])(\n\s*[\}\]])", r"\1,\2", json_str)
     module_content.append(f"{json_str}\n")
 
     _write_profiles_file(output_file, "".join(module_content))
diff --git a/libs/partners/deepseek/langchain_deepseek/data/_profiles.py b/libs/partners/deepseek/langchain_deepseek/data/_profiles.py
index 46fc89345e7..f24a7b99074 100644
--- a/libs/partners/deepseek/langchain_deepseek/data/_profiles.py
+++ b/libs/partners/deepseek/langchain_deepseek/data/_profiles.py
@@ -12,6 +12,7 @@ To update these data, refer to the instructions here:
 
 https://docs.langchain.com/oss/python/langchain/models#updating-or-overwriting-profile-data
 """
+
 from typing import Any
 
 _PROFILES: dict[str, dict[str, Any]] = {
@@ -27,7 +28,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "deepseek-reasoner": {
         "max_input_tokens": 128000,
@@ -41,6 +42,6 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": True
-    }
+        "tool_calling": True,
+    },
 }
diff --git a/libs/partners/perplexity/langchain_perplexity/data/_profiles.py b/libs/partners/perplexity/langchain_perplexity/data/_profiles.py
index 3572429fb9a..9caac20bedb 100644
--- a/libs/partners/perplexity/langchain_perplexity/data/_profiles.py
+++ b/libs/partners/perplexity/langchain_perplexity/data/_profiles.py
@@ -12,6 +12,7 @@ To update these data, refer to the instructions here:
 
 https://docs.langchain.com/oss/python/langchain/models#updating-or-overwriting-profile-data
 """
+
 from typing import Any
 
 _PROFILES: dict[str, dict[str, Any]] = {
@@ -27,7 +28,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": False
+        "tool_calling": False,
     },
     "sonar-pro": {
         "max_input_tokens": 200000,
@@ -41,7 +42,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": False
+        "tool_calling": False,
     },
     "sonar-reasoning-pro": {
         "max_input_tokens": 128000,
@@ -55,7 +56,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": False
+        "tool_calling": False,
     },
     "sonar-deep-research": {
         "max_input_tokens": 128000,
@@ -67,6 +68,6 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": False
-    }
+        "tool_calling": False,
+    },
 }
diff --git a/libs/partners/xai/langchain_xai/data/_profiles.py b/libs/partners/xai/langchain_xai/data/_profiles.py
index f8730d1c64b..43e4d4d98b9 100644
--- a/libs/partners/xai/langchain_xai/data/_profiles.py
+++ b/libs/partners/xai/langchain_xai/data/_profiles.py
@@ -12,6 +12,7 @@ To update these data, refer to the instructions here:
 
 https://docs.langchain.com/oss/python/langchain/models#updating-or-overwriting-profile-data
 """
+
 from typing import Any
 
 _PROFILES: dict[str, dict[str, Any]] = {
@@ -27,7 +28,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-3-fast": {
         "max_input_tokens": 131072,
@@ -41,7 +42,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-4": {
         "max_input_tokens": 256000,
@@ -55,7 +56,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-2-vision": {
         "max_input_tokens": 8192,
@@ -69,7 +70,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-code-fast-1": {
         "max_input_tokens": 256000,
@@ -83,7 +84,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-2": {
         "max_input_tokens": 131072,
@@ -97,7 +98,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-3-mini-fast-latest": {
         "max_input_tokens": 131072,
@@ -111,7 +112,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-2-vision-1212": {
         "max_input_tokens": 8192,
@@ -125,7 +126,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-3": {
         "max_input_tokens": 131072,
@@ -139,7 +140,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-4-fast": {
         "max_input_tokens": 2000000,
@@ -153,7 +154,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-2-latest": {
         "max_input_tokens": 131072,
@@ -167,7 +168,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-4-1-fast": {
         "max_input_tokens": 2000000,
@@ -181,7 +182,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-2-1212": {
         "max_input_tokens": 131072,
@@ -195,7 +196,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-3-fast-latest": {
         "max_input_tokens": 131072,
@@ -209,7 +210,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-3-latest": {
         "max_input_tokens": 131072,
@@ -223,7 +224,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-2-vision-latest": {
         "max_input_tokens": 8192,
@@ -237,7 +238,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-vision-beta": {
         "max_input_tokens": 8192,
@@ -251,7 +252,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-3-mini": {
         "max_input_tokens": 131072,
@@ -265,7 +266,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-beta": {
         "max_input_tokens": 131072,
@@ -279,7 +280,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-3-mini-latest": {
         "max_input_tokens": 131072,
@@ -293,7 +294,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-4-1-fast-non-reasoning": {
         "max_input_tokens": 2000000,
@@ -307,7 +308,7 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": False,
-        "tool_calling": True
+        "tool_calling": True,
     },
     "grok-3-mini-fast": {
         "max_input_tokens": 131072,
@@ -321,6 +322,6 @@ _PROFILES: dict[str, dict[str, Any]] = {
         "audio_outputs": False,
         "video_outputs": False,
         "reasoning_output": True,
-        "tool_calling": True
-    }
+        "tool_calling": True,
+    },
 }