style(core,langchain-classic,openai): fix griffe warnings (#34074)

Author: Mason Daugherty
Date: 2025-11-23 01:06:46 -05:00
Committed by: GitHub
Parent: f070217c3b
Commit: cbaea351b2
6 changed files with 26 additions and 71 deletions


@@ -903,23 +903,28 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
             5. A string which is shorthand for `("human", template)`; e.g.,
                `"{user_input}"`
         template_format: Format of the template.
-        input_variables: A list of the names of the variables whose values are
-            required as inputs to the prompt.
-        optional_variables: A list of the names of the variables for placeholder
-            or MessagePlaceholder that are optional.
+        **kwargs: Additional keyword arguments passed to `BasePromptTemplate`,
+            including (but not limited to):

-            These variables are auto inferred from the prompt and user need not
-            provide them.
-        partial_variables: A dictionary of the partial variables the prompt
-            template carries.
+            - `input_variables`: A list of the names of the variables whose values
+              are required as inputs to the prompt.
+            - `optional_variables`: A list of the names of the variables for
+              placeholder or `MessagePlaceholder` that are optional.

-            Partial variables populate the template so that you don't need to pass
-            them in every time you call the prompt.
-        validate_template: Whether to validate the template.
-        input_types: A dictionary of the types of the variables the prompt template
-            expects.
+              These variables are auto inferred from the prompt and user need not
+              provide them.

-            If not provided, all variables are assumed to be strings.
+            - `partial_variables`: A dictionary of the partial variables the prompt
+              template carries.
+
+              Partial variables populate the template so that you don't need to
+              pass them in every time you call the prompt.
+            - `validate_template`: Whether to validate the template.
+            - `input_types`: A dictionary of the types of the variables the prompt
+              template expects.
+
+              If not provided, all variables are assumed to be strings.

     Examples:
         Instantiation from a list of message templates:
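For context on the `partial_variables` behavior the reworked docstring describes, a minimal sketch of how a partial fills a template so the caller only supplies the remaining variables (the template and values here are hypothetical, not from this commit):

```python
from langchain_core.prompts import ChatPromptTemplate

# Two input variables; `.partial()` pins `language` up front so only
# `question` is required at invoke time.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You answer in {language}."),
        ("human", "{question}"),
    ]
).partial(language="French")

# Only the remaining required variable is passed here.
messages = prompt.invoke({"question": "What is LangChain?"}).to_messages()
print(messages)
```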

libs/core/uv.lock (generated)

@@ -1,5 +1,5 @@
 version = 1
-revision = 2
+revision = 3
 requires-python = ">=3.10.0, <4.0.0"
 resolution-markers = [
     "python_full_version >= '3.14' and platform_python_implementation == 'PyPy'",
@@ -1053,7 +1053,7 @@ typing = [
 [[package]]
 name = "langchain-tests"
-version = "1.0.1"
+version = "1.0.2"
 source = { directory = "../standard-tests" }
 dependencies = [
     { name = "httpx" },


@@ -26,7 +26,6 @@ class ProgressBarCallback(base_callbacks.BaseCallbackHandler):
             total: The total number of items to be processed.
             ncols: The character width of the progress bar.
             end_with: Last string to print after progress bar reaches end.
-            **kwargs: Additional keyword arguments.
         """
         self.total = total
         self.ncols = ncols
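The dropped `**kwargs` entry documented a parameter that the initializer presumably does not accept, which is exactly what griffe flags. As a rough stand-in, a handler with the same three documented parameters can be sketched on `langchain_core`'s `BaseCallbackHandler` (this is an illustrative sketch, not the actual `ProgressBarCallback`):

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler


class SketchProgressBar(BaseCallbackHandler):
    """Illustrative stand-in for ProgressBarCallback; not the real class."""

    def __init__(self, total: int, ncols: int = 50, end_with: str = "\n") -> None:
        self.total = total  # total number of items to process
        self.ncols = ncols  # character width of the bar
        self.end_with = end_with  # printed once the bar reaches the end
        self.counter = 0

    def _print_bar(self) -> None:
        filled = int(self.ncols * self.counter / self.total)
        bar = "#" * filled + "-" * (self.ncols - filled)
        terminator = self.end_with if self.counter >= self.total else "\r"
        print(f"[{bar}] {self.counter}/{self.total}", end=terminator)

    def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
        # Each finished chain run advances the bar by one item.
        self.counter += 1
        self._print_bar()
```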


@@ -295,11 +295,7 @@ def _get_prompt(inputs: dict[str, Any]) -> str:
 class ChatModelInput(TypedDict):
-    """Input for a chat model.
-
-    Args:
-        messages: List of chat messages.
-    """
+    """Input for a chat model."""

     messages: list[BaseMessage]
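Since `ChatModelInput` is a `TypedDict`, the `Args:` section was redundant: the field is already typed, and instances are plain dicts. A small sketch of constructing one (the message content is hypothetical):

```python
from typing import TypedDict

from langchain_core.messages import BaseMessage, HumanMessage


class ChatModelInput(TypedDict):
    """Input for a chat model."""

    messages: list[BaseMessage]


# A TypedDict instance is an ordinary dict; the class only informs type checkers.
payload: ChatModelInput = {"messages": [HumanMessage(content="Hello!")]}
print(payload["messages"][0].content)
```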


@@ -108,8 +108,8 @@ class LLMStringRunMapper(StringRunMapper):
             The serialized output text from the first generation.

         Raises:
-            ValueError: If no generations are found in the outputs,
-                or if the generations are empty.
+            ValueError: If no generations are found in the outputs or if the generations
+                are empty.
         """
         if not outputs.get("generations"):
             msg = "Cannot evaluate LLM Run without generations."
@@ -436,8 +436,8 @@ class StringRunEvaluatorChain(Chain, RunEvaluator):
             The instantiated evaluation chain.

         Raises:
-            If the run type is not supported, or if the evaluator requires a
-                reference from the dataset but the reference key is not provided.
+            ValueError: If the run type is not supported, or if the evaluator requires a
+                reference from the dataset but the reference key is not provided.
         """
         # Configure how run inputs/predictions are passed to the evaluator
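Both hunks above put the exception name in front of the `Raises:` entry, which Google-style docstring parsers such as griffe need in order to attribute the description to an exception. A hypothetical free-standing function showing the corrected shape, with logic echoing the diff's own check:

```python
from typing import Any


def first_generation_text(outputs: dict[str, Any]) -> str:
    """Return the text of the first generation in a run's outputs.

    Raises:
        ValueError: If no generations are found in the outputs or if the
            generations are empty.
    """
    generations = outputs.get("generations")
    if not generations or not generations[0]:
        msg = "Cannot evaluate LLM Run without generations."
        raise ValueError(msg)
    return generations[0][0]["text"]
```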


@@ -905,51 +905,6 @@ class AzureChatOpenAI(BaseChatOpenAI):
             !!! note
                 `strict` can only be non-null if `method` is `'json_schema'`
                 or `'function_calling'`.
-        tools:
-            A list of tool-like objects to bind to the chat model. Requires that:
-
-            - `method` is `'json_schema'` (default).
-            - `strict=True`
-            - `include_raw=True`
-
-            If a model elects to call a
-            tool, the resulting `AIMessage` in `'raw'` will include tool calls.
-
-            ??? example
-
-                ```python
-                from langchain.chat_models import init_chat_model
-                from pydantic import BaseModel
-
-
-                class ResponseSchema(BaseModel):
-                    response: str
-
-
-                def get_weather(location: str) -> str:
-                    \"\"\"Get weather at a location.\"\"\"
-                    pass
-
-
-                model = init_chat_model("openai:gpt-4o-mini")
-
-                structured_model = model.with_structured_output(
-                    ResponseSchema,
-                    tools=[get_weather],
-                    strict=True,
-                    include_raw=True,
-                )
-
-                structured_model.invoke("What's the weather in Boston?")
-                ```
-
-                ```python
-                {
-                    "raw": AIMessage(content="", tool_calls=[...], ...),
-                    "parsing_error": None,
-                    "parsed": None,
-                }
-                ```
-        kwargs: Additional keyword args are passed through to the model.

         Returns:
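With the `tools` parameter and its example dropped from the docstring, a minimal sketch of the remaining documented usage of `with_structured_output` on `AzureChatOpenAI` (the Azure endpoint, deployment, and API version below are placeholders, not values from this commit):

```python
from langchain_openai import AzureChatOpenAI
from pydantic import BaseModel


class ResponseSchema(BaseModel):
    response: str


# Placeholder Azure settings; substitute your own endpoint and deployment.
model = AzureChatOpenAI(
    azure_endpoint="https://example.openai.azure.com/",
    azure_deployment="gpt-4o-mini",
    api_version="2024-10-21",
)

# Structured output without the now-undocumented `tools` argument.
structured_model = model.with_structured_output(
    ResponseSchema,
    method="json_schema",
    strict=True,
)
result = structured_model.invoke("Reply with one friendly sentence.")
print(result.response)
```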