Compare commits


6 Commits

Author           SHA1        Message                                                                                      Date
ccurme           0b51de4cab  release(core): 0.3.78 (#33253)                                                               2025-10-03 12:40:15 -04:00
ccurme           5904cbea89  feat(core): add optional include_id param to convert_to_openai_messages function (#33248)   2025-10-03 11:37:16 -04:00
Mason Daugherty  c9590ef79d  docs: fix infinite loop in vercel.json redirects (#33240)                                    2025-10-02 20:24:09 -04:00
Mason Daugherty  c972552c40  docs: work for freeze (#33239)                                                               2025-10-02 20:01:26 -04:00
Mason Daugherty  e16feb93b9  release(ollama): 0.3.10 (#33210)                                                             2025-10-02 11:35:24 -04:00
Mason Daugherty  2bb57d45d2  fix(ollama): exclude None parameters from options dictionary (#33208) (#33209)               2025-10-02 11:32:21 -04:00
                             fix #33206
13 changed files with 153 additions and 34 deletions


@@ -431,7 +431,7 @@ jobs:
   git ls-remote --tags origin "langchain-${{ matrix.partner }}*" \
     | awk '{print $2}' \
     | sed 's|refs/tags/||' \
-    | grep -E '[0-9]+\.[0-9]+\.[0-9]+$' \
+    | grep -E '==0\.3\.[0-9]+$' \
     | sort -Vr \
     | head -n 1
   )"


@@ -34,7 +34,7 @@ Currently, the build process roughly follows these steps:
 The docs site is served by Vercel. The Vercel deployment process copies the HTML
 files from the `langchain-api-docs-html` repository and deploys them to the live
-site. Deployments are triggered on each new commit pushed to `master`.
+site. Deployments are triggered on each new commit pushed to `v0.3`.

 #### Build Technical Details


@@ -87,7 +87,7 @@ const config = {
   ({
     docs: {
       editUrl:
-        "https://github.com/langchain-ai/langchain/edit/master/docs/",
+        "https://github.com/langchain-ai/langchain/edit/v0.3/docs/",
       sidebarPath: require.resolve("./sidebars.js"),
       remarkPlugins: [
         [require("@docusaurus/remark-plugin-npm2yarn"), { sync: true }],


@@ -16,14 +16,13 @@ fi
if { \
[ "$VERCEL_ENV" == "production" ] || \
[ "$VERCEL_GIT_COMMIT_REF" == "master" ] || \
[ "$VERCEL_GIT_COMMIT_REF" == "v0.1" ] || \
[ "$VERCEL_GIT_COMMIT_REF" == "v0.2" ] || \
[ "$VERCEL_GIT_COMMIT_REF" == "v0.3rc" ]; \
} && [ "$VERCEL_GIT_REPO_OWNER" == "langchain-ai" ]
then
echo "✅ Production build - proceeding with build"
exit 1
echo "✅ Production build - proceeding with build"
exit 1
fi


@@ -1040,6 +1040,7 @@ def convert_to_openai_messages(
     messages: Union[MessageLikeRepresentation, Sequence[MessageLikeRepresentation]],
     *,
     text_format: Literal["string", "block"] = "string",
+    include_id: bool = False,
 ) -> Union[dict, list[dict]]:
     """Convert LangChain messages into OpenAI message dicts.

@@ -1057,6 +1058,8 @@
         If a message has a string content, this is turned into a list
         with a single content block of type ``'text'``. If a message has
         content blocks these are left as is.
+        include_id: Whether to include message ids in the openai messages, if they
+            are present in the source messages.

     Raises:
         ValueError: if an unrecognized ``text_format`` is specified, or if a message

@@ -1145,6 +1148,8 @@
             oai_msg["refusal"] = message.additional_kwargs["refusal"]
         if isinstance(message, ToolMessage):
             oai_msg["tool_call_id"] = message.tool_call_id
+        if include_id and message.id:
+            oai_msg["id"] = message.id
         if not message.content:
             content = "" if text_format == "string" else []


@@ -1,3 +1,3 @@
 """langchain-core version information and utilities."""

-VERSION = "0.3.77"
+VERSION = "0.3.78"


@@ -16,7 +16,7 @@ dependencies = [
     "pydantic>=2.7.4,<3.0.0",
 ]
 name = "langchain-core"
-version = "0.3.77"
+version = "0.3.78"
 description = "Building applications with LLMs through composability"
 readme = "README.md"


@@ -882,10 +882,21 @@ def test_convert_to_openai_messages_string() -> None:
 def test_convert_to_openai_messages_single_message() -> None:
-    message = HumanMessage(content="Hello")
+    message: BaseMessage = HumanMessage(content="Hello")
     result = convert_to_openai_messages(message)
     assert result == {"role": "user", "content": "Hello"}

+    # Test IDs
+    result = convert_to_openai_messages(message, include_id=True)
+    assert result == {"role": "user", "content": "Hello"}  # no ID
+
+    message = AIMessage(content="Hello", id="resp_123")
+    result = convert_to_openai_messages(message)
+    assert result == {"role": "assistant", "content": "Hello"}
+
+    result = convert_to_openai_messages(message, include_id=True)
+    assert result == {"role": "assistant", "content": "Hello", "id": "resp_123"}
+

 def test_convert_to_openai_messages_multiple_messages() -> None:
     messages = [

libs/core/uv.lock (generated)

@@ -958,7 +958,7 @@ wheels = [
 [[package]]
 name = "langchain-core"
-version = "0.3.77"
+version = "0.3.78"
 source = { editable = "." }
 dependencies = [
     { name = "jsonpatch" },

@@ -1050,7 +1050,7 @@ typing = [
 [[package]]
 name = "langchain-tests"
-version = "0.3.21"
+version = "0.3.22"
 source = { directory = "../standard-tests" }
 dependencies = [
     { name = "httpx" },


@@ -648,26 +648,30 @@ class ChatOllama(BaseChatModel):
         if self.stop is not None:
             stop = self.stop

-        options_dict = kwargs.pop(
-            "options",
-            {
-                "mirostat": self.mirostat,
-                "mirostat_eta": self.mirostat_eta,
-                "mirostat_tau": self.mirostat_tau,
-                "num_ctx": self.num_ctx,
-                "num_gpu": self.num_gpu,
-                "num_thread": self.num_thread,
-                "num_predict": self.num_predict,
-                "repeat_last_n": self.repeat_last_n,
-                "repeat_penalty": self.repeat_penalty,
-                "temperature": self.temperature,
-                "seed": self.seed,
-                "stop": self.stop if stop is None else stop,
-                "tfs_z": self.tfs_z,
-                "top_k": self.top_k,
-                "top_p": self.top_p,
-            },
-        )
+        options_dict = kwargs.pop("options", None)
+        if options_dict is None:
+            # Only include parameters that are explicitly set (not None)
+            options_dict = {
+                k: v
+                for k, v in {
+                    "mirostat": self.mirostat,
+                    "mirostat_eta": self.mirostat_eta,
+                    "mirostat_tau": self.mirostat_tau,
+                    "num_ctx": self.num_ctx,
+                    "num_gpu": self.num_gpu,
+                    "num_thread": self.num_thread,
+                    "num_predict": self.num_predict,
+                    "repeat_last_n": self.repeat_last_n,
+                    "repeat_penalty": self.repeat_penalty,
+                    "temperature": self.temperature,
+                    "seed": self.seed,
+                    "stop": self.stop if stop is None else stop,
+                    "tfs_z": self.tfs_z,
+                    "top_k": self.top_k,
+                    "top_p": self.top_p,
+                }.items()
+                if v is not None
+            }

         params = {
             "messages": ollama_messages,


@@ -11,7 +11,7 @@ dependencies = [
     "langchain-core>=0.3.76,<2.0.0",
 ]
 name = "langchain-ollama"
-version = "0.3.9"
+version = "0.3.10"
 description = "An integration package connecting Ollama and LangChain"
 readme = "README.md"


@@ -24,7 +24,7 @@ MODEL_NAME = "llama3.1"
 @contextmanager
 def _mock_httpx_client_stream(
-    *args: Any, **kwargs: Any
+    *_args: Any, **_kwargs: Any
 ) -> Generator[Response, Any, Any]:
     yield Response(
         status_code=200,

@@ -310,3 +310,103 @@ def test_load_response_with_actual_content_is_not_skipped(
     assert result.content == "This is actual content"
     assert result.response_metadata.get("done_reason") == "load"
     assert not caplog.text
+
+
+def test_none_parameters_excluded_from_options() -> None:
+    """Test that None parameters are excluded from the options dict sent to Ollama."""
+    response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "stop",
+            "message": {"role": "assistant", "content": "Hello!"},
+        }
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = response
+
+        # Create ChatOllama with only num_ctx set
+        llm = ChatOllama(model="test-model", num_ctx=4096)
+        llm.invoke([HumanMessage("Hello")])
+
+        # Verify that chat was called
+        assert mock_client.chat.called
+
+        # Get the options dict that was passed to chat
+        call_kwargs = mock_client.chat.call_args[1]
+        options = call_kwargs.get("options", {})
+
+        # Only num_ctx should be in options, not None parameters
+        assert "num_ctx" in options
+        assert options["num_ctx"] == 4096
+
+        # These parameters should NOT be in options since they were None
+        assert "mirostat" not in options
+        assert "mirostat_eta" not in options
+        assert "mirostat_tau" not in options
+        assert "tfs_z" not in options
+
+
+def test_all_none_parameters_results_in_empty_options() -> None:
+    """Test that when all parameters are None, options dict is empty."""
+    response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "stop",
+            "message": {"role": "assistant", "content": "Hello!"},
+        }
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = response
+
+        # Create ChatOllama with no parameters set
+        llm = ChatOllama(model="test-model")
+        llm.invoke([HumanMessage("Hello")])
+
+        # Get the options dict that was passed to chat
+        call_kwargs = mock_client.chat.call_args[1]
+        options = call_kwargs.get("options", {})
+
+        # Options should be empty when no parameters are set
+        assert options == {}
+
+
+def test_explicit_options_dict_preserved() -> None:
+    """Test that explicitly provided options dict is preserved and not filtered."""
+    response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "stop",
+            "message": {"role": "assistant", "content": "Hello!"},
+        }
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = response
+
+        llm = ChatOllama(model="test-model")
+        # Pass explicit options dict, including None values
+        llm.invoke(
+            [HumanMessage("Hello")],
+            options={"temperature": 0.5, "custom_param": None},
+        )
+
+        # Get the options dict that was passed to chat
+        call_kwargs = mock_client.chat.call_args[1]
+        options = call_kwargs.get("options", {})
+
+        # Explicit options should be preserved as-is
+        assert options == {"temperature": 0.5, "custom_param": None}


@@ -381,7 +381,7 @@ typing = [
 [[package]]
 name = "langchain-ollama"
-version = "0.3.9"
+version = "0.3.10"
 source = { editable = "." }
 dependencies = [
     { name = "langchain-core" },