Merge branch 'master' into pprados/07-zeroxpdf
Commit 2a5c399730
.github/workflows/_release.yml (vendored): 22 changes
@@ -336,7 +336,6 @@ jobs:
      - release-notes
      - test-pypi-publish
      - pre-release-checks
    if: ${{ startsWith(inputs.working-directory, 'libs/core') }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
@@ -355,17 +354,29 @@ jobs:
    steps:
      - uses: actions/checkout@v4

      # We implement this conditional as Github Actions does not have good support
      # for conditionally needing steps. https://github.com/actions/runner/issues/491
      - name: Check if libs/core
        run: |
          if [ "${{ startsWith(inputs.working-directory, 'libs/core') }}" != "true" ]; then
            echo "Not in libs/core. Exiting successfully."
            exit 0
          fi

      - name: Set up Python + uv
        if: startsWith(inputs.working-directory, 'libs/core')
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - uses: actions/download-artifact@v4
        if: startsWith(inputs.working-directory, 'libs/core')
        with:
          name: dist
          path: ${{ inputs.working-directory }}/dist/

      - name: Test against ${{ matrix.partner }}
        if: startsWith(inputs.working-directory, 'libs/core')
        run: |
          # Identify latest tag
          LATEST_PACKAGE_TAG="$(
@@ -401,15 +412,6 @@ jobs:
      - test-pypi-publish
      - pre-release-checks
      - test-prior-published-packages-against-new-core
    if: >
      always() &&
      needs.build.result == 'success' &&
      needs.release-notes.result == 'success' &&
      needs.test-pypi-publish.result == 'success' &&
      needs.pre-release-checks.result == 'success' && (
        (startsWith(inputs.working-directory, 'libs/core') && needs.test-prior-published-packages-against-new-core.result == 'success')
        || (!startsWith(inputs.working-directory, 'libs/core'))
      )
    runs-on: ubuntu-latest
    permissions:
      # This permission is used for trusted publishing:
@@ -1,6 +1,6 @@
"""Tool for the Tavily search API."""

from typing import Dict, List, Literal, Optional, Tuple, Type, Union
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union

from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
@@ -149,6 +149,15 @@ class TavilySearchResults(BaseTool):  # type: ignore[override, override]
    api_wrapper: TavilySearchAPIWrapper = Field(default_factory=TavilySearchAPIWrapper)  # type: ignore[arg-type]
    response_format: Literal["content_and_artifact"] = "content_and_artifact"

    def __init__(self, **kwargs: Any) -> None:
        # Create api_wrapper with tavily_api_key if provided
        if "tavily_api_key" in kwargs:
            kwargs["api_wrapper"] = TavilySearchAPIWrapper(
                tavily_api_key=kwargs["tavily_api_key"]
            )

        super().__init__(**kwargs)

    def _run(
        self,
        query: str,
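The new `__init__` above lets callers pass `tavily_api_key` directly and have it forwarded into the underlying `TavilySearchAPIWrapper`; previously the key typically had to come from the `TAVILY_API_KEY` environment variable or an explicitly constructed `api_wrapper`. A minimal usage sketch follows; the import path and the key value are assumptions for illustration, not part of this diff.

```python
# Usage sketch (assumed import path; key value is a placeholder).
from langchain_community.tools.tavily_search import TavilySearchResults

# With this change, the API key can be passed straight to the tool and is
# used to build the wrapped TavilySearchAPIWrapper internally.
tool = TavilySearchResults(tavily_api_key="tvly-placeholder", max_results=3)

# Invoke like any other LangChain tool; returns search results as content
# (with the raw response available as an artifact, per response_format).
results = tool.invoke({"query": "LangChain Tavily integration"})
print(results)
```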
@@ -98,8 +98,7 @@ class Reviver:
            else:
                if self.secrets_from_env and key in os.environ and os.environ[key]:
                    return os.environ[key]
                msg = f'Missing key "{key}" in load(secrets_map)'
                raise KeyError(msg)
                return None

        if (
            value.get("lc") == 1
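For context, this hunk appears to stop raising `KeyError` when a serialized secret cannot be resolved from `secrets_map` or the environment, letting deserialization proceed and deferring any failure to the point where the reconstructed object is actually instantiated (see the new `test_loads_with_missing_secrets` further down). A small sketch of the caller-side behavior, assuming `langchain_openai` is installed; the model name and key values are placeholders:

```python
# Sketch of load()-time secret resolution; values are placeholders.
from langchain_core.load import dumpd, load
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", api_key="sk-placeholder")
serialized = dumpd(llm)  # secrets are dumped as {"type": "secret", ...} stubs

# Supplying the secret explicitly resolves it during deserialization.
restored = load(serialized, secrets_map={"OPENAI_API_KEY": "sk-placeholder"})

# Omitting the secret (and the OPENAI_API_KEY env var) no longer fails with
# KeyError while reviving the secret; instead, any error surfaces when the
# provider class is constructed, e.g. openai.OpenAIError for ChatOpenAI.
```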
@@ -17,7 +17,7 @@ dependencies = [
    "pydantic<3.0.0,>=2.7.4; python_full_version >= \"3.12.4\"",
]
name = "langchain-core"
version = "0.3.45-rc.1"
version = "0.3.45"
description = "Building applications with LLMs through composability"
readme = "README.md"

@@ -935,7 +935,7 @@ wheels = [

[[package]]
name = "langchain-core"
version = "0.3.44"
version = "0.3.45"
source = { editable = "." }
dependencies = [
    { name = "jsonpatch" },
@@ -167,3 +167,13 @@ def test_load_llmchain_with_non_serializable_arg() -> None:
    chain_obj = dumpd(chain)
    with pytest.raises(NotImplementedError):
        load(chain_obj, secrets_map={"OPENAI_API_KEY": "hello"})


@pytest.mark.requires("openai", "langchain_openai")
def test_loads_with_missing_secrets() -> None:
    import openai

    llm_string = '{"lc": 1, "type": "constructor", "id": ["langchain", "llms", "openai", "OpenAI"], "kwargs": {"model_name": "davinci", "temperature": 0.5, "max_tokens": 256, "top_p": 0.8, "n": 1, "best_of": 1, "openai_api_key": {"lc": 1, "type": "secret", "id": ["OPENAI_API_KEY"]}, "batch_size": 20, "max_retries": 2, "disallowed_special": "all"}, "name": "OpenAI"}'  # noqa: E501
    # Should throw on instantiation, not deserialization
    with pytest.raises(openai.OpenAIError):
        loads(llm_string)
@@ -7,14 +7,14 @@ authors = []
license = { text = "MIT" }
requires-python = "<4.0,>=3.9"
dependencies = [
    "langchain-core<1.0.0,>=0.3.37",
    "langchain-core<1.0.0,>=0.3.45",
    "tokenizers<1,>=0.15.1",
    "httpx<1,>=0.25.2",
    "httpx-sse<1,>=0.3.1",
    "pydantic<3,>=2",
]
name = "langchain-mistralai"
version = "0.2.7"
version = "0.2.8"
description = "An integration package connecting Mistral and LangChain"
readme = "README.md"
@@ -332,7 +332,7 @@ wheels = [

[[package]]
name = "langchain-core"
version = "0.3.37"
version = "0.3.45rc1"
source = { editable = "../../core" }
dependencies = [
    { name = "jsonpatch" },
@@ -390,7 +390,7 @@ typing = [

[[package]]
name = "langchain-mistralai"
version = "0.2.7"
version = "0.2.8"
source = { editable = "." }
dependencies = [
    { name = "httpx" },
@@ -450,7 +450,7 @@ typing = [

[[package]]
name = "langchain-tests"
version = "0.3.12"
version = "0.3.14"
source = { editable = "../../standard-tests" }
dependencies = [
    { name = "httpx" },
@@ -467,8 +467,7 @@ dependencies = [
requires-dist = [
    { name = "httpx", specifier = ">=0.25.0,<1" },
    { name = "langchain-core", editable = "../../core" },
    { name = "numpy", marker = "python_full_version < '3.12'", specifier = ">=1.24.0,<2.0.0" },
    { name = "numpy", marker = "python_full_version >= '3.12'", specifier = ">=1.26.2,<3" },
    { name = "numpy", specifier = ">=1.26.2,<3" },
    { name = "pytest", specifier = ">=7,<9" },
    { name = "pytest-asyncio", specifier = ">=0.20,<1" },
    { name = "pytest-socket", specifier = ">=0.6.0,<1" },
@@ -2139,6 +2139,71 @@ class ChatModelIntegrationTests(ChatModelTests):
        assert isinstance(result.content, str)
        assert len(result.content) > 0

    def test_agent_loop(self, model: BaseChatModel) -> None:
        """Test that the model supports a simple ReAct agent loop. This test is
        skipped if the ``has_tool_calling`` property on the test class is set to
        False.

        This test is optional and should be skipped if the model does not support
        tool calling (see Configuration below).

        .. dropdown:: Configuration

            To disable tool calling tests, set ``has_tool_calling`` to False in your
            test class:

            .. code-block:: python

                class TestMyChatModelIntegration(ChatModelIntegrationTests):
                    @property
                    def has_tool_calling(self) -> bool:
                        return False

        .. dropdown:: Troubleshooting

            If this test fails, check that ``bind_tools`` is implemented to correctly
            translate LangChain tool objects into the appropriate schema for your
            chat model.

            Check also that all required information (e.g., tool calling identifiers)
            from AIMessage objects is propagated correctly to model payloads.

            This test may fail if the chat model does not consistently generate tool
            calls in response to an appropriate query. In these cases you can ``xfail``
            the test:

            .. code-block:: python

                @pytest.mark.xfail(reason=("Does not support tool_choice."))
                def test_agent_loop(self, model: BaseChatModel) -> None:
                    super().test_agent_loop(model)

        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")

        @tool
        def get_weather(location: str) -> str:
            """Call to surf the web."""
            return "It's sunny."

        llm_with_tools = model.bind_tools([get_weather])
        input_message = HumanMessage("What is the weather in San Francisco, CA?")
        tool_call_message = llm_with_tools.invoke([input_message])
        assert isinstance(tool_call_message, AIMessage)
        tool_calls = tool_call_message.tool_calls
        assert len(tool_calls) == 1
        tool_call = tool_calls[0]
        tool_message = get_weather.invoke(tool_call)
        assert isinstance(tool_message, ToolMessage)
        response = llm_with_tools.invoke(
            [
                input_message,
                tool_call_message,
                tool_message,
            ]
        )
        assert isinstance(response, AIMessage)

    def invoke_with_audio_input(self, *, stream: bool = False) -> AIMessage:
        """:private:"""
        raise NotImplementedError()
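The `test_agent_loop` added above runs automatically for any integration package that subclasses the standard test suite. A minimal sketch of such a subclass; `my_partner_package`, `MyChatModel`, and the constructor parameters are hypothetical:

```python
# Hypothetical integration test module; MyChatModel and my_partner_package
# are placeholders for a real partner integration.
from typing import Type

from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests

from my_partner_package import MyChatModel


class TestMyChatModelIntegration(ChatModelIntegrationTests):
    @property
    def chat_model_class(self) -> Type[BaseChatModel]:
        return MyChatModel

    @property
    def chat_model_params(self) -> dict:
        # Constructor kwargs for the model under test.
        return {"model": "my-model-name", "temperature": 0}

    # Inherited tests, including the new test_agent_loop, run against
    # MyChatModel; override has_tool_calling to return False to skip
    # the tool-calling tests.
```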