Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-20 13:54:48 +00:00)
google-vertexai[patch]: serializable citation metadata, release 0.0.4 (#17145)
The raw citation metadata proto returned by Vertex AI is not JSON-serializable and was breaking in LangServe before; it is now converted to a plain dict (or None) in the generation info.
commit 52be84a603
parent 19ff81e74f
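For context: the generation info for Gemini candidates previously passed the citation metadata through as a raw proto-plus message, which the standard json encoder rejects, and that is presumably what LangServe tripped over. A minimal sketch of the failure mode, using a hypothetical stand-in message rather than the real Vertex AI CitationMetadata type:

import json

import proto  # proto-plus, the same library the patch imports


class CitationStub(proto.Message):
    """Hypothetical stand-in for Vertex AI's CitationMetadata proto."""

    uri = proto.Field(proto.STRING, number=1)


metadata = CitationStub(uri="https://example.com/source")
try:
    json.dumps({"citation_metadata": metadata})
except TypeError as err:
    # proto messages are not JSON-serializable; this is the error LangServe hit
    print(f"serialization failed: {err}")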
@@ -5,6 +5,7 @@ from importlib import metadata
 from typing import Any, Callable, Dict, Optional, Union
 
 import google.api_core
+import proto  # type: ignore[import-untyped]
 from google.api_core.gapic_v1.client_info import ClientInfo
 from google.cloud import storage
 from langchain_core.callbacks import (
@@ -114,7 +115,11 @@ def get_generation_info(
                 }
                 for rating in candidate.safety_ratings
             ],
-            "citation_metadata": candidate.citation_metadata,
+            "citation_metadata": (
+                proto.Message.to_dict(candidate.citation_metadata)
+                if candidate.citation_metadata
+                else None
+            ),
         }
     # https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/text-chat#response_body
     else:
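The hunk above is the core fix: the proto message is converted to plain built-in types with proto.Message.to_dict, and missing metadata falls back to None. A self-contained sketch of the same pattern (again with a stand-in message, not the package's real code path):

import json

import proto


class CitationStub(proto.Message):
    """Hypothetical stand-in for Vertex AI's CitationMetadata proto."""

    uri = proto.Field(proto.STRING, number=1)


def serializable_citation_metadata(citation_metadata):
    # Mirrors the expression in the hunk above: convert the proto message to a
    # plain dict when present, otherwise return None.
    return proto.Message.to_dict(citation_metadata) if citation_metadata else None


print(json.dumps(serializable_citation_metadata(CitationStub(uri="https://example.com"))))
# {"uri": "https://example.com"}
print(json.dumps(serializable_citation_metadata(None)))
# null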
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-vertexai"
-version = "0.0.3"
+version = "0.0.4"
 description = "An integration package connecting GoogleVertexAI and LangChain"
 authors = []
 readme = "README.md"
@@ -1,6 +1,6 @@
 import os
 import re
-from typing import List, Union
+from typing import Any, List, Union
 
 from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
 from langchain_core.messages import AIMessageChunk
@@ -44,11 +44,11 @@ class _TestOutputParser(BaseOutputParser):
 
 
 def test_tools() -> None:
-    from langchain.agents import AgentExecutor  # type: ignore[import-not-found]
-    from langchain.agents.format_scratchpad import (  # type: ignore[import-not-found]
+    from langchain.agents import AgentExecutor
+    from langchain.agents.format_scratchpad import (
         format_to_openai_function_messages,
     )
-    from langchain.chains import LLMMathChain  # type: ignore[import-not-found]
+    from langchain.chains import LLMMathChain
 
     llm = ChatVertexAI(model_name="gemini-pro")
     math_chain = LLMMathChain.from_llm(llm=llm)
@@ -67,8 +67,8 @@ def test_tools() -> None:
     )
     llm_with_tools = llm.bind(functions=tools)
 
-    agent = (
-        {  # type: ignore[var-annotated]
+    agent: Any = (
+        {
             "input": lambda x: x["input"],
             "agent_scratchpad": lambda x: format_to_openai_function_messages(
                 x["intermediate_steps"]
@@ -115,7 +115,7 @@ def test_multiple_tools() -> None:
     from langchain.agents import AgentExecutor
     from langchain.agents.format_scratchpad import format_to_openai_function_messages
     from langchain.chains import LLMMathChain
-    from langchain.utilities import (  # type: ignore[import-not-found]
+    from langchain.utilities import (
         GoogleSearchAPIWrapper,
     )
 
@@ -149,8 +149,8 @@ def test_multiple_tools() -> None:
     )
     llm_with_tools = llm.bind(functions=tools)
 
-    agent = (
-        {  # type: ignore[var-annotated]
+    agent: Any = (
+        {
             "input": lambda x: x["input"],
             "agent_scratchpad": lambda x: format_to_openai_function_messages(
                 x["intermediate_steps"]
@@ -187,7 +187,7 @@ def test_default_params_gemini() -> None:
         StubGeminiResponse(
             text="Goodbye",
             content=Mock(parts=[Mock(function_call=None)]),
-            citation_metadata=Mock(),
+            citation_metadata=None,
         )
     ]
     mock_chat = MagicMock()