Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-22 06:39:52 +00:00.
Commit 085baef926 (parent 47ded80b64): ollama[patch]: support standard image format (#30864),
following https://github.com/langchain-ai/langchain/pull/30746.
@ -30,6 +30,7 @@ from langchain_core.messages import (
|
|||||||
SystemMessage,
|
SystemMessage,
|
||||||
ToolCall,
|
ToolCall,
|
||||||
ToolMessage,
|
ToolMessage,
|
||||||
|
is_data_content_block,
|
||||||
)
|
)
|
||||||
from langchain_core.messages.ai import UsageMetadata
|
from langchain_core.messages.ai import UsageMetadata
|
||||||
from langchain_core.messages.tool import tool_call
|
from langchain_core.messages.tool import tool_call
|
||||||
@ -173,6 +174,20 @@ def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _get_image_from_data_content_block(block: dict) -> str:
|
||||||
|
"""Format standard data content block to format expected by Ollama."""
|
||||||
|
if block["type"] == "image":
|
||||||
|
if block["source_type"] == "base64":
|
||||||
|
return block["data"]
|
||||||
|
else:
|
||||||
|
error_message = "Image data only supported through in-line base64 format."
|
||||||
|
raise ValueError(error_message)
|
||||||
|
|
||||||
|
else:
|
||||||
|
error_message = f"Blocks of type {block['type']} not supported."
|
||||||
|
raise ValueError(error_message)
|
||||||
|
|
||||||
|
|
||||||
def _is_pydantic_class(obj: Any) -> bool:
    """Return True if ``obj`` is a class satisfying ``is_basemodel_subclass``."""
    # Non-classes (instances, functions, etc.) can never qualify.
    if not isinstance(obj, type):
        return False
    return is_basemodel_subclass(obj)
@ -553,7 +568,9 @@ class ChatOllama(BaseChatModel):
|
|||||||
images.append(image_url_components[1])
|
images.append(image_url_components[1])
|
||||||
else:
|
else:
|
||||||
images.append(image_url_components[0])
|
images.append(image_url_components[0])
|
||||||
|
elif is_data_content_block(content_part):
|
||||||
|
image = _get_image_from_data_content_block(content_part)
|
||||||
|
images.append(image)
|
||||||
else:
|
else:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
"Unsupported message content type. "
|
"Unsupported message content type. "
|
||||||
|
@ -14,10 +14,6 @@ class TestChatOllama(ChatModelIntegrationTests):
|
|||||||
def chat_model_params(self) -> dict:
    """Keyword arguments used to construct the chat model under test."""
    # Pin the integration suite to a specific local Ollama model.
    return dict(model="llama3.1")
@property
|
|
||||||
def supports_image_inputs(self) -> bool:
    """Declare that the model under test accepts image inputs."""
    return True
@property
|
@property
|
||||||
def supports_json_mode(self) -> bool:
    """Declare that the model under test supports JSON-mode output."""
    return True
@ -25,3 +21,22 @@ class TestChatOllama(ChatModelIntegrationTests):
|
|||||||
@property
|
@property
|
||||||
def has_tool_choice(self) -> bool:
    """Declare that the model under test does not support tool choice."""
    return False
|
def test_image_model() -> None:
    """Run the standard image-input integration check against an image model.

    Builds a one-off ``ChatModelIntegrationTests`` subclass configured for an
    Ollama model that declares image-input support, instantiates the model
    from that configuration, and runs the suite's ``test_image_inputs``
    against it.
    """

    class ImageModelTests(ChatModelIntegrationTests):
        @property
        def chat_model_class(self) -> type[ChatOllama]:
            return ChatOllama

        @property
        def chat_model_params(self) -> dict:
            # gemma3:4b — model expected to accept image inputs
            # (consistent with supports_image_inputs below).
            return {"model": "gemma3:4b"}

        @property
        def supports_image_inputs(self) -> bool:
            return True

    test_instance = ImageModelTests()
    model = test_instance.chat_model_class(**test_instance.chat_model_params)
    # Fix: reuse the already-configured instance instead of constructing a
    # second, throwaway ImageModelTests() just to invoke the test method.
    test_instance.test_image_inputs(model)
Loading…
Reference in New Issue
Block a user