diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 9e6d24c9b6e..9311ac4fff8 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -32,6 +32,7 @@ from urllib.parse import urlparse
 import certifi
 import openai
 import tiktoken
+from langchain_core._api.beta_decorator import warn_beta
 from langchain_core._api.deprecation import deprecated
 from langchain_core.callbacks import (
     AsyncCallbackManagerForLLMRun,
@@ -416,6 +417,7 @@ def _handle_openai_bad_request(e: openai.BadRequestError) -> None:
 
 
 _MessageContent = Union[str, list[Union[str, dict]]]
+WARNED_IMAGE_GEN_BETA = False
 
 
 def _get_image_bytes_from_content(
@@ -1279,10 +1281,20 @@ class BaseChatOpenAI(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
+        global WARNED_IMAGE_GEN_BETA
+        if not WARNED_IMAGE_GEN_BETA:
+            warn_beta(message="Image generation via ChatOpenAI is in beta.")
+            WARNED_IMAGE_GEN_BETA = True
+
         prompt = messages[-1].text()
-        images = []
-        for message in messages:
-            images.extend(_get_image_bytes_from_content(message.content))
+
+        # Get last set of images
+        for message in reversed(messages):
+            images = _get_image_bytes_from_content(message.content)
+            if images:
+                break
+        else:
+            images = []
         if images:
             result: ImagesResponse = self.root_client.images.edit(
                 model=self.model_name, image=images, prompt=prompt, **kwargs
             )
@@ -1291,7 +1303,27 @@
             result = self.root_client.images.generate(
                 model=self.model_name, prompt=prompt, **kwargs
             )
-        output_message = AIMessage(content="", response_metadata=result.model_dump())
+        image_blocks = []
+        if result.data:
+            for image in result.data:
+                if image.b64_json:
+                    image_blocks.append(
+                        {
+                            "type": "image",
+                            "source_type": "base64",
+                            "data": image.b64_json,
+                            "mime_type": "image/png",
+                        }
+                    )
+        if result.usage:
+            usage_metadata = _create_usage_metadata_responses(result.usage.model_dump())
+        else:
+            usage_metadata = None
+        output_message = AIMessage(
+            content=image_blocks or "",  # type: ignore[arg-type]
+            response_metadata={"created": result.created},
+            usage_metadata=usage_metadata,
+        )
         return ChatResult(generations=[ChatGeneration(message=output_message)])
 
     def _get_encoding_model(self) -> tuple[str, tiktoken.Encoding]:
diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml
index c2468fd3c07..7afb66316c3 100644
--- a/libs/partners/openai/pyproject.toml
+++ b/libs/partners/openai/pyproject.toml
@@ -8,7 +8,7 @@ license = { text = "MIT" }
 requires-python = "<4.0,>=3.9"
 dependencies = [
     "langchain-core<1.0.0,>=0.3.53",
-    "openai<2.0.0,>=1.68.2",
+    "openai<2.0.0,>=1.76.0",
     "tiktoken<1,>=0.7",
 ]
 name = "langchain-openai"
diff --git a/libs/partners/openai/uv.lock b/libs/partners/openai/uv.lock
index f0e68ea97eb..a1f03a8f75f 100644
--- a/libs/partners/openai/uv.lock
+++ b/libs/partners/openai/uv.lock
@@ -463,7 +463,7 @@ wheels = [
 
 [[package]]
 name = "langchain-core"
-version = "0.3.53"
+version = "0.3.55"
 source = { editable = "../../core" }
 dependencies = [
     { name = "jsonpatch" },
@@ -571,7 +571,7 @@ typing = [
 
 [package.metadata]
 requires-dist = [
     { name = "langchain-core", editable = "../../core" },
-    { name = "openai", specifier = ">=1.68.2,<2.0.0" },
+    { name = "openai", specifier = ">=1.76.0,<2.0.0" },
     { name = "tiktoken", specifier = ">=0.7,<1" },
 ]
 
@@ -829,7 +829,7 @@ wheels = [
 
 [[package]]
 name = "openai"
-version = "1.68.2"
+version = "1.76.0"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "anyio" },
     { name = "distro" },
     { name = "httpx" },
     { name = "jiter" },
     { name = "pydantic" },
     { name = "sniffio" },
     { name = "tqdm" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/3f/6b/6b002d5d38794645437ae3ddb42083059d556558493408d39a0fcea608bc/openai-1.68.2.tar.gz", hash = "sha256:b720f0a95a1dbe1429c0d9bb62096a0d98057bcda82516f6e8af10284bdd5b19", size = 413429 }
+sdist = { url = "https://files.pythonhosted.org/packages/84/51/817969ec969b73d8ddad085670ecd8a45ef1af1811d8c3b8a177ca4d1309/openai-1.76.0.tar.gz", hash = "sha256:fd2bfaf4608f48102d6b74f9e11c5ecaa058b60dad9c36e409c12477dfd91fb2", size = 434660 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/fd/34/cebce15f64eb4a3d609a83ac3568d43005cc9a1cba9d7fde5590fd415423/openai-1.68.2-py3-none-any.whl", hash = "sha256:24484cb5c9a33b58576fdc5acf0e5f92603024a4e39d0b99793dfa1eb14c2b36", size = 606073 },
+    { url = "https://files.pythonhosted.org/packages/59/aa/84e02ab500ca871eb8f62784426963a1c7c17a72fea3c7f268af4bbaafa5/openai-1.76.0-py3-none-any.whl", hash = "sha256:a712b50e78cf78e6d7b2a8f69c4978243517c2c36999756673e07a14ce37dc0a", size = 661201 },
 ]
 
 [[package]]