From 5fb8fd863a69f4e8f5d1ecb7e266fd9f877efdf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B9=9B=E9=9C=B2=E5=85=88=E7=94=9F?= Date: Mon, 28 Apr 2025 03:07:41 +0800 Subject: [PATCH] langchain_openai: clean duplicate code for openai embedding. (#30872) The `_chunk_size` is not changed by the method `self._tokenize`, so I think this is duplicate code. Signed-off-by: zhanluxianshen --- libs/partners/openai/langchain_openai/embeddings/base.py | 1 - 1 file changed, 1 deletion(-) diff --git a/libs/partners/openai/langchain_openai/embeddings/base.py b/libs/partners/openai/langchain_openai/embeddings/base.py index 826f3563adf..67ce54c0445 100644 --- a/libs/partners/openai/langchain_openai/embeddings/base.py +++ b/libs/partners/openai/langchain_openai/embeddings/base.py @@ -517,7 +517,6 @@ class OpenAIEmbeddings(BaseModel, Embeddings): _chunk_size = chunk_size or self.chunk_size _iter, tokens, indices = self._tokenize(texts, _chunk_size) batched_embeddings: list[list[float]] = [] - _chunk_size = chunk_size or self.chunk_size for i in range(0, len(tokens), _chunk_size): response = await self.async_client.create( input=tokens[i : i + _chunk_size], **self._invocation_params