fix: bump lockfiles (#31923)

* bump lockfiles after upgrading ruff
* apply the resulting lint fixes
Author:    Mason Daugherty
Date:      2025-07-08 13:27:55 -04:00
Committer: GitHub
Parent:    e7f1ceee67
Commit:    4d9eefecab

20 changed files with 5643 additions and 5631 deletions


@@ -12,8 +12,8 @@ from langchain_huggingface.llms import (
 __all__ = [
     "ChatHuggingFace",
-    "HuggingFaceEndpointEmbeddings",
     "HuggingFaceEmbeddings",
     "HuggingFaceEndpoint",
+    "HuggingFaceEndpointEmbeddings",
     "HuggingFacePipeline",
 ]


@@ -5,4 +5,4 @@ from langchain_huggingface.chat_models.huggingface import ( # type: ignore[impo
     _convert_dict_to_message,
 )
-__all__ = ["ChatHuggingFace", "_convert_dict_to_message", "TGI_MESSAGE", "TGI_RESPONSE"]
+__all__ = ["TGI_MESSAGE", "TGI_RESPONSE", "ChatHuggingFace", "_convert_dict_to_message"]


@@ -552,7 +552,7 @@ class ChatHuggingFace(BaseChatModel):
         messages: list[BaseMessage],
         stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
-        stream: Optional[bool] = None,
+        stream: Optional[bool] = None,  # noqa: FBT001
         **kwargs: Any,
     ) -> ChatResult:
         should_stream = stream if stream is not None else self.streaming
@@ -593,7 +593,7 @@ class ChatHuggingFace(BaseChatModel):
         messages: list[BaseMessage],
         stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
-        stream: Optional[bool] = None,
+        stream: Optional[bool] = None,  # noqa: FBT001
         **kwargs: Any,
     ) -> ChatResult:
         if _is_huggingface_textgen_inference(self.llm):
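The # noqa: FBT001 comments silence ruff's "boolean-typed positional argument" rule for the stream parameter. The rule's preferred fix would be to make the flag keyword-only, but that would break any caller passing it positionally, so the lint is suppressed in place instead. A minimal sketch of the trade-off (the function names below are made up for illustration, not part of the library):

from typing import Optional

def generate(prompt: str, stream: Optional[bool] = None) -> str:  # noqa: FBT001
    # Boolean positional parameter: callers may write generate("hi", True),
    # which FBT001 flags because a bare True is hard to read at the call site.
    return f"stream={stream}"

def generate_kw_only(prompt: str, *, stream: Optional[bool] = None) -> str:
    # Keyword-only variant that FBT001 accepts without a noqa, at the cost of
    # breaking every caller that passed the flag positionally.
    return f"stream={stream}"

print(generate("hi", True))                  # legal, but intent is opaque
print(generate_kw_only("hi", stream=True))   # intent is explicit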


@@ -76,7 +76,7 @@ class HuggingFaceEmbeddings(BaseModel, Embeddings):
         if self.model_kwargs.get("backend", "torch") == "ipex":
             if not is_optimum_intel_available() or not is_ipex_available():
-                msg = f'Backend: ipex {IMPORT_ERROR.format("optimum[ipex]")}'
+                msg = f"Backend: ipex {IMPORT_ERROR.format('optimum[ipex]')}"
                 raise ImportError(msg)
             if is_optimum_intel_version("<", _MIN_OPTIMUM_VERSION):


@@ -149,7 +149,7 @@ class HuggingFacePipeline(BaseLLM):
                 )
                 raise ValueError(msg)
-            err_msg = f'Backend: {backend} {IMPORT_ERROR.format(f"optimum[{backend}]")}'
+            err_msg = f"Backend: {backend} {IMPORT_ERROR.format(f'optimum[{backend}]')}"
             if not is_optimum_intel_available():
                 raise ImportError(err_msg)
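The last two hunks only flip quote characters: the upgraded ruff prefers double quotes for the outer f-string, so the literal nested inside the .format() call switches to single quotes to avoid clashing with the delimiter (reusing the same quote inside an f-string expression is only allowed from Python 3.12). The two spellings are equivalent, as a quick check shows; IMPORT_ERROR below is a stand-in value, not the real constant from the module:

# Stand-in template; any message with one placeholder demonstrates the point.
IMPORT_ERROR = "please install the {} extra"

old_style = f'Backend: ipex {IMPORT_ERROR.format("optimum[ipex]")}'
new_style = f"Backend: ipex {IMPORT_ERROR.format('optimum[ipex]')}"

assert old_style == new_style  # only the quoting changed, not the message
print(new_style)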