mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-09 23:12:38 +00:00)
fix: bump lockfiles (#31923)
* bump lockfiles after upgrading ruff
* resolve resulting linting fixes
@@ -12,8 +12,8 @@ from langchain_huggingface.llms import (
 
 __all__ = [
     "ChatHuggingFace",
-    "HuggingFaceEndpointEmbeddings",
     "HuggingFaceEmbeddings",
     "HuggingFaceEndpoint",
+    "HuggingFaceEndpointEmbeddings",
     "HuggingFacePipeline",
 ]
@@ -5,4 +5,4 @@ from langchain_huggingface.chat_models.huggingface import ( # type: ignore[impo
     _convert_dict_to_message,
 )
 
-__all__ = ["ChatHuggingFace", "_convert_dict_to_message", "TGI_MESSAGE", "TGI_RESPONSE"]
+__all__ = ["TGI_MESSAGE", "TGI_RESPONSE", "ChatHuggingFace", "_convert_dict_to_message"]
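
Both `__all__` hunks above look like the autofix for ruff's RUF022 rule (unsorted `__all__`), presumably surfaced by the ruff upgrade. Its default isort-style ordering puts SCREAMING_SNAKE_CASE constants before CamelCase names and leaves underscore-prefixed helpers last, which is why the second hunk is not plain alphabetical. A rough sketch of that grouping, purely illustrative and not ruff's actual implementation:

def isort_style_key(name: str) -> tuple[int, str]:
    # Constants such as TGI_MESSAGE sort first, then CamelCase classes,
    # then everything else (including _private helpers).
    if name.isupper():
        bucket = 0
    elif name[:1].isupper():
        bucket = 1
    else:
        bucket = 2
    return (bucket, name)


old_all = ["ChatHuggingFace", "_convert_dict_to_message", "TGI_MESSAGE", "TGI_RESPONSE"]
print(sorted(old_all, key=isort_style_key))
# ['TGI_MESSAGE', 'TGI_RESPONSE', 'ChatHuggingFace', '_convert_dict_to_message']
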
@@ -552,7 +552,7 @@ class ChatHuggingFace(BaseChatModel):
         messages: list[BaseMessage],
         stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
-        stream: Optional[bool] = None,
+        stream: Optional[bool] = None,  # noqa: FBT001
         **kwargs: Any,
     ) -> ChatResult:
         should_stream = stream if stream is not None else self.streaming
@@ -593,7 +593,7 @@ class ChatHuggingFace(BaseChatModel):
         messages: list[BaseMessage],
         stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
-        stream: Optional[bool] = None,
+        stream: Optional[bool] = None,  # noqa: FBT001
         **kwargs: Any,
     ) -> ChatResult:
         if _is_huggingface_textgen_inference(self.llm):
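
The `# noqa: FBT001` comments added in the two `ChatHuggingFace` hunks suppress ruff's flake8-boolean-trap check (FBT001: boolean-typed positional argument in a function definition). Making `stream` keyword-only would satisfy the rule but could break existing positional callers, so a suppression keeps the public signature stable. A minimal sketch of the pattern, with hypothetical function names rather than LangChain's real API:

from typing import Any, Optional


def generate(prompt: str, stream: Optional[bool] = None, **kwargs: Any) -> dict:  # noqa: FBT001
    # Boolean positional parameter: FBT001 flags it; the noqa comment silences it.
    return {"prompt": prompt, "stream": stream, **kwargs}


def generate_kw_only(prompt: str, *, stream: Optional[bool] = None) -> dict:
    # Keyword-only variant that would satisfy FBT001, at the cost of breaking
    # callers that pass stream positionally.
    return {"prompt": prompt, "stream": stream}


generate("hello", True)                  # legal, but the bare True is opaque
generate_kw_only("hello", stream=True)   # intent is explicit at the call site
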
@@ -76,7 +76,7 @@ class HuggingFaceEmbeddings(BaseModel, Embeddings):
 
         if self.model_kwargs.get("backend", "torch") == "ipex":
             if not is_optimum_intel_available() or not is_ipex_available():
-                msg = f'Backend: ipex {IMPORT_ERROR.format("optimum[ipex]")}'
+                msg = f"Backend: ipex {IMPORT_ERROR.format('optimum[ipex]')}"
                 raise ImportError(msg)
 
             if is_optimum_intel_version("<", _MIN_OPTIMUM_VERSION):
@@ -149,7 +149,7 @@ class HuggingFacePipeline(BaseLLM):
                 )
                 raise ValueError(msg)
 
-            err_msg = f'Backend: {backend} {IMPORT_ERROR.format(f"optimum[{backend}]")}'
+            err_msg = f"Backend: {backend} {IMPORT_ERROR.format(f'optimum[{backend}]')}"
             if not is_optimum_intel_available():
                 raise ImportError(err_msg)
 
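
The last two hunks change only quoting: the outer f-strings switch from single to double quotes and the nested literals inside the replacement fields switch to single quotes, presumably the upgraded ruff normalizing quote style inside f-strings. Both spellings produce the same message; the quick check below assumes a stand-in IMPORT_ERROR template, not the real constant from langchain_huggingface:

# Stand-in for the real IMPORT_ERROR template (assumption for illustration only).
IMPORT_ERROR = "requires the {0} package to be installed."

old_style = f'Backend: ipex {IMPORT_ERROR.format("optimum[ipex]")}'
new_style = f"Backend: ipex {IMPORT_ERROR.format('optimum[ipex]')}"
assert old_style == new_style  # only the quote characters differ
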