Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-21 14:43:07 +00:00)
@@ -274,15 +274,15 @@ class JinaChat(BaseChatModel):
             before_sleep=before_sleep_log(logger, logging.WARNING),
         )

-    def completion_with_retry(self, **kwargs: Any) -> Any:
+    def _completion_with_retry(self, **kwargs: Any) -> Any:
         """Use tenacity to retry the completion call."""
         retry_decorator = self._create_retry_decorator()

         @retry_decorator
-        def _completion_with_retry(**kwargs: Any) -> Any:
+        def __completion_with_retry(**kwargs: Any) -> Any:
             return self.client.create(**kwargs)

-        return _completion_with_retry(**kwargs)
+        return __completion_with_retry(**kwargs)

     def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
         overall_token_usage: dict = {}
@@ -309,7 +309,7 @@ class JinaChat(BaseChatModel):
         params = {**params, **kwargs, "stream": True}

         default_chunk_class = AIMessageChunk
-        for chunk in self.completion_with_retry(messages=message_dicts, **params):
+        for chunk in self._completion_with_retry(messages=message_dicts, **params):
             delta = chunk["choices"][0]["delta"]
             chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
             default_chunk_class = chunk.__class__
@@ -332,7 +332,7 @@ class JinaChat(BaseChatModel):

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}
-        response = self.completion_with_retry(messages=message_dicts, **params)
+        response = self._completion_with_retry(messages=message_dicts, **params)
         return self._create_chat_result(response)

     def _create_message_dicts(

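Every chat-model hunk in this commit applies the same rename to a tenacity-based wrapper: the public completion_with_retry method becomes the private _completion_with_retry, and the decorated inner closure gains a second leading underscore. A minimal, self-contained sketch of that pattern follows; DemoChatModel, its fake client, and the retry settings are illustrative placeholders, not code from this repository.

import logging

from tenacity import before_sleep_log, retry, stop_after_attempt, wait_exponential

logger = logging.getLogger(__name__)


class _FakeClient:
    """Stand-in for the real SDK client; create() just echoes its arguments."""

    def create(self, **kwargs):
        return {"echo": kwargs}


class DemoChatModel:
    max_retries = 6
    client = _FakeClient()

    def _create_retry_decorator(self):
        # Retry with exponential backoff, logging a warning before each sleep.
        return retry(
            reraise=True,
            stop=stop_after_attempt(self.max_retries),
            wait=wait_exponential(multiplier=1, min=1, max=10),
            before_sleep=before_sleep_log(logger, logging.WARNING),
        )

    def _completion_with_retry(self, **kwargs):
        """Use tenacity to retry the completion call."""
        retry_decorator = self._create_retry_decorator()

        @retry_decorator
        def __completion_with_retry(**kwargs):
            # The actual API call is the only thing the retry policy wraps.
            return self.client.create(**kwargs)

        return __completion_with_retry(**kwargs)


if __name__ == "__main__":
    print(DemoChatModel()._completion_with_retry(prompt="hello"))

Because the inner definition and its call site are renamed together, the change is purely a visibility rename; the retry behavior itself is untouched.
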
@@ -165,13 +165,13 @@ class ChatKonko(ChatOpenAI):

         return {model["id"] for model in models_response.json()["data"]}

-    def completion_with_retry(
+    def _completion_with_retry(
         self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
     ) -> Any:
-        def _completion_with_retry(**kwargs: Any) -> Any:
+        def __completion_with_retry(**kwargs: Any) -> Any:
             return self.client.create(**kwargs)

-        return _completion_with_retry(**kwargs)
+        return __completion_with_retry(**kwargs)

     def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
         overall_token_usage: dict = {}
@@ -198,7 +198,7 @@ class ChatKonko(ChatOpenAI):
         params = {**params, **kwargs, "stream": True}

         default_chunk_class = AIMessageChunk
-        for chunk in self.completion_with_retry(
+        for chunk in self._completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         ):
             if len(chunk["choices"]) == 0:
@@ -233,7 +233,7 @@ class ChatKonko(ChatOpenAI):

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}
-        response = self.completion_with_retry(
+        response = self._completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         )
         return self._create_chat_result(response)

@@ -225,17 +225,17 @@ class ChatLiteLLM(BaseChatModel):
         }
         return {**self._default_params, **creds}

-    def completion_with_retry(
+    def _completion_with_retry(
         self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
     ) -> Any:
         """Use tenacity to retry the completion call."""
         retry_decorator = _create_retry_decorator(self, run_manager=run_manager)

         @retry_decorator
-        def _completion_with_retry(**kwargs: Any) -> Any:
+        def __completion_with_retry(**kwargs: Any) -> Any:
             return self.client.completion(**kwargs)

-        return _completion_with_retry(**kwargs)
+        return __completion_with_retry(**kwargs)

     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
@@ -302,7 +302,7 @@ class ChatLiteLLM(BaseChatModel):

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}
-        response = self.completion_with_retry(
+        response = self._completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         )
         return self._create_chat_result(response)
@@ -345,7 +345,7 @@ class ChatLiteLLM(BaseChatModel):
         params = {**params, **kwargs, "stream": True}

         default_chunk_class = AIMessageChunk
-        for chunk in self.completion_with_retry(
+        for chunk in self._completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         ):
             if len(chunk["choices"]) == 0:

@@ -286,17 +286,17 @@ class ChatOpenAI(BaseChatModel):
             **self.model_kwargs,
         }

-    def completion_with_retry(
+    def _completion_with_retry(
         self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
     ) -> Any:
         """Use tenacity to retry the completion call."""
         retry_decorator = _create_retry_decorator(self, run_manager=run_manager)

         @retry_decorator
-        def _completion_with_retry(**kwargs: Any) -> Any:
+        def __completion_with_retry(**kwargs: Any) -> Any:
             return self.client.create(**kwargs)

-        return _completion_with_retry(**kwargs)
+        return __completion_with_retry(**kwargs)

     def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
         overall_token_usage: dict = {}
@@ -323,7 +323,7 @@ class ChatOpenAI(BaseChatModel):
         params = {**params, **kwargs, "stream": True}

         default_chunk_class = AIMessageChunk
-        for chunk in self.completion_with_retry(
+        for chunk in self._completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         ):
             if len(chunk["choices"]) == 0:
@@ -357,7 +357,7 @@ class ChatOpenAI(BaseChatModel):
             return _generate_from_stream(stream_iter)
         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}
-        response = self.completion_with_retry(
+        response = self._completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         )
         return self._create_chat_result(response)

@@ -266,14 +266,14 @@ class ChatTongyi(BaseChatModel):
             **self.model_kwargs,
         }

-    def completion_with_retry(
+    def _completion_with_retry(
         self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
     ) -> Any:
         """Use tenacity to retry the completion call."""
         retry_decorator = _create_retry_decorator(self, run_manager=run_manager)

         @retry_decorator
-        def _completion_with_retry(**_kwargs: Any) -> Any:
+        def __completion_with_retry(**_kwargs: Any) -> Any:
             resp = self.client.call(**_kwargs)
             if resp.status_code == 200:
                 return resp
@@ -289,19 +289,19 @@ class ChatTongyi(BaseChatModel):
                     response=resp,
                 )

-        return _completion_with_retry(**kwargs)
+        return __completion_with_retry(**kwargs)

-    def stream_completion_with_retry(
+    def _stream_completion_with_retry(
         self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
     ) -> Any:
         """Use tenacity to retry the completion call."""
         retry_decorator = _create_retry_decorator(self, run_manager=run_manager)

         @retry_decorator
-        def _stream_completion_with_retry(**_kwargs: Any) -> Any:
+        def __stream_completion_with_retry(**_kwargs: Any) -> Any:
             return self.client.call(**_kwargs)

-        return _stream_completion_with_retry(**kwargs)
+        return __stream_completion_with_retry(**kwargs)

     def _generate(
         self,
@@ -320,7 +320,7 @@ class ChatTongyi(BaseChatModel):

         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}
-        response = self.completion_with_retry(
+        response = self._completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         )
         return self._create_chat_result(response)
@@ -337,7 +337,7 @@ class ChatTongyi(BaseChatModel):
         # Mark current chunk total length
         length = 0
         default_chunk_class = AIMessageChunk
-        for chunk in self.stream_completion_with_retry(
+        for chunk in self._stream_completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         ):
             if len(chunk["output"]["choices"]) == 0:

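ChatTongyi's wrapper differs slightly from the others: the inner closure checks the response status and raises so that tenacity can retry a failed call. A rough sketch of that status-checking variant, with the same caveats as before (fake client, illustrative retry policy, and placeholder status handling rather than the real DashScope API):

from dataclasses import dataclass
from typing import Any

from requests.exceptions import HTTPError
from tenacity import retry, retry_if_exception_type, stop_after_attempt


@dataclass
class _FakeResponse:
    status_code: int
    code: str = ""
    message: str = ""


class _FakeTongyiClient:
    """Placeholder client; call() pretends the request always succeeds."""

    def call(self, **kwargs: Any) -> _FakeResponse:
        return _FakeResponse(status_code=200)


class DemoTongyi:
    client = _FakeTongyiClient()
    max_retries = 3

    def _create_retry_decorator(self) -> Any:
        # Only retry when the wrapped call raised an HTTPError.
        return retry(
            reraise=True,
            stop=stop_after_attempt(self.max_retries),
            retry=retry_if_exception_type(HTTPError),
        )

    def _completion_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the completion call."""
        retry_decorator = self._create_retry_decorator()

        @retry_decorator
        def __completion_with_retry(**_kwargs: Any) -> Any:
            resp = self.client.call(**_kwargs)
            if resp.status_code == 200:
                return resp
            # A non-200 status becomes an exception, which triggers a retry.
            raise HTTPError(
                f"status_code: {resp.status_code}, code: {resp.code}, "
                f"message: {resp.message}",
                response=resp,
            )

        return __completion_with_retry(**kwargs)


if __name__ == "__main__":
    print(DemoTongyi()._completion_with_retry(prompt="hello"))

The streaming variant in the hunk above follows the same shape but returns the raw call result instead of checking the status.
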
@@ -17,8 +17,8 @@ from langchain.callbacks.manager import (
 )
 from langchain.llms.openai import (
     BaseOpenAI,
-    acompletion_with_retry,
-    completion_with_retry,
+    _acompletion_with_retry,
+    _completion_with_retry,
 )
 from langchain.pydantic_v1 import Field, root_validator
 from langchain.schema import Generation, LLMResult
@@ -162,7 +162,7 @@ class Anyscale(BaseOpenAI):
     ) -> Iterator[GenerationChunk]:
         messages, params = self._get_chat_messages([prompt], stop)
         params = {**params, **kwargs, "stream": True}
-        for stream_resp in completion_with_retry(
+        for stream_resp in _completion_with_retry(
             self, messages=messages, run_manager=run_manager, **params
         ):
             token = stream_resp["choices"][0]["delta"].get("content", "")
@@ -180,7 +180,7 @@ class Anyscale(BaseOpenAI):
     ) -> AsyncIterator[GenerationChunk]:
         messages, params = self._get_chat_messages([prompt], stop)
         params = {**params, **kwargs, "stream": True}
-        async for stream_resp in await acompletion_with_retry(
+        async for stream_resp in await _acompletion_with_retry(
             self, messages=messages, run_manager=run_manager, **params
         ):
             token = stream_resp["choices"][0]["delta"].get("content", "")
@@ -223,7 +223,7 @@ class Anyscale(BaseOpenAI):
             else:
                 messages, params = self._get_chat_messages([prompt], stop)
                 params = {**params, **kwargs}
-                response = completion_with_retry(
+                response = _completion_with_retry(
                     self, messages=messages, run_manager=run_manager, **params
                 )
                 choices.extend(response["choices"])
@@ -264,7 +264,7 @@ class Anyscale(BaseOpenAI):
             else:
                 messages, params = self._get_chat_messages([prompt], stop)
                 params = {**params, **kwargs}
-                response = await acompletion_with_retry(
+                response = await _acompletion_with_retry(
                     self, messages=messages, run_manager=run_manager, **params
                 )
                 choices.extend(response["choices"])

@@ -100,7 +100,7 @@ def _create_retry_decorator(
     )


-def completion_with_retry(
+def _completion_with_retry(
     llm: Union[BaseOpenAI, OpenAIChat],
     run_manager: Optional[CallbackManagerForLLMRun] = None,
     **kwargs: Any,
@@ -109,13 +109,13 @@ def completion_with_retry(
     retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

     @retry_decorator
-    def _completion_with_retry(**kwargs: Any) -> Any:
+    def __completion_with_retry(**kwargs: Any) -> Any:
         return llm.client.create(**kwargs)

-    return _completion_with_retry(**kwargs)
+    return __completion_with_retry(**kwargs)


-async def acompletion_with_retry(
+async def _acompletion_with_retry(
     llm: Union[BaseOpenAI, OpenAIChat],
     run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
     **kwargs: Any,
@@ -305,7 +305,7 @@ class BaseOpenAI(BaseLLM):
     ) -> Iterator[GenerationChunk]:
         params = {**self._invocation_params, **kwargs, "stream": True}
         self.get_sub_prompts(params, [prompt], stop)  # this mutates params
-        for stream_resp in completion_with_retry(
+        for stream_resp in _completion_with_retry(
             self, prompt=prompt, run_manager=run_manager, **params
         ):
             chunk = _stream_response_to_generation_chunk(stream_resp)
@@ -329,7 +329,7 @@ class BaseOpenAI(BaseLLM):
     ) -> AsyncIterator[GenerationChunk]:
         params = {**self._invocation_params, **kwargs, "stream": True}
         self.get_sub_prompts(params, [prompt], stop)  # this mutate params
-        async for stream_resp in await acompletion_with_retry(
+        async for stream_resp in await _acompletion_with_retry(
             self, prompt=prompt, run_manager=run_manager, **params
         ):
             chunk = _stream_response_to_generation_chunk(stream_resp)
@@ -398,7 +398,7 @@ class BaseOpenAI(BaseLLM):
                     }
                 )
             else:
-                response = completion_with_retry(
+                response = _completion_with_retry(
                     self, prompt=_prompts, run_manager=run_manager, **params
                 )
                 choices.extend(response["choices"])
@@ -447,7 +447,7 @@ class BaseOpenAI(BaseLLM):
                     }
                 )
             else:
-                response = await acompletion_with_retry(
+                response = await _acompletion_with_retry(
                     self, prompt=_prompts, run_manager=run_manager, **params
                 )
                 choices.extend(response["choices"])
@@ -847,7 +847,7 @@ class OpenAIChat(BaseLLM):
     ) -> Iterator[GenerationChunk]:
         messages, params = self._get_chat_params([prompt], stop)
         params = {**params, **kwargs, "stream": True}
-        for stream_resp in completion_with_retry(
+        for stream_resp in _completion_with_retry(
             self, messages=messages, run_manager=run_manager, **params
         ):
             token = stream_resp["choices"][0]["delta"].get("content", "")
@@ -865,7 +865,7 @@ class OpenAIChat(BaseLLM):
     ) -> AsyncIterator[GenerationChunk]:
         messages, params = self._get_chat_params([prompt], stop)
         params = {**params, **kwargs, "stream": True}
-        async for stream_resp in await acompletion_with_retry(
+        async for stream_resp in await _acompletion_with_retry(
             self, messages=messages, run_manager=run_manager, **params
         ):
             token = stream_resp["choices"][0]["delta"].get("content", "")
@@ -893,7 +893,7 @@ class OpenAIChat(BaseLLM):

         messages, params = self._get_chat_params(prompts, stop)
         params = {**params, **kwargs}
-        full_response = completion_with_retry(
+        full_response = _completion_with_retry(
             self, messages=messages, run_manager=run_manager, **params
         )
         llm_output = {
@@ -926,7 +926,7 @@ class OpenAIChat(BaseLLM):

         messages, params = self._get_chat_params(prompts, stop)
         params = {**params, **kwargs}
-        full_response = await acompletion_with_retry(
+        full_response = await _acompletion_with_retry(
             self, messages=messages, run_manager=run_manager, **params
         )
         llm_output = {

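Unlike the chat models, the llms modules expose module-level helpers: a free _completion_with_retry(llm, ...) and its async counterpart _acompletion_with_retry(llm, ...), which the Anyscale, BaseOpenAI, and OpenAIChat hunks now call under the underscore-prefixed names. A sketch of the async form, with the same caveats as above (DemoLLM, its placeholder client, and the retry policy are illustrative assumptions, not the real OpenAI SDK):

import asyncio
from typing import Any

from tenacity import retry, stop_after_attempt, wait_exponential


class _FakeAsyncClient:
    """Placeholder for an SDK client exposing an async acreate() call."""

    async def acreate(self, **kwargs: Any) -> dict:
        return {"choices": [{"message": {"content": "ok"}}], "echo": kwargs}


class DemoLLM:
    client = _FakeAsyncClient()
    max_retries = 6


def _create_retry_decorator(llm: DemoLLM) -> Any:
    # Exponential backoff bounded by the model's max_retries setting.
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=1, max=10),
    )


async def _acompletion_with_retry(llm: DemoLLM, **kwargs: Any) -> Any:
    """Use tenacity to retry the async completion call."""
    retry_decorator = _create_retry_decorator(llm)

    @retry_decorator
    async def __acompletion_with_retry(**kwargs: Any) -> Any:
        return await llm.client.acreate(**kwargs)

    return await __acompletion_with_retry(**kwargs)


if __name__ == "__main__":
    print(asyncio.run(_acompletion_with_retry(DemoLLM(), prompt="hello")))

Callers pass the model object as the first positional argument, which is why the call sites in the hunks above and below read _completion_with_retry(self, ...).
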
@@ -167,7 +167,7 @@ class Nebula(LLM):
         else:
             raise ValueError("Prompt must contain instruction and conversation.")

-        response = completion_with_retry(
+        response = _completion_with_retry(
             self,
             instruction=instruction,
             conversation=conversation,
@@ -232,12 +232,12 @@ def _create_retry_decorator(llm: Nebula) -> Callable[[Any], Any]:
     )


-def completion_with_retry(llm: Nebula, **kwargs: Any) -> Any:
+def _completion_with_retry(llm: Nebula, **kwargs: Any) -> Any:
     """Use tenacity to retry the completion call."""
     retry_decorator = _create_retry_decorator(llm)

     @retry_decorator
-    def _completion_with_retry(**_kwargs: Any) -> Any:
+    def __completion_with_retry(**_kwargs: Any) -> Any:
         return make_request(llm, **_kwargs)

-    return _completion_with_retry(**kwargs)
+    return __completion_with_retry(**kwargs)

@@ -86,7 +86,7 @@ def _create_retry_decorator(
     return decorator


-def completion_with_retry(
+def _completion_with_retry(
     llm: VertexAI,
     *args: Any,
     run_manager: Optional[CallbackManagerForLLMRun] = None,
@@ -96,13 +96,13 @@ def completion_with_retry(
     retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

     @retry_decorator
-    def _completion_with_retry(*args: Any, **kwargs: Any) -> Any:
+    def __completion_with_retry(*args: Any, **kwargs: Any) -> Any:
         return llm.client.predict(*args, **kwargs)

-    return _completion_with_retry(*args, **kwargs)
+    return __completion_with_retry(*args, **kwargs)


-def stream_completion_with_retry(
+def _stream_completion_with_retry(
     llm: VertexAI,
     *args: Any,
     run_manager: Optional[CallbackManagerForLLMRun] = None,
@@ -118,7 +118,7 @@ def stream_completion_with_retry(
     return _completion_with_retry(*args, **kwargs)


-async def acompletion_with_retry(
+async def _acompletion_with_retry(
     llm: VertexAI,
     *args: Any,
     run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
@@ -128,10 +128,10 @@ async def acompletion_with_retry(
     retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

     @retry_decorator
-    async def _acompletion_with_retry(*args: Any, **kwargs: Any) -> Any:
+    async def __acompletion_with_retry(*args: Any, **kwargs: Any) -> Any:
         return await llm.client.predict_async(*args, **kwargs)

-    return await _acompletion_with_retry(*args, **kwargs)
+    return await __acompletion_with_retry(*args, **kwargs)


 class _VertexAIBase(BaseModel):
@@ -295,7 +295,7 @@ class VertexAI(_VertexAICommon, BaseLLM):
                     generation += chunk
                 generations.append([generation])
             else:
-                res = completion_with_retry(
+                res = _completion_with_retry(
                     self, prompt, run_manager=run_manager, **params
                 )
                 generations.append([_response_to_generation(r) for r in res.candidates])
@@ -311,7 +311,7 @@ class VertexAI(_VertexAICommon, BaseLLM):
         params = self._prepare_params(stop=stop, **kwargs)
         generations = []
         for prompt in prompts:
-            res = await acompletion_with_retry(
+            res = await _acompletion_with_retry(
                 self, prompt, run_manager=run_manager, **params
             )
             generations.append([_response_to_generation(r) for r in res.candidates])
@@ -325,7 +325,7 @@ class VertexAI(_VertexAICommon, BaseLLM):
         **kwargs: Any,
     ) -> Iterator[GenerationChunk]:
         params = self._prepare_params(stop=stop, stream=True, **kwargs)
-        for stream_resp in stream_completion_with_retry(
+        for stream_resp in _stream_completion_with_retry(
             self, prompt, run_manager=run_manager, **params
         ):
             chunk = _response_to_generation(stream_resp)