community[minor]: rename ChatGPTRouter to GPTRouter (#14913)

**Description:** Rename the ChatGPTRouter integration to GPTRouter
**Tag maintainer:** @Gupta-Anubhav12 @samanyougarg @sirjan-ws-ext  
**Twitter handle:** [@SamanyouGarg](https://twitter.com/SamanyouGarg)
Authored by Sirjanpreet Singh Banga on 2023-12-19 21:18:52 +05:30, committed via GitHub
parent 992b04e475
commit 425e5e1791
5 changed files with 18 additions and 18 deletions


@@ -16,7 +16,7 @@
"id": "bf733a38-db84-4363-89e2-de6735c37230",
"metadata": {},
"source": [
"# ChatGPTRouter\n",
"# GPTRouter\n",
"\n",
"[GPTRouter](https://github.com/Writesonic/GPTRouter) is an open source LLM API Gateway that offers a universal API for 30+ LLMs, vision, and image models, with smart fallbacks based on uptime and latency, automatic retries, and streaming.\n",
"\n",
@@ -71,7 +71,7 @@
"outputs": [],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models import ChatGPTRouter\n",
"from langchain_community.chat_models import GPTRouter\n",
"from langchain_community.chat_models.gpt_router import GPTRouterModel"
]
},
@@ -94,7 +94,7 @@
},
"outputs": [],
"source": [
"chat = ChatGPTRouter(models_priority_list=[anthropic_claude])"
"chat = GPTRouter(models_priority_list=[anthropic_claude])"
]
},
{
@@ -131,7 +131,7 @@
"id": "c361ab1e-8c0c-4206-9e3c-9d1424a12b9c",
"metadata": {},
"source": [
"## `ChatGPTRouter` also supports async and streaming functionality:"
"## `GPTRouter` also supports async and streaming functionality:"
]
},
{
@@ -197,7 +197,7 @@
}
],
"source": [
"chat = ChatGPTRouter(\n",
"chat = GPTRouter(\n",
" models_priority_list=[anthropic_claude],\n",
" streaming=True,\n",
" verbose=True,\n",


@@ -31,7 +31,7 @@ from langchain_community.chat_models.fake import FakeListChatModel
from langchain_community.chat_models.fireworks import ChatFireworks
from langchain_community.chat_models.gigachat import GigaChat
from langchain_community.chat_models.google_palm import ChatGooglePalm
-from langchain_community.chat_models.gpt_router import ChatGPTRouter
+from langchain_community.chat_models.gpt_router import GPTRouter
from langchain_community.chat_models.human import HumanInputChatModel
from langchain_community.chat_models.hunyuan import ChatHunyuan
from langchain_community.chat_models.javelin_ai_gateway import ChatJavelinAIGateway
@@ -80,5 +80,5 @@ __all__ = [
"ChatHunyuan",
"GigaChat",
"VolcEngineMaasChat",
"ChatGPTRouter",
"GPTRouter",
]
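The only change to the package's public surface is the exported name; a quick sanity check of the updated `__all__` (illustrative, not part of the diff):

```python
# After this commit, the class is importable from the package root under its new name.
from langchain_community.chat_models import GPTRouter

from langchain_community import chat_models

assert "GPTRouter" in chat_models.__all__
assert "ChatGPTRouter" not in chat_models.__all__
```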


@@ -77,7 +77,7 @@ def get_ordered_generation_requests(
def _create_retry_decorator(
-llm: ChatGPTRouter,
+llm: GPTRouter,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
@@ -96,7 +96,7 @@ def _create_retry_decorator(
def completion_with_retry(
-llm: ChatGPTRouter,
+llm: GPTRouter,
models_priority_list: List[GPTRouterModel],
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
@@ -118,7 +118,7 @@ def completion_with_retry(
async def acompletion_with_retry(
-llm: ChatGPTRouter,
+llm: GPTRouter,
models_priority_list: List[GPTRouterModel],
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
@@ -140,7 +140,7 @@ async def acompletion_with_retry(
return await _completion_with_retry(**kwargs)
-class ChatGPTRouter(BaseChatModel):
+class GPTRouter(BaseChatModel):
"""GPTRouter by Writesonic Inc.
For more information, see https://gpt-router.writesonic.com/docs
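For context on the signatures renamed above: `completion_with_retry` and `acompletion_with_retry` wrap the outgoing GPTRouter request in a retry decorator built by `_create_retry_decorator`. The sketch below shows that general pattern with tenacity; it is illustrative only, not the module's actual implementation, and the retry limits, wait policy, and `send_request` callable are placeholders:

```python
from typing import Any, Callable

from tenacity import retry, stop_after_attempt, wait_exponential


def _create_retry_decorator(max_retries: int = 3) -> Callable[[Any], Any]:
    # Placeholder policy; the real module derives its settings from the GPTRouter instance.
    return retry(
        reraise=True,
        stop=stop_after_attempt(max_retries),
        wait=wait_exponential(multiplier=1, min=1, max=10),
    )


def completion_with_retry(send_request: Callable[..., Any], **kwargs: Any) -> Any:
    """Call the underlying request, retrying transparently on transient failures."""
    retry_decorator = _create_retry_decorator()

    @retry_decorator
    def _completion_with_retry(**inner_kwargs: Any) -> Any:
        return send_request(**inner_kwargs)

    return _completion_with_retry(**kwargs)
```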


@@ -8,7 +8,7 @@ from langchain_core.callbacks import (
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, LLMResult
-from langchain_community.chat_models.gpt_router import ChatGPTRouter, GPTRouterModel
+from langchain_community.chat_models.gpt_router import GPTRouter, GPTRouterModel
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
@@ -17,7 +17,7 @@ def test_gpt_router_call() -> None:
anthropic_claude = GPTRouterModel(
name="claude-instant-1.2", provider_name="anthropic"
)
-chat = ChatGPTRouter(models_priority_list=[anthropic_claude])
+chat = GPTRouter(models_priority_list=[anthropic_claude])
message = HumanMessage(content="Hello World")
response = chat([message])
assert isinstance(response, AIMessage)
@@ -29,7 +29,7 @@ def test_gpt_router_call_incorrect_model() -> None:
anthropic_claude = GPTRouterModel(
name="model_does_not_exist", provider_name="anthropic"
)
-chat = ChatGPTRouter(models_priority_list=[anthropic_claude])
+chat = GPTRouter(models_priority_list=[anthropic_claude])
message = HumanMessage(content="Hello World")
with pytest.raises(Exception):
chat([message])
@@ -40,7 +40,7 @@ def test_gpt_router_generate() -> None:
anthropic_claude = GPTRouterModel(
name="claude-instant-1.2", provider_name="anthropic"
)
-chat = ChatGPTRouter(models_priority_list=[anthropic_claude])
+chat = GPTRouter(models_priority_list=[anthropic_claude])
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="If (5 + x = 18), what is x?")]
]
@@ -59,7 +59,7 @@ def test_gpt_router_streaming() -> None:
anthropic_claude = GPTRouterModel(
name="claude-instant-1.2", provider_name="anthropic"
)
-chat = ChatGPTRouter(models_priority_list=[anthropic_claude], streaming=True)
+chat = GPTRouter(models_priority_list=[anthropic_claude], streaming=True)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
@@ -73,7 +73,7 @@ def test_gpt_router_streaming_callback() -> None:
anthropic_claude = GPTRouterModel(
name="claude-instant-1.2", provider_name="anthropic"
)
-chat = ChatGPTRouter(
+chat = GPTRouter(
models_priority_list=[anthropic_claude],
streaming=True,
callback_manager=callback_manager,
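The streaming-callback test above follows the same pattern an application would; a hedged end-user sketch (the `StreamingStdOutCallbackHandler` import and the `callbacks=` keyword are standard langchain_core/BaseChatModel features assumed here, not something this diff touches):

```python
from langchain_core.callbacks import StreamingStdOutCallbackHandler
from langchain_core.messages import HumanMessage

from langchain_community.chat_models import GPTRouter
from langchain_community.chat_models.gpt_router import GPTRouterModel

anthropic_claude = GPTRouterModel(name="claude-instant-1.2", provider_name="anthropic")

# Print tokens to stdout as they arrive instead of waiting for the full reply.
chat = GPTRouter(
    models_priority_list=[anthropic_claude],
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
)
chat([HumanMessage(content="Write me a short poem about token streaming.")])
```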


@@ -31,7 +31,7 @@ EXPECTED_ALL = [
"ChatHunyuan",
"GigaChat",
"VolcEngineMaasChat",
"ChatGPTRouter",
"GPTRouter",
]