community[patch]: upgrade to recent version of mypy (#21616)

This PR upgrades community to a recent version of mypy. It inserts # type: ignore comments on all existing failures.
Author: Eugene Yurtsev
Date: 2024-05-13 14:55:07 -04:00
Committed by: GitHub
Parent: b923951062
Commit: 25fbe356b4
243 changed files with 718 additions and 710 deletions
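Nearly every change below follows the same mechanical pattern: the failing call site is left unchanged and the new mypy error is silenced with an error-code-specific ignore, most often [call-arg] (an argument mypy considers missing or unexpected) or [arg-type] (a value whose static type does not match the annotation, e.g. a plain str where a SecretStr field is declared). The sketch below is illustrative only: ExampleChat is a hypothetical stand-in, not a langchain-community class, and whether mypy actually reports these errors depends on the pydantic mypy plugin configuration.

from pydantic import BaseModel, SecretStr


class ExampleChat(BaseModel):
    """Toy stand-in for a community chat-model wrapper (illustrative only)."""

    model: str = "default-model"
    api_key: SecretStr = SecretStr("")


# [arg-type]: a plain str is passed where the field is annotated as SecretStr;
# pydantic coerces the value at runtime, but the static types disagree.
chat = ExampleChat(api_key="secret-api-key")  # type: ignore[arg-type]

# [call-arg]: a keyword mypy does not expect; pydantic ignores it at runtime
# (or a real wrapper routes it into model_kwargs), so the test still passes.
chat = ExampleChat(model="test", foo=3)  # type: ignore[call-arg]

Using error-code-specific ignores rather than bare # type: ignore keeps the upgrade mechanical while leaving a searchable marker for later cleanups that target one error code at a time.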


@@ -15,7 +15,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
@pytest.mark.scheduled
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
-chat = ChatAnthropic(model="test")
+chat = ChatAnthropic(model="test") # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
@@ -25,7 +25,7 @@ def test_anthropic_call() -> None:
@pytest.mark.scheduled
def test_anthropic_generate() -> None:
"""Test generate method of anthropic."""
-chat = ChatAnthropic(model="test")
+chat = ChatAnthropic(model="test") # type: ignore[call-arg]
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="How many toes do dogs have?")]
]
@@ -42,7 +42,7 @@ def test_anthropic_generate() -> None:
@pytest.mark.scheduled
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
-chat = ChatAnthropic(model="test", streaming=True)
+chat = ChatAnthropic(model="test", streaming=True) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
@@ -54,7 +54,7 @@ def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
-chat = ChatAnthropic(
+chat = ChatAnthropic( # type: ignore[call-arg]
model="test",
streaming=True,
callback_manager=callback_manager,
@@ -70,7 +70,7 @@ async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
-chat = ChatAnthropic(
+chat = ChatAnthropic( # type: ignore[call-arg]
model="test",
streaming=True,
callback_manager=callback_manager,


@@ -20,7 +20,7 @@ DEPLOYMENT_NAME = os.environ.get(
def _get_llm(**kwargs: Any) -> AzureChatOpenAI:
-return AzureChatOpenAI(
+return AzureChatOpenAI( # type: ignore[call-arg]
deployment_name=DEPLOYMENT_NAME,
openai_api_version=OPENAI_API_VERSION,
azure_endpoint=OPENAI_API_BASE,


@@ -23,7 +23,7 @@ def test_chat_baichuan_default_non_streaming() -> None:
def test_chat_baichuan_turbo() -> None:
-chat = ChatBaichuan(model="Baichuan2-Turbo", streaming=True)
+chat = ChatBaichuan(model="Baichuan2-Turbo", streaming=True) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
@@ -31,7 +31,7 @@ def test_chat_baichuan_turbo() -> None:
def test_chat_baichuan_turbo_non_streaming() -> None:
-chat = ChatBaichuan(model="Baichuan2-Turbo")
+chat = ChatBaichuan(model="Baichuan2-Turbo") # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)


@@ -17,7 +17,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
@pytest.fixture
def chat() -> BedrockChat:
-return BedrockChat(model_id="anthropic.claude-v2", model_kwargs={"temperature": 0})
+return BedrockChat(model_id="anthropic.claude-v2", model_kwargs={"temperature": 0}) # type: ignore[call-arg]
@pytest.mark.scheduled
@@ -63,7 +63,7 @@ def test_chat_bedrock_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
-chat = BedrockChat(
+chat = BedrockChat( # type: ignore[call-arg]
model_id="anthropic.claude-v2",
streaming=True,
callback_manager=callback_manager,
@@ -92,7 +92,7 @@ def test_chat_bedrock_streaming_generation_info() -> None:
callback = _FakeCallback()
callback_manager = CallbackManager([callback])
-chat = BedrockChat(
+chat = BedrockChat( # type: ignore[call-arg]
model_id="anthropic.claude-v2",
callback_manager=callback_manager,
)


@@ -9,7 +9,7 @@ from langchain_community.chat_models.coze import ChatCoze
def test_chat_coze_default() -> None:
chat = ChatCoze(
coze_api_base="https://api.coze.com",
-coze_api_key="pat_...",
+coze_api_key="pat_...", # type: ignore[arg-type]
bot_id="7....",
user="123",
conversation_id="",
@@ -24,7 +24,7 @@ def test_chat_coze_default() -> None:
def test_chat_coze_default_non_streaming() -> None:
chat = ChatCoze(
coze_api_base="https://api.coze.com",
-coze_api_key="pat_...",
+coze_api_key="pat_...", # type: ignore[arg-type]
bot_id="7....",
user="123",
conversation_id="",


@@ -12,7 +12,7 @@ from langchain_community.chat_models.dappier import (
@pytest.mark.scheduled
def test_dappier_chat() -> None:
"""Test ChatDappierAI wrapper."""
-chat = ChatDappierAI(
+chat = ChatDappierAI( # type: ignore[call-arg]
dappier_endpoint="https://api.dappier.com/app/datamodelconversation",
dappier_model="dm_01hpsxyfm2fwdt2zet9cg6fdxt",
)
@@ -25,7 +25,7 @@ def test_dappier_chat() -> None:
@pytest.mark.scheduled
def test_dappier_generate() -> None:
"""Test generate method of Dappier AI."""
-chat = ChatDappierAI(
+chat = ChatDappierAI( # type: ignore[call-arg]
dappier_endpoint="https://api.dappier.com/app/datamodelconversation",
dappier_model="dm_01hpsxyfm2fwdt2zet9cg6fdxt",
)
@@ -45,7 +45,7 @@ def test_dappier_generate() -> None:
@pytest.mark.scheduled
async def test_dappier_agenerate() -> None:
"""Test async generation."""
-chat = ChatDappierAI(
+chat = ChatDappierAI( # type: ignore[call-arg]
dappier_endpoint="https://api.dappier.com/app/datamodelconversation",
dappier_model="dm_01hpsxyfm2fwdt2zet9cg6fdxt",
)


@@ -13,7 +13,7 @@ from langchain_community.chat_models.edenai import (
@pytest.mark.scheduled
def test_chat_edenai() -> None:
"""Test ChatEdenAI wrapper."""
-chat = ChatEdenAI(
+chat = ChatEdenAI( # type: ignore[call-arg]
provider="openai", model="gpt-3.5-turbo", temperature=0, max_tokens=1000
)
message = HumanMessage(content="Who are you ?")
@@ -25,7 +25,7 @@ def test_chat_edenai() -> None:
@pytest.mark.scheduled
def test_edenai_generate() -> None:
"""Test generate method of edenai."""
-chat = ChatEdenAI(provider="google")
+chat = ChatEdenAI(provider="google") # type: ignore[call-arg]
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="What is the meaning of life?")]
]
@@ -42,7 +42,7 @@ def test_edenai_generate() -> None:
@pytest.mark.scheduled
async def test_edenai_async_generate() -> None:
"""Test async generation."""
-chat = ChatEdenAI(provider="google", max_tokens=50)
+chat = ChatEdenAI(provider="google", max_tokens=50) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
result: LLMResult = await chat.agenerate([[message], [message]])
assert isinstance(result, LLMResult)
@@ -55,7 +55,7 @@ async def test_edenai_async_generate() -> None:
@pytest.mark.scheduled
def test_edenai_streaming() -> None:
"""Test streaming EdenAI chat."""
-llm = ChatEdenAI(provider="openai", max_tokens=50)
+llm = ChatEdenAI(provider="openai", max_tokens=50) # type: ignore[call-arg]
for chunk in llm.stream("Generate a high fantasy story."):
assert isinstance(chunk.content, str)
@@ -64,7 +64,7 @@ def test_edenai_streaming() -> None:
@pytest.mark.scheduled
async def test_edenai_astream() -> None:
"""Test streaming from EdenAI."""
-llm = ChatEdenAI(provider="openai", max_tokens=50)
+llm = ChatEdenAI(provider="openai", max_tokens=50) # type: ignore[call-arg]
async for token in llm.astream("Generate a high fantasy story."):
assert isinstance(token.content, str)


@@ -12,7 +12,7 @@ from langchain_community.chat_models import ChatGooglePalm
def test_chat_google_palm() -> None:
"""Test Google PaLM Chat API wrapper."""
-chat = ChatGooglePalm()
+chat = ChatGooglePalm() # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, BaseMessage)
@@ -21,7 +21,7 @@ def test_chat_google_palm() -> None:
def test_chat_google_palm_system_message() -> None:
"""Test Google PaLM Chat API wrapper with system message."""
-chat = ChatGooglePalm()
+chat = ChatGooglePalm() # type: ignore[call-arg]
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat.invoke([system_message, human_message])
@@ -31,7 +31,7 @@ def test_chat_google_palm_system_message() -> None:
def test_chat_google_palm_generate() -> None:
"""Test Google PaLM Chat API wrapper with generate."""
-chat = ChatGooglePalm(n=2, temperature=1.0)
+chat = ChatGooglePalm(n=2, temperature=1.0) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
@@ -48,7 +48,7 @@ def test_chat_google_palm_multiple_completions() -> None:
"""Test Google PaLM Chat API wrapper with multiple completions."""
# The API de-dupes duplicate responses, so set temperature higher. This
# could be a flakey test though...
-chat = ChatGooglePalm(n=5, temperature=1.0)
+chat = ChatGooglePalm(n=5, temperature=1.0) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat._generate([message])
assert isinstance(response, ChatResult)
@@ -60,7 +60,7 @@ def test_chat_google_palm_multiple_completions() -> None:
async def test_async_chat_google_palm() -> None:
"""Test async generation."""
-chat = ChatGooglePalm(n=2, temperature=1.0)
+chat = ChatGooglePalm(n=2, temperature=1.0) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert isinstance(response, LLMResult)


@@ -16,9 +16,9 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_api_key_is_string() -> None:
-gpt_router = GPTRouter(
+gpt_router = GPTRouter( # type: ignore[call-arg]
gpt_router_api_base="https://example.com",
-gpt_router_api_key="secret-api-key",
+gpt_router_api_key="secret-api-key", # type: ignore[arg-type]
)
assert isinstance(gpt_router.gpt_router_api_key, SecretStr)
@@ -26,9 +26,9 @@ def test_api_key_is_string() -> None:
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
-gpt_router = GPTRouter(
+gpt_router = GPTRouter( # type: ignore[call-arg]
gpt_router_api_base="https://example.com",
-gpt_router_api_key="secret-api-key",
+gpt_router_api_key="secret-api-key", # type: ignore[arg-type]
)
print(gpt_router.gpt_router_api_key, end="") # noqa: T201
captured = capsys.readouterr()


@@ -14,7 +14,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_jinachat_api_key_is_secret_string() -> None:
-llm = JinaChat(jinachat_api_key="secret-api-key")
+llm = JinaChat(jinachat_api_key="secret-api-key") # type: ignore[arg-type, call-arg]
assert isinstance(llm.jinachat_api_key, SecretStr)
@@ -23,7 +23,7 @@ def test_jinachat_api_key_masked_when_passed_from_env(
) -> None:
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv("JINACHAT_API_KEY", "secret-api-key")
-llm = JinaChat()
+llm = JinaChat() # type: ignore[call-arg]
print(llm.jinachat_api_key, end="") # noqa: T201
captured = capsys.readouterr()
@@ -34,7 +34,7 @@ def test_jinachat_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test initialization with an API key provided via the initializer"""
-llm = JinaChat(jinachat_api_key="secret-api-key")
+llm = JinaChat(jinachat_api_key="secret-api-key") # type: ignore[arg-type, call-arg]
print(llm.jinachat_api_key, end="") # noqa: T201
captured = capsys.readouterr()
@@ -43,13 +43,13 @@ def test_jinachat_api_key_masked_when_passed_via_constructor(
def test_uses_actual_secret_value_from_secretstr() -> None:
"""Test that actual secret is retrieved using `.get_secret_value()`."""
-llm = JinaChat(jinachat_api_key="secret-api-key")
+llm = JinaChat(jinachat_api_key="secret-api-key") # type: ignore[arg-type, call-arg]
assert cast(SecretStr, llm.jinachat_api_key).get_secret_value() == "secret-api-key"
def test_jinachat() -> None:
"""Test JinaChat wrapper."""
-chat = JinaChat(max_tokens=10)
+chat = JinaChat(max_tokens=10) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, BaseMessage)
@@ -58,7 +58,7 @@ def test_jinachat() -> None:
def test_jinachat_system_message() -> None:
"""Test JinaChat wrapper with system message."""
-chat = JinaChat(max_tokens=10)
+chat = JinaChat(max_tokens=10) # type: ignore[call-arg]
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat.invoke([system_message, human_message])
@@ -68,7 +68,7 @@ def test_jinachat_system_message() -> None:
def test_jinachat_generate() -> None:
"""Test JinaChat wrapper with generate."""
-chat = JinaChat(max_tokens=10)
+chat = JinaChat(max_tokens=10) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
@@ -85,7 +85,7 @@ def test_jinachat_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
-chat = JinaChat(
+chat = JinaChat( # type: ignore[call-arg]
max_tokens=10,
streaming=True,
temperature=0,
@@ -100,7 +100,7 @@ def test_jinachat_streaming() -> None:
async def test_async_jinachat() -> None:
"""Test async generation."""
-chat = JinaChat(max_tokens=102)
+chat = JinaChat(max_tokens=102) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert isinstance(response, LLMResult)
@@ -117,7 +117,7 @@ async def test_async_jinachat_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
-chat = JinaChat(
+chat = JinaChat( # type: ignore[call-arg]
max_tokens=10,
streaming=True,
temperature=0,
@@ -140,18 +140,18 @@ async def test_async_jinachat_streaming() -> None:
def test_jinachat_extra_kwargs() -> None:
"""Test extra kwargs to chat openai."""
# Check that foo is saved in extra_kwargs.
-llm = JinaChat(foo=3, max_tokens=10)
+llm = JinaChat(foo=3, max_tokens=10) # type: ignore[call-arg]
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
# Test that if extra_kwargs are provided, they are added to it.
-llm = JinaChat(foo=3, model_kwargs={"bar": 2})
+llm = JinaChat(foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
-JinaChat(foo=3, model_kwargs={"foo": 2})
+JinaChat(foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg]
# Test that if explicit param is specified in kwargs it errors
with pytest.raises(ValueError):
-JinaChat(model_kwargs={"temperature": 0.2})
+JinaChat(model_kwargs={"temperature": 0.2}) # type: ignore[call-arg]


@@ -74,7 +74,7 @@ class TestChatKinetica:
"""Create an LLM instance."""
import gpudb
-kinetica_llm = ChatKinetica()
+kinetica_llm = ChatKinetica() # type: ignore[call-arg]
LOG.info(kinetica_llm._identifying_params)
assert isinstance(kinetica_llm.kdbc, gpudb.GPUdb)
@@ -83,7 +83,7 @@ class TestChatKinetica:
@pytest.mark.vcr()
def test_load_context(self) -> None:
"""Load the LLM context from the DB."""
-kinetica_llm = ChatKinetica()
+kinetica_llm = ChatKinetica() # type: ignore[call-arg]
ctx_messages = kinetica_llm.load_messages_from_context(self.context_name)
system_message = ctx_messages[0]
@@ -96,7 +96,7 @@ class TestChatKinetica:
@pytest.mark.vcr()
def test_generate(self) -> None:
"""Generate SQL from a chain."""
-kinetica_llm = ChatKinetica()
+kinetica_llm = ChatKinetica() # type: ignore[call-arg]
# create chain
ctx_messages = kinetica_llm.load_messages_from_context(self.context_name)
@@ -113,7 +113,7 @@ class TestChatKinetica:
@pytest.mark.vcr()
def test_full_chain(self) -> None:
"""Generate SQL from a chain and execute the query."""
-kinetica_llm = ChatKinetica()
+kinetica_llm = ChatKinetica() # type: ignore[call-arg]
# create chain
ctx_messages = kinetica_llm.load_messages_from_context(self.context_name)


@@ -192,15 +192,15 @@ def test_konko_streaming_param_validation_test() -> None:
def test_konko_additional_args_test() -> None:
"""Evaluate extra arguments for ChatKonko."""
-chat_instance = ChatKonko(extra=3, max_tokens=10)
+chat_instance = ChatKonko(extra=3, max_tokens=10) # type: ignore[call-arg]
assert chat_instance.max_tokens == 10
assert chat_instance.model_kwargs == {"extra": 3}
-chat_instance = ChatKonko(extra=3, model_kwargs={"addition": 2})
+chat_instance = ChatKonko(extra=3, model_kwargs={"addition": 2}) # type: ignore[call-arg]
assert chat_instance.model_kwargs == {"extra": 3, "addition": 2}
with pytest.raises(ValueError):
-ChatKonko(extra=3, model_kwargs={"extra": 2})
+ChatKonko(extra=3, model_kwargs={"extra": 2}) # type: ignore[call-arg]
with pytest.raises(ValueError):
ChatKonko(model_kwargs={"temperature": 0.2})


@@ -13,7 +13,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_litellm_call() -> None:
"""Test valid call to litellm."""
-chat = ChatLiteLLM(
+chat = ChatLiteLLM( # type: ignore[call-arg]
model="test",
)
message = HumanMessage(content="Hello")
@@ -24,7 +24,7 @@ def test_litellm_call() -> None:
def test_litellm_generate() -> None:
"""Test generate method of anthropic."""
-chat = ChatLiteLLM(model="test")
+chat = ChatLiteLLM(model="test") # type: ignore[call-arg]
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="How many toes do dogs have?")]
]
@@ -40,7 +40,7 @@ def test_litellm_generate() -> None:
def test_litellm_streaming() -> None:
"""Test streaming tokens from anthropic."""
-chat = ChatLiteLLM(model="test", streaming=True)
+chat = ChatLiteLLM(model="test", streaming=True) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
@@ -51,7 +51,7 @@ def test_litellm_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
-chat = ChatLiteLLM(
+chat = ChatLiteLLM( # type: ignore[call-arg]
model="test",
streaming=True,
callback_manager=callback_manager,


@@ -42,7 +42,7 @@ def test_chat_openai_model() -> None:
"""Test ChatOpenAI wrapper handles model_name."""
chat = ChatOpenAI(model="foo")
assert chat.model_name == "foo"
-chat = ChatOpenAI(model_name="bar")
+chat = ChatOpenAI(model_name="bar") # type: ignore[call-arg]
assert chat.model_name == "bar"
@@ -243,17 +243,17 @@ async def test_async_chat_openai_bind_functions() -> None:
def test_chat_openai_extra_kwargs() -> None:
"""Test extra kwargs to chat openai."""
# Check that foo is saved in extra_kwargs.
-llm = ChatOpenAI(foo=3, max_tokens=10)
+llm = ChatOpenAI(foo=3, max_tokens=10) # type: ignore[call-arg]
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
# Test that if extra_kwargs are provided, they are added to it.
-llm = ChatOpenAI(foo=3, model_kwargs={"bar": 2})
+llm = ChatOpenAI(foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
-ChatOpenAI(foo=3, model_kwargs={"foo": 2})
+ChatOpenAI(foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg]
# Test that if explicit param is specified in kwargs it errors
with pytest.raises(ValueError):


@@ -11,8 +11,8 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_pai_eas_call() -> None:
chat = PaiEasChatEndpoint(
-eas_service_url=os.getenv("EAS_SERVICE_URL"),
-eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
+eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type]
+eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type]
)
response = chat.invoke([HumanMessage(content="Say foo:")])
assert isinstance(response, BaseMessage)
@@ -22,8 +22,8 @@ def test_pai_eas_call() -> None:
def test_multiple_history() -> None:
"""Tests multiple history works."""
chat = PaiEasChatEndpoint(
-eas_service_url=os.getenv("EAS_SERVICE_URL"),
-eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
+eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type]
+eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type]
)
response = chat.invoke(
@@ -40,8 +40,8 @@ def test_multiple_history() -> None:
def test_stream() -> None:
"""Test that stream works."""
chat = PaiEasChatEndpoint(
-eas_service_url=os.getenv("EAS_SERVICE_URL"),
-eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
+eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type]
+eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type]
streaming=True,
)
callback_handler = FakeCallbackHandler()
@@ -62,8 +62,8 @@ def test_stream() -> None:
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
chat = PaiEasChatEndpoint(
-eas_service_url=os.getenv("EAS_SERVICE_URL"),
-eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
+eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type]
+eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type]
)
message = HumanMessage(content="Hi, how are you.")
response = chat.generate([[message], [message]])


@@ -14,12 +14,12 @@ from langchain_community.chat_models import ChatPremAI
@pytest.fixture
def chat() -> ChatPremAI:
-return ChatPremAI(project_id=8)
+return ChatPremAI(project_id=8) # type: ignore[call-arg]
def test_chat_premai() -> None:
"""Test ChatPremAI wrapper."""
-chat = ChatPremAI(project_id=8)
+chat = ChatPremAI(project_id=8) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, BaseMessage)
@@ -28,7 +28,7 @@ def test_chat_premai() -> None:
def test_chat_prem_system_message() -> None:
"""Test ChatPremAI wrapper for system message"""
-chat = ChatPremAI(project_id=8)
+chat = ChatPremAI(project_id=8) # type: ignore[call-arg]
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat.invoke([system_message, human_message])
@@ -38,13 +38,13 @@ def test_chat_prem_system_message() -> None:
def test_chat_prem_model() -> None:
"""Test ChatPremAI wrapper handles model_name."""
-chat = ChatPremAI(model="foo", project_id=8)
+chat = ChatPremAI(model="foo", project_id=8) # type: ignore[call-arg]
assert chat.model == "foo"
def test_chat_prem_generate() -> None:
"""Test ChatPremAI wrapper with generate."""
-chat = ChatPremAI(project_id=8)
+chat = ChatPremAI(project_id=8) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
@@ -64,7 +64,7 @@ async def test_prem_invoke(chat: ChatPremAI) -> None:
def test_prem_streaming() -> None:
"""Test streaming tokens from Prem."""
-chat = ChatPremAI(project_id=8, streaming=True)
+chat = ChatPremAI(project_id=8, streaming=True) # type: ignore[call-arg]
for token in chat.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)


@@ -11,7 +11,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_promptlayer_chat_openai() -> None:
"""Test PromptLayerChatOpenAI wrapper."""
-chat = PromptLayerChatOpenAI(max_tokens=10)
+chat = PromptLayerChatOpenAI(max_tokens=10) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, BaseMessage)
@@ -20,7 +20,7 @@ def test_promptlayer_chat_openai() -> None:
def test_promptlayer_chat_openai_system_message() -> None:
"""Test PromptLayerChatOpenAI wrapper with system message."""
-chat = PromptLayerChatOpenAI(max_tokens=10)
+chat = PromptLayerChatOpenAI(max_tokens=10) # type: ignore[call-arg]
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat.invoke([system_message, human_message])
@@ -30,7 +30,7 @@ def test_promptlayer_chat_openai_system_message() -> None:
def test_promptlayer_chat_openai_generate() -> None:
"""Test PromptLayerChatOpenAI wrapper with generate."""
-chat = PromptLayerChatOpenAI(max_tokens=10, n=2)
+chat = PromptLayerChatOpenAI(max_tokens=10, n=2) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
@@ -45,7 +45,7 @@ def test_promptlayer_chat_openai_generate() -> None:
def test_promptlayer_chat_openai_multiple_completions() -> None:
"""Test PromptLayerChatOpenAI wrapper with multiple completions."""
-chat = PromptLayerChatOpenAI(max_tokens=10, n=5)
+chat = PromptLayerChatOpenAI(max_tokens=10, n=5) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat._generate([message])
assert isinstance(response, ChatResult)
@@ -59,7 +59,7 @@ def test_promptlayer_chat_openai_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
-chat = PromptLayerChatOpenAI(
+chat = PromptLayerChatOpenAI( # type: ignore[call-arg]
max_tokens=10,
streaming=True,
temperature=0,
@@ -75,7 +75,7 @@ def test_promptlayer_chat_openai_streaming() -> None:
def test_promptlayer_chat_openai_invalid_streaming_params() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
with pytest.raises(ValueError):
-PromptLayerChatOpenAI(
+PromptLayerChatOpenAI( # type: ignore[call-arg]
max_tokens=10,
streaming=True,
temperature=0,
@@ -85,7 +85,7 @@ def test_promptlayer_chat_openai_invalid_streaming_params() -> None:
async def test_async_promptlayer_chat_openai() -> None:
"""Test async generation."""
-chat = PromptLayerChatOpenAI(max_tokens=10, n=2)
+chat = PromptLayerChatOpenAI(max_tokens=10, n=2) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert isinstance(response, LLMResult)
@@ -102,7 +102,7 @@ async def test_async_promptlayer_chat_openai_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
-chat = PromptLayerChatOpenAI(
+chat = PromptLayerChatOpenAI( # type: ignore[call-arg]
max_tokens=10,
streaming=True,
temperature=0,


@@ -90,8 +90,8 @@ def test_initialization() -> None:
"""Test chat model initialization."""
for model in [
-QianfanChatEndpoint(model="BLOOMZ-7B", timeout=40),
-QianfanChatEndpoint(model="BLOOMZ-7B", request_timeout=40),
+QianfanChatEndpoint(model="BLOOMZ-7B", timeout=40), # type: ignore[call-arg]
+QianfanChatEndpoint(model="BLOOMZ-7B", request_timeout=40), # type: ignore[call-arg]
]:
assert model.model == "BLOOMZ-7B"
assert model.request_timeout == 40
@@ -99,7 +99,7 @@ def test_initialization() -> None:
def test_default_call() -> None:
"""Test default model.invoke(`ERNIE-Bot`) call."""
-chat = QianfanChatEndpoint()
+chat = QianfanChatEndpoint() # type: ignore[call-arg]
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
@@ -107,7 +107,7 @@ def test_default_call() -> None:
def test_model() -> None:
"""Test model kwarg works."""
-chat = QianfanChatEndpoint(model="BLOOMZ-7B")
+chat = QianfanChatEndpoint(model="BLOOMZ-7B") # type: ignore[call-arg]
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
@@ -115,7 +115,7 @@ def test_model() -> None:
def test_model_param() -> None:
"""Test model params works."""
-chat = QianfanChatEndpoint()
+chat = QianfanChatEndpoint() # type: ignore[call-arg]
response = chat.invoke([HumanMessage(content="Hello")], model="BLOOMZ-7B")
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
@@ -123,7 +123,7 @@ def test_model_param() -> None:
def test_endpoint() -> None:
"""Test user custom model deployments like some open source models."""
-chat = QianfanChatEndpoint(endpoint="qianfan_bloomz_7b_compressed")
+chat = QianfanChatEndpoint(endpoint="qianfan_bloomz_7b_compressed") # type: ignore[call-arg]
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
@@ -131,9 +131,9 @@ def test_endpoint() -> None:
def test_endpoint_param() -> None:
"""Test user custom model deployments like some open source models."""
-chat = QianfanChatEndpoint()
+chat = QianfanChatEndpoint() # type: ignore[call-arg]
response = chat.invoke(
-[HumanMessage(endpoint="qianfan_bloomz_7b_compressed", content="Hello")]
+[HumanMessage(endpoint="qianfan_bloomz_7b_compressed", content="Hello")] # type: ignore[call-arg]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
@@ -141,7 +141,7 @@ def test_endpoint_param() -> None:
def test_multiple_history() -> None:
"""Tests multiple history works."""
-chat = QianfanChatEndpoint()
+chat = QianfanChatEndpoint() # type: ignore[call-arg]
response = chat.invoke(
[
@@ -156,7 +156,7 @@ def test_multiple_history() -> None:
def test_chat_generate() -> None:
"""Tests chat generate works."""
-chat = QianfanChatEndpoint()
+chat = QianfanChatEndpoint() # type: ignore[call-arg]
response = chat.generate(
[
[
@@ -175,7 +175,7 @@ def test_chat_generate() -> None:
def test_stream() -> None:
"""Test that stream works."""
-chat = QianfanChatEndpoint(streaming=True)
+chat = QianfanChatEndpoint(streaming=True) # type: ignore[call-arg]
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat.invoke(
@@ -203,7 +203,7 @@ def test_stream() -> None:
@pytest.mark.asyncio
async def test_async_invoke() -> None:
-chat = QianfanChatEndpoint()
+chat = QianfanChatEndpoint() # type: ignore[call-arg]
res = await chat.ainvoke([HumanMessage(content="Hello")])
assert isinstance(res, BaseMessage)
assert res.content != ""
@@ -212,7 +212,7 @@ async def test_async_invoke() -> None:
@pytest.mark.asyncio
async def test_async_generate() -> None:
"""Tests chat agenerate works."""
-chat = QianfanChatEndpoint()
+chat = QianfanChatEndpoint() # type: ignore[call-arg]
response = await chat.agenerate(
[
[
@@ -231,7 +231,7 @@ async def test_async_generate() -> None:
@pytest.mark.asyncio
async def test_async_stream() -> None:
-chat = QianfanChatEndpoint(streaming=True)
+chat = QianfanChatEndpoint(streaming=True) # type: ignore[call-arg]
async for token in chat.astream(
[
HumanMessage(content="Hello."),
@@ -244,7 +244,7 @@ async def test_async_stream() -> None:
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
-chat = QianfanChatEndpoint()
+chat = QianfanChatEndpoint() # type: ignore[call-arg]
message = HumanMessage(content="Hi, how are you.")
response = chat.generate([[message], [message]])
@@ -259,13 +259,13 @@ def test_multiple_messages() -> None:
def test_functions_call_thoughts() -> None:
-chat = QianfanChatEndpoint(model="ERNIE-Bot")
+chat = QianfanChatEndpoint(model="ERNIE-Bot") # type: ignore[call-arg]
prompt_tmpl = "Use the given functions to answer following question: {input}"
prompt_msgs = [
HumanMessagePromptTemplate.from_template(prompt_tmpl),
]
-prompt = ChatPromptTemplate(messages=prompt_msgs)
+prompt = ChatPromptTemplate(messages=prompt_msgs) # type: ignore[arg-type, call-arg]
chain = prompt | chat.bind(functions=_FUNCTIONS)
@@ -276,9 +276,9 @@ def test_functions_call_thoughts() -> None:
def test_functions_call() -> None:
-chat = QianfanChatEndpoint(model="ERNIE-Bot")
+chat = QianfanChatEndpoint(model="ERNIE-Bot") # type: ignore[call-arg]
-prompt = ChatPromptTemplate(
+prompt = ChatPromptTemplate( # type: ignore[call-arg]
messages=[
HumanMessage(content="What's the temperature in Shanghai today?"),
AIMessage(
@@ -305,7 +305,7 @@ def test_functions_call() -> None:
def test_rate_limit() -> None:
-chat = QianfanChatEndpoint(model="ERNIE-Bot", init_kwargs={"query_per_second": 2})
+chat = QianfanChatEndpoint(model="ERNIE-Bot", init_kwargs={"query_per_second": 2}) # type: ignore[call-arg]
assert chat.client._client._rate_limiter._sync_limiter._query_per_second == 2
responses = chat.batch(
[
@@ -326,7 +326,7 @@ def test_qianfan_key_masked_when_passed_from_env(
monkeypatch.setenv("QIANFAN_AK", "test-api-key")
monkeypatch.setenv("QIANFAN_SK", "test-secret-key")
-chat = QianfanChatEndpoint()
+chat = QianfanChatEndpoint() # type: ignore[call-arg]
print(chat.qianfan_ak, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
@@ -340,9 +340,9 @@ def test_qianfan_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test initialization with an API key provided via the initializer"""
-chat = QianfanChatEndpoint(
-qianfan_ak="test-api-key",
-qianfan_sk="test-secret-key",
+chat = QianfanChatEndpoint( # type: ignore[call-arg]
+qianfan_ak="test-api-key", # type: ignore[arg-type]
+qianfan_sk="test-secret-key", # type: ignore[arg-type]
)
print(chat.qianfan_ak, end="") # noqa: T201
captured = capsys.readouterr()
@@ -356,9 +356,9 @@ def test_qianfan_key_masked_when_passed_via_constructor(
def test_uses_actual_secret_value_from_secret_str() -> None:
"""Test that actual secret is retrieved using `.get_secret_value()`."""
-chat = QianfanChatEndpoint(
-qianfan_ak="test-api-key",
-qianfan_sk="test-secret-key",
+chat = QianfanChatEndpoint( # type: ignore[call-arg]
+qianfan_ak="test-api-key", # type: ignore[arg-type]
+qianfan_sk="test-secret-key", # type: ignore[arg-type]
)
assert cast(SecretStr, chat.qianfan_ak).get_secret_value() == "test-api-key"
assert cast(SecretStr, chat.qianfan_sk).get_secret_value() == "test-secret-key"


@@ -7,13 +7,13 @@ def test_initialization() -> None:
"""Test chat model initialization."""
for model in [
ChatSparkLLM(timeout=30),
-ChatSparkLLM(request_timeout=30),
+ChatSparkLLM(request_timeout=30), # type: ignore[call-arg]
]:
assert model.request_timeout == 30
def test_chat_spark_llm() -> None:
-chat = ChatSparkLLM()
+chat = ChatSparkLLM() # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
@@ -21,14 +21,14 @@ def test_chat_spark_llm() -> None:
def test_chat_spark_llm_streaming() -> None:
-chat = ChatSparkLLM(streaming=True)
+chat = ChatSparkLLM(streaming=True) # type: ignore[call-arg]
for chunk in chat.stream("Hello!"):
assert isinstance(chunk, AIMessageChunk)
assert isinstance(chunk.content, str)
def test_chat_spark_llm_with_domain() -> None:
-chat = ChatSparkLLM(spark_llm_domain="generalv3")
+chat = ChatSparkLLM(spark_llm_domain="generalv3") # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
print(response) # noqa: T201
@@ -37,7 +37,7 @@ def test_chat_spark_llm_with_domain() -> None:
def test_chat_spark_llm_with_temperature() -> None:
-chat = ChatSparkLLM(temperature=0.9, top_k=2)
+chat = ChatSparkLLM(temperature=0.9, top_k=2) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
print(response) # noqa: T201


@@ -36,22 +36,22 @@ _FUNCTIONS: Any = [
def test_initialization() -> None:
"""Test chat model initialization."""
for model in [
-ChatTongyi(model_name="qwen-turbo", api_key="xyz"),
-ChatTongyi(model="qwen-turbo", dashscope_api_key="xyz"),
+ChatTongyi(model_name="qwen-turbo", api_key="xyz"), # type: ignore[arg-type, call-arg]
+ChatTongyi(model="qwen-turbo", dashscope_api_key="xyz"), # type: ignore[call-arg]
]:
assert model.model_name == "qwen-turbo"
assert cast(SecretStr, model.dashscope_api_key).get_secret_value() == "xyz"
def test_api_key_is_string() -> None:
-llm = ChatTongyi(dashscope_api_key="secret-api-key")
+llm = ChatTongyi(dashscope_api_key="secret-api-key") # type: ignore[call-arg]
assert isinstance(llm.dashscope_api_key, SecretStr)
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
-llm = ChatTongyi(dashscope_api_key="secret-api-key")
+llm = ChatTongyi(dashscope_api_key="secret-api-key") # type: ignore[call-arg]
print(llm.dashscope_api_key, end="") # noqa: T201
captured = capsys.readouterr()
@@ -60,7 +60,7 @@ def test_api_key_masked_when_passed_via_constructor(
def test_default_call() -> None:
"""Test default model call."""
-chat = ChatTongyi()
+chat = ChatTongyi() # type: ignore[call-arg]
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
@@ -68,20 +68,20 @@ def test_default_call() -> None:
def test_model() -> None:
"""Test model kwarg works."""
-chat = ChatTongyi(model="qwen-plus")
+chat = ChatTongyi(model="qwen-plus") # type: ignore[call-arg]
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_functions_call_thoughts() -> None:
-chat = ChatTongyi(model="qwen-plus")
+chat = ChatTongyi(model="qwen-plus") # type: ignore[call-arg]
prompt_tmpl = "Use the given functions to answer following question: {input}"
prompt_msgs = [
HumanMessagePromptTemplate.from_template(prompt_tmpl),
]
-prompt = ChatPromptTemplate(messages=prompt_msgs)
+prompt = ChatPromptTemplate(messages=prompt_msgs) # type: ignore[arg-type, call-arg]
chain = prompt | chat.bind(functions=_FUNCTIONS)
@@ -93,7 +93,7 @@ def test_functions_call_thoughts() -> None:
def test_multiple_history() -> None:
"""Tests multiple history works."""
-chat = ChatTongyi()
+chat = ChatTongyi() # type: ignore[call-arg]
response = chat.invoke(
[
@@ -108,7 +108,7 @@ def test_multiple_history() -> None:
def test_stream() -> None:
"""Test that stream works."""
-chat = ChatTongyi(streaming=True)
+chat = ChatTongyi(streaming=True) # type: ignore[call-arg]
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat.invoke(
@@ -126,7 +126,7 @@ def test_stream() -> None:
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
-chat = ChatTongyi()
+chat = ChatTongyi() # type: ignore[call-arg]
message = HumanMessage(content="Hi, how are you.")
response = chat.generate([[message], [message]])


@@ -248,7 +248,7 @@ def test_vertexai_args_passed(stop: Optional[str]) -> None:
mock_send_message = MagicMock(return_value=mock_response)
mock_chat.send_message = mock_send_message
-model = ChatVertexAI(**prompt_params)
+model = ChatVertexAI(**prompt_params) # type: ignore[arg-type]
message = HumanMessage(content=user_prompt)
if stop:
response = model.invoke([message], stop=[stop])


@@ -10,7 +10,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_default_call() -> None:
"""Test valid chat call to volc engine."""
-chat = VolcEngineMaasChat()
+chat = VolcEngineMaasChat() # type: ignore[call-arg]
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
@@ -18,7 +18,7 @@ def test_default_call() -> None:
def test_multiple_history() -> None:
"""Tests multiple history works."""
-chat = VolcEngineMaasChat()
+chat = VolcEngineMaasChat() # type: ignore[call-arg]
response = chat.invoke(
[
@@ -33,7 +33,7 @@ def test_multiple_history() -> None:
def test_stream() -> None:
"""Test that stream works."""
-chat = VolcEngineMaasChat(streaming=True)
+chat = VolcEngineMaasChat(streaming=True) # type: ignore[call-arg]
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat.invoke(
@@ -51,7 +51,7 @@ def test_stream() -> None:
def test_stop() -> None:
"""Test that stop works."""
-chat = VolcEngineMaasChat(
+chat = VolcEngineMaasChat( # type: ignore[call-arg]
model="skylark2-pro-4k", model_version="1.2", streaming=True
)
callback_handler = FakeCallbackHandler()
@@ -73,7 +73,7 @@ def test_stop() -> None:
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
-chat = VolcEngineMaasChat()
+chat = VolcEngineMaasChat() # type: ignore[call-arg]
message = HumanMessage(content="Hi, how are you?")
response = chat.generate([[message], [message]])


@@ -16,7 +16,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
@pytest.mark.scheduled
def test_chat_yuan2() -> None:
"""Test ChatYuan2 wrapper."""
-chat = ChatYuan2(
+chat = ChatYuan2( # type: ignore[call-arg]
yuan2_api_key="EMPTY",
yuan2_api_base="http://127.0.0.1:8001/v1",
temperature=1.0,
@@ -34,7 +34,7 @@ def test_chat_yuan2() -> None:
def test_chat_yuan2_system_message() -> None:
"""Test ChatYuan2 wrapper with system message."""
-chat = ChatYuan2(
+chat = ChatYuan2( # type: ignore[call-arg]
yuan2_api_key="EMPTY",
yuan2_api_base="http://127.0.0.1:8001/v1",
temperature=1.0,
@@ -54,7 +54,7 @@ def test_chat_yuan2_system_message() -> None:
@pytest.mark.scheduled
def test_chat_yuan2_generate() -> None:
"""Test ChatYuan2 wrapper with generate."""
-chat = ChatYuan2(
+chat = ChatYuan2( # type: ignore[call-arg]
yuan2_api_key="EMPTY",
yuan2_api_base="http://127.0.0.1:8001/v1",
temperature=1.0,
@@ -82,7 +82,7 @@ def test_chat_yuan2_streaming() -> None:
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
-chat = ChatYuan2(
+chat = ChatYuan2( # type: ignore[call-arg]
yuan2_api_key="EMPTY",
yuan2_api_base="http://127.0.0.1:8001/v1",
temperature=1.0,
@@ -102,7 +102,7 @@ def test_chat_yuan2_streaming() -> None:
@pytest.mark.asyncio
async def test_async_chat_yuan2() -> None:
"""Test async generation."""
-chat = ChatYuan2(
+chat = ChatYuan2( # type: ignore[call-arg]
yuan2_api_key="EMPTY",
yuan2_api_base="http://127.0.0.1:8001/v1",
temperature=1.0,
@@ -129,7 +129,7 @@ async def test_async_chat_yuan2_streaming() -> None:
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
-chat = ChatYuan2(
+chat = ChatYuan2( # type: ignore[call-arg]
yuan2_api_key="EMPTY",
yuan2_api_base="http://127.0.0.1:8001/v1",
temperature=1.0,