Fix: fix partner name typos in tests (#15066)

<!-- Thank you for contributing to LangChain!

Please title your PR "<package>: <description>", where <package> is
whichever of langchain, community, core, experimental, etc. is being
modified.

Replace this entire comment with:
  - **Description:** a description of the change,
  - **Issue:** the issue # it fixes if applicable,
  - **Dependencies:** any dependencies required for this change,
  - **Twitter handle:** we announce bigger features on Twitter. If your PR
    gets announced, and you'd like a mention, we'll gladly shout you out!

Please make sure your PR is passing linting and testing before
submitting. Run `make format`, `make lint` and `make test` from the root
of the package you've modified to check this locally.

See contribution guidelines for more information on how to write/run
tests, lint, etc: https://python.langchain.com/docs/contributing/

If you're adding a new integration, please include:
1. a test for the integration, preferably unit tests that do not rely on
network access,
2. an example notebook showing its use. It lives in
`docs/docs/integrations` directory.

If no one reviews your PR within a few days, please @-mention one of
@baskaryan, @eyurtsev, @hwchase17.
 -->

---------

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
Co-authored-by: Ran <rccalman@gmail.com>
This commit is contained in:
chyroc 2023-12-23 03:48:39 +08:00 committed by GitHub
parent 2e159931ac
commit 86d27fd684
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 6 additions and 6 deletions

View File

@@ -5,7 +5,7 @@ from langchain_anthropic.chat_models import ChatAnthropicMessages
def test_stream() -> None:
"""Test streaming tokens from OpenAI."""
"""Test streaming tokens from Anthropic."""
llm = ChatAnthropicMessages(model_name="claude-instant-1.2")
for token in llm.stream("I'm Pickle Rick"):
@@ -13,7 +13,7 @@ def test_stream() -> None:
async def test_astream() -> None:
"""Test streaming tokens from OpenAI."""
"""Test streaming tokens from Anthropic."""
llm = ChatAnthropicMessages(model_name="claude-instant-1.2")
async for token in llm.astream("I'm Pickle Rick"):

View File

@@ -13,7 +13,7 @@ _B64_string = """iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAABhGlDQ1BJQ0MgUHJ
def test_chat_google_genai_stream() -> None:
"""Test streaming tokens from OpenAI."""
"""Test streaming tokens from Gemini."""
llm = ChatGoogleGenerativeAI(model=_MODEL)
for token in llm.stream("This is a test. Say 'foo'"):
@@ -21,7 +21,7 @@ def test_chat_google_genai_stream() -> None:
async def test_chat_google_genai_astream() -> None:
"""Test streaming tokens from OpenAI."""
"""Test streaming tokens from Gemini."""
llm = ChatGoogleGenerativeAI(model=_MODEL)
async for token in llm.astream("This is a test. Say 'foo'"):

View File

@@ -8,7 +8,7 @@ _MODEL_NAME = "ensemble"
@pytest.mark.skip(reason="Need a working Triton server")
def test_stream() -> None:
"""Test streaming tokens from OpenAI."""
"""Test streaming tokens from NVIDIA TRT."""
llm = TritonTensorRTLLM(model_name=_MODEL_NAME)
for token in llm.stream("I'm Pickle Rick"):
@@ -17,7 +17,7 @@ def test_stream() -> None:
@pytest.mark.skip(reason="Need a working Triton server")
async def test_astream() -> None:
"""Test streaming tokens from OpenAI."""
"""Test streaming tokens from NVIDIA TRT."""
llm = TritonTensorRTLLM(model_name=_MODEL_NAME)
async for token in llm.astream("I'm Pickle Rick"):