feat: Implement stream interface (#15875)
Major changes:

- Rename `wasm_chat.py` to `llama_edge.py`
- Rename the `WasmChatService` class to `LlamaEdgeChatService`
- Implement the `stream` interface for `LlamaEdgeChatService`
- Add `test_chat_wasm_service_streaming` to the integration tests
- Update `llama_edge.ipynb`

Signed-off-by: Xin Liu <sam@secondstate.io>
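The hunks below cover only the tests; the `stream` implementation itself lives in `llama_edge.py`, which is not shown here. For orientation, here is a minimal, hypothetical sketch of the kind of `_stream` hook that `BaseChatModel.stream()` dispatches to when `streaming=True`. The endpoint path, payload shape, and SSE framing are assumptions, not the actual `LlamaEdgeChatService` code:

```python
import json
from typing import Any, Iterator, List, Optional

import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, ChatResult

# Map LangChain message types to OpenAI-style roles (assumed wire format).
_ROLE_MAP = {"human": "user", "ai": "assistant", "system": "system"}


class ChatServiceSketch(BaseChatModel):
    """Hypothetical stand-in for LlamaEdgeChatService; illustration only."""

    service_url: str = "http://localhost:8080"

    @property
    def _llm_type(self) -> str:
        return "chat-service-sketch"

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        # Non-streaming path omitted; this sketch focuses on `_stream`.
        raise NotImplementedError

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        payload = {
            "messages": [
                {"role": _ROLE_MAP.get(m.type, m.type), "content": m.content}
                for m in messages
            ],
            "stream": True,  # assumed server-side flag
        }
        # Assumed OpenAI-compatible SSE endpoint; the real path may differ.
        with requests.post(
            f"{self.service_url}/v1/chat/completions", json=payload, stream=True
        ) as resp:
            for raw in resp.iter_lines():
                if not raw:
                    continue
                line = raw.decode("utf-8")
                if line.startswith("data: "):
                    line = line[len("data: "):]
                if line.strip() == "[DONE]":
                    break
                delta = json.loads(line)["choices"][0].get("delta", {})
                text = delta.get("content") or ""
                chunk = ChatGenerationChunk(message=AIMessageChunk(content=text))
                if run_manager:
                    run_manager.on_llm_new_token(text, chunk=chunk)
                yield chunk
```

With such a hook in place, `chat.stream(messages)` yields message chunks one token at a time, which is exactly what the new integration test below exercises.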
@@ -0,0 +1,52 @@
+import pytest
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
+
+from langchain_community.chat_models.llama_edge import LlamaEdgeChatService
+
+
+@pytest.mark.enable_socket
+def test_chat_wasm_service() -> None:
+    """This test requires the port 8080 is not occupied."""
+
+    # service url
+    service_url = "https://b008-54-186-154-209.ngrok-free.app"
+
+    # create wasm-chat service instance
+    chat = LlamaEdgeChatService(service_url=service_url)
+
+    # create message sequence
+    system_message = SystemMessage(content="You are an AI assistant")
+    user_message = HumanMessage(content="What is the capital of France?")
+    messages = [system_message, user_message]
+
+    # chat with wasm-chat service
+    response = chat(messages)
+
+    # check response
+    assert isinstance(response, AIMessage)
+    assert isinstance(response.content, str)
+    assert "Paris" in response.content
+
+
+@pytest.mark.enable_socket
+def test_chat_wasm_service_streaming() -> None:
+    """This test requires the port 8080 is not occupied."""
+
+    # service url
+    service_url = "https://b008-54-186-154-209.ngrok-free.app"
+
+    # create wasm-chat service instance
+    chat = LlamaEdgeChatService(service_url=service_url, streaming=True)
+
+    # create message sequence
+    user_message = HumanMessage(content="What is the capital of France?")
+    messages = [
+        user_message,
+    ]
+
+    output = ""
+    for chunk in chat.stream(messages):
+        print(chunk.content, end="", flush=True)
+        output += chunk.content
+
+    assert "Paris" in output
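As a side note (not part of this diff), the message chunks yielded by `stream` support `+`, so a caller can fold them into a single message instead of concatenating raw strings. A short usage sketch, reusing `chat` and `messages` as constructed in the streaming test above:

```python
from typing import Optional

from langchain_core.messages import BaseMessageChunk

# Fold streamed chunks into one message; `+` concatenates chunk content.
final: Optional[BaseMessageChunk] = None
for chunk in chat.stream(messages):
    final = chunk if final is None else final + chunk

assert final is not None
assert "Paris" in final.content
```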
@@ -1,28 +0,0 @@
-import pytest
-from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
-
-from langchain_community.chat_models.wasm_chat import WasmChatService
-
-
-@pytest.mark.enable_socket
-def test_chat_wasm_service() -> None:
-    """This test requires the port 8080 is not occupied."""
-
-    # service url
-    service_url = "https://b008-54-186-154-209.ngrok-free.app"
-
-    # create wasm-chat service instance
-    chat = WasmChatService(service_url=service_url)
-
-    # create message sequence
-    system_message = SystemMessage(content="You are an AI assistant")
-    user_message = HumanMessage(content="What is the capital of France?")
-    messages = [system_message, user_message]
-
-    # chat with wasm-chat service
-    response = chat(messages)
-
-    # check response
-    assert isinstance(response, AIMessage)
-    assert isinstance(response.content, str)
-    assert "Paris" in response.content
@@ -33,7 +33,7 @@ EXPECTED_ALL = [
     "ChatHunyuan",
     "GigaChat",
     "VolcEngineMaasChat",
-    "WasmChatService",
+    "LlamaEdgeChatService",
     "GPTRouter",
     "ChatZhipuAI",
 ]
@@ -7,8 +7,8 @@ from langchain_core.messages import (
     SystemMessage,
 )
 
-from langchain_community.chat_models.wasm_chat import (
-    WasmChatService,
+from langchain_community.chat_models.llama_edge import (
+    LlamaEdgeChatService,
     _convert_dict_to_message,
     _convert_message_to_dict,
 )
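The unit test imports the private converters `_convert_dict_to_message` and `_convert_message_to_dict` alongside the class. For orientation, a hypothetical sketch of the role mapping such helpers typically perform in LangChain chat integrations — an illustration, not the actual `llama_edge.py` code:

```python
from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    ChatMessage,
    HumanMessage,
    SystemMessage,
)


def convert_message_to_dict(message: BaseMessage) -> dict:
    """Illustrative: serialize a LangChain message to an API-style dict."""
    if isinstance(message, SystemMessage):
        role = "system"
    elif isinstance(message, HumanMessage):
        role = "user"
    elif isinstance(message, AIMessage):
        role = "assistant"
    else:
        role = getattr(message, "role", "user")  # e.g. ChatMessage
    return {"role": role, "content": message.content}


def convert_dict_to_message(d: dict) -> BaseMessage:
    """Illustrative: parse an API-style dict back into a LangChain message."""
    role = d.get("role", "")
    content = d.get("content", "")
    if role == "user":
        return HumanMessage(content=content)
    if role == "assistant":
        return AIMessage(content=content)
    if role == "system":
        return SystemMessage(content=content)
    return ChatMessage(content=content, role=role)  # fallback for other roles
```

The "other role" fallback to `ChatMessage` is what `test__convert_dict_to_message_other_role` in the next hunk appears to exercise.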
@@ -64,7 +64,7 @@ def test__convert_dict_to_message_other_role() -> None:
 
 
 def test_wasm_chat_without_service_url() -> None:
-    chat = WasmChatService()
+    chat = LlamaEdgeChatService()
 
     # create message sequence
     system_message = SystemMessage(content="You are an AI assistant")