Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-01 11:02:37 +00:00
community: update documentation and model IDs for FriendliAI provider (#28984)
### Description
- In the examples, remove `llama-2-13b-chat` and `mixtral-8x7b-instruct-v0-1`, replacing them with `meta-llama-3.1-8b-instruct`.
- Fix the Friendli LLM streaming implementation.
- Update the examples in the documentation and remove duplicates.

### Issue
N/A

### Dependencies
None

### Twitter handle
`@friendliai`
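For orientation, a minimal usage sketch reflecting the updated examples (`YOUR FRIENDLI TOKEN` is a placeholder, and the model ID is the new default this commit introduces):

```python
from langchain_community.llms import Friendli

# The new default model ID replaces llama-2-13b-chat / mixtral-8x7b-instruct-v0-1.
llm = Friendli(model="meta-llama-3.1-8b-instruct", friendli_token="YOUR FRIENDLI TOKEN")

# The fixed streaming path emits the text of each response's first choice.
for chunk in llm.stream("What is generative AI?"):
    print(chunk, end="", flush=True)
```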
```diff
@@ -75,12 +75,12 @@ class ChatFriendli(BaseChatModel, BaseFriendli):
             from langchain_community.chat_models import FriendliChat

             chat = Friendli(
-                model="llama-2-13b-chat", friendli_token="YOUR FRIENDLI TOKEN"
+                model="meta-llama-3.1-8b-instruct", friendli_token="YOUR FRIENDLI TOKEN"
             )
             chat.invoke("What is generative AI?")
     """

-    model: str = "llama-2-13b-chat"
+    model: str = "meta-llama-3.1-8b-instruct"

     @property
     def lc_secrets(self) -> Dict[str, str]:
```
```diff
@@ -16,14 +16,19 @@ from langchain_core.utils.utils import convert_to_secret_str
 from pydantic import Field, SecretStr


-def _stream_response_to_generation_chunk(stream_response: Any) -> GenerationChunk:
+def _stream_response_to_generation_chunk(
+    stream_response: Any,
+) -> GenerationChunk:
     """Convert a stream response to a generation chunk."""
-    if stream_response.event == "token_sampled":
-        return GenerationChunk(
-            text=stream_response.text,
-            generation_info={"token": str(stream_response.token)},
-        )
-    return GenerationChunk(text="")
+    if not stream_response.get("choices", None):
+        return GenerationChunk(text="")
+    return GenerationChunk(
+        text=stream_response.choices[0].text,
+        # generation_info=dict(
+        #     finish_reason=stream_response.choices[0].get("finish_reason", None),
+        #     logprobs=stream_response.choices[0].get("logprobs", None),
+        # ),
+    )


 class BaseFriendli(Serializable):
```
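A minimal sketch of the rewritten helper's behavior, using a hypothetical `FakeChunk` stand-in for the Friendli SDK's completion chunk (only the `choices` shape and the dict-style `.get()` the helper calls are assumed; the import path follows from the `Friendli(LLM, ...)` hunks below):

```python
from types import SimpleNamespace

from langchain_community.llms.friendli import _stream_response_to_generation_chunk


class FakeChunk(SimpleNamespace):
    """Hypothetical stand-in exposing the dict-style .get() the helper calls."""

    def get(self, key, default=None):
        return getattr(self, key, default)


# No choices -> the helper returns an empty generation chunk.
assert _stream_response_to_generation_chunk(FakeChunk(choices=[])).text == ""

# Otherwise the chunk text comes from choices[0].text.
chunk = FakeChunk(choices=[SimpleNamespace(text="Hello ")])
assert _stream_response_to_generation_chunk(chunk).text == "Hello "
```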
```diff
@@ -34,7 +39,7 @@ class BaseFriendli(Serializable):
     # Friendli Async client.
     async_client: Any = Field(default=None, exclude=True)
     # Model name to use.
-    model: str = "mixtral-8x7b-instruct-v0-1"
+    model: str = "meta-llama-3.1-8b-instruct"
     # Friendli personal access token to run as.
     friendli_token: Optional[SecretStr] = None
     # Friendli team ID to run as.
```
```diff
@@ -107,7 +112,7 @@ class Friendli(LLM, BaseFriendli):
             from langchain_community.llms import Friendli

             friendli = Friendli(
-                model="mixtral-8x7b-instruct-v0-1", friendli_token="YOUR FRIENDLI TOKEN"
+                model="meta-llama-3.1-8b-instruct", friendli_token="YOUR FRIENDLI TOKEN"
             )
     """
```
```diff
@@ -114,14 +114,14 @@ async def test_friendli_ainvoke(
 @pytest.mark.requires("friendli")
 def test_friendli_stream(mock_friendli_client: Mock, friendli_llm: Friendli) -> None:
     """Test stream with friendli."""
+    mock_choice_0 = Mock()
+    mock_choice_0.text = "Hello "
+    mock_choice_1 = Mock()
+    mock_choice_1.text = "langchain"
     mock_chunk_0 = Mock()
-    mock_chunk_0.event = "token_sampled"
-    mock_chunk_0.text = "Hello "
-    mock_chunk_0.token = 0
+    mock_chunk_0.choices = [mock_choice_0]
     mock_chunk_1 = Mock()
-    mock_chunk_1.event = "token_sampled"
-    mock_chunk_1.text = "Friendli"
-    mock_chunk_1.token = 1
+    mock_chunk_1.choices = [mock_choice_1]
     mock_stream = MagicMock()
     mock_chunks = [mock_chunk_0, mock_chunk_1]
     mock_stream.__iter__.return_value = mock_chunks
```
```diff
@@ -129,7 +129,7 @@ def test_friendli_stream(mock_friendli_client: Mock, friendli_llm: Friendli) ->
     mock_friendli_client.completions.create.return_value = mock_stream
     stream = friendli_llm.stream("Hello langchain")
     for i, chunk in enumerate(stream):
-        assert chunk == mock_chunks[i].text
+        assert chunk == mock_chunks[i].choices[0].text

     mock_friendli_client.completions.create.assert_called_once_with(
         model=friendli_llm.model,
```
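A note on the mocking pattern above: `MagicMock` special-cases `__iter__` by calling `iter()` on the configured return value, which is why assigning a plain list works for the synchronous stream. A self-contained illustration (not from the test file):

```python
from unittest.mock import MagicMock

mock_stream = MagicMock()
mock_stream.__iter__.return_value = ["Hello ", "langchain"]

# Iterating the mock yields the configured items in order.
assert list(mock_stream) == ["Hello ", "langchain"]
```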
```diff
@@ -149,22 +149,22 @@ async def test_friendli_astream(
     mock_friendli_async_client: AsyncMock, friendli_llm: Friendli
 ) -> None:
     """Test async stream with friendli."""
+    mock_choice_0 = Mock()
+    mock_choice_0.text = "Hello "
+    mock_choice_1 = Mock()
+    mock_choice_1.text = "langchain"
     mock_chunk_0 = Mock()
-    mock_chunk_0.event = "token_sampled"
-    mock_chunk_0.text = "Hello "
-    mock_chunk_0.token = 0
+    mock_chunk_0.choices = [mock_choice_0]
     mock_chunk_1 = Mock()
-    mock_chunk_1.event = "token_sampled"
-    mock_chunk_1.text = "Friendli"
-    mock_chunk_1.token = 1
+    mock_chunk_1.choices = [mock_choice_1]
     mock_stream = AsyncMock()
     mock_chunks = [mock_chunk_0, mock_chunk_1]
-    mock_stream.__aiter__.return_value = mock_chunks
+    mock_stream.__aiter__.return_value = iter(mock_chunks)

     mock_friendli_async_client.completions.create.return_value = mock_stream
     stream = friendli_llm.astream("Hello langchain")
     async for i, chunk in aenumerate(stream):
-        assert chunk == mock_chunks[i].text
+        assert chunk == mock_chunks[i].choices[0].text

     mock_friendli_async_client.completions.create.assert_awaited_once_with(
         model=friendli_llm.model,
```
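Two details in the async hunk are easy to miss. First, wrapping the chunks in `iter(...)` hands the mocked `__aiter__` an explicit one-shot iterator, closer to how a real stream is consumed exactly once. Second, `aenumerate` is not a Python builtin; the test module presumably defines or imports a helper along these lines (a sketch, not the repository's exact code):

```python
from typing import AsyncIterator, Tuple, TypeVar

T = TypeVar("T")


async def aenumerate(
    aiterable: AsyncIterator[T], start: int = 0
) -> AsyncIterator[Tuple[int, T]]:
    """Async counterpart of enumerate() for async iterators."""
    index = start
    async for item in aiterable:
        yield index, item
        index += 1
```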