fireworks[minor]: remove default model and temperature (#30965)
`mixtral-8x-7b-instruct` was recently retired from Fireworks Serverless. Here we remove the default model altogether, so that the model must be explicitly specified on init:

```python
ChatFireworks(model="accounts/fireworks/models/llama-v3p1-70b-instruct")  # for example
```

We also set a null default for `temperature`, which previously defaulted to 0.0. This parameter will no longer be included in request payloads unless it is explicitly provided.
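To make the "null default" behavior concrete, here is a minimal sketch of the pattern (illustrative only — `ExampleChatModel` and `request_payload` are made-up names, not the real `ChatFireworks` implementation): an unset temperature simply never enters the request payload.

```python
from typing import Any, Optional


class ExampleChatModel:
    """Illustrative stand-in for a chat model client (not the real ChatFireworks)."""

    def __init__(self, model: str, temperature: Optional[float] = None) -> None:
        self.model = model  # required: there is no default model anymore
        self.temperature = temperature  # null default instead of 0.0

    def request_payload(self, prompt: str) -> dict[str, Any]:
        payload: dict[str, Any] = {
            "model": self.model,
            "messages": [{"role": "user", "content": prompt}],
        }
        # Only include temperature when the caller set it explicitly;
        # otherwise the provider's server-side default applies.
        if self.temperature is not None:
            payload["temperature"] = self.temperature
        return payload


llm = ExampleChatModel(model="accounts/fireworks/models/llama-v3p1-70b-instruct")
assert "temperature" not in llm.request_payload("Hello!")
```

The same pattern explains why the tests in the diff below switch from `ChatFireworks()` to an explicit `model=` argument.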
@@ -13,54 +13,13 @@ from typing_extensions import TypedDict
 
 from langchain_fireworks import ChatFireworks
 
-
-def test_chat_fireworks_call() -> None:
-    """Test valid call to fireworks."""
-    llm = ChatFireworks(  # type: ignore[call-arg]
-        model="accounts/fireworks/models/llama-v3p1-70b-instruct", temperature=0
-    )
-
-    resp = llm.invoke("Hello!")
-    assert isinstance(resp, AIMessage)
-
-    assert len(resp.content) > 0
-
-
-def test_tool_choice() -> None:
-    """Test that tool choice is respected."""
-    llm = ChatFireworks(  # type: ignore[call-arg]
-        model="accounts/fireworks/models/llama-v3p1-70b-instruct", temperature=0
-    )
-
-    class MyTool(BaseModel):
-        name: str
-        age: int
-
-    with_tool = llm.bind_tools([MyTool], tool_choice="MyTool")
-
-    resp = with_tool.invoke("Who was the 27 year old named Erick?")
-    assert isinstance(resp, AIMessage)
-    assert resp.content == ""  # should just be tool call
-    tool_calls = resp.additional_kwargs["tool_calls"]
-    assert len(tool_calls) == 1
-    tool_call = tool_calls[0]
-    assert tool_call["function"]["name"] == "MyTool"
-    assert json.loads(tool_call["function"]["arguments"]) == {
-        "age": 27,
-        "name": "Erick",
-    }
-    assert tool_call["type"] == "function"
-    assert isinstance(resp.tool_calls, list)
-    assert len(resp.tool_calls) == 1
-    tool_call = resp.tool_calls[0]
-    assert tool_call["name"] == "MyTool"
-    assert tool_call["args"] == {"age": 27, "name": "Erick"}
+_MODEL = "accounts/fireworks/models/llama-v3p1-8b-instruct"
 
 
 def test_tool_choice_bool() -> None:
     """Test that tool choice is respected just passing in True."""
-    llm = ChatFireworks(  # type: ignore[call-arg]
+    llm = ChatFireworks(
         model="accounts/fireworks/models/llama-v3p1-70b-instruct", temperature=0
     )
 
@@ -84,17 +43,9 @@ def test_tool_choice_bool() -> None:
     assert tool_call["type"] == "function"
 
 
 def test_stream() -> None:
     """Test streaming tokens from ChatFireworks."""
-    llm = ChatFireworks()  # type: ignore[call-arg]
+    llm = ChatFireworks(model=_MODEL)
 
     for token in llm.stream("I'm Pickle Rick"):
         assert isinstance(token.content, str)
 
 
 async def test_astream() -> None:
     """Test streaming tokens from ChatFireworks."""
-    llm = ChatFireworks()  # type: ignore[call-arg]
+    llm = ChatFireworks(model=_MODEL)
 
     full: Optional[BaseMessageChunk] = None
     chunks_with_token_counts = 0
@@ -125,18 +76,9 @@ async def test_astream() -> None:
     assert full.response_metadata["model_name"]
 
 
 async def test_abatch() -> None:
     """Test abatch tokens from ChatFireworks."""
-    llm = ChatFireworks()  # type: ignore[call-arg]
+    llm = ChatFireworks(model=_MODEL)
 
     result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
     for token in result:
         assert isinstance(token.content, str)
 
 
 async def test_abatch_tags() -> None:
     """Test batch tokens from ChatFireworks."""
-    llm = ChatFireworks()  # type: ignore[call-arg]
+    llm = ChatFireworks(model=_MODEL)
 
     result = await llm.abatch(
         ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
@@ -145,18 +87,9 @@ async def test_abatch_tags() -> None:
         assert isinstance(token.content, str)
 
 
 def test_batch() -> None:
     """Test batch tokens from ChatFireworks."""
-    llm = ChatFireworks()  # type: ignore[call-arg]
+    llm = ChatFireworks(model=_MODEL)
 
     result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
     for token in result:
         assert isinstance(token.content, str)
 
 
 async def test_ainvoke() -> None:
     """Test invoke tokens from ChatFireworks."""
-    llm = ChatFireworks()  # type: ignore[call-arg]
+    llm = ChatFireworks(model=_MODEL)
 
     result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
     assert isinstance(result.content, str)
@@ -164,7 +97,7 @@ async def test_ainvoke() -> None:
 
 def test_invoke() -> None:
     """Test invoke tokens from ChatFireworks."""
-    llm = ChatFireworks()  # type: ignore[call-arg]
+    llm = ChatFireworks(model=_MODEL)
 
     result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
     assert isinstance(result.content, str)
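A test along these lines (hypothetical; it is not part of this diff) could pin down the new requirement that `model` be passed explicitly:

```python
import pytest

from langchain_fireworks import ChatFireworks


def test_model_is_required() -> None:
    """With the default removed, omitting `model` should fail validation."""
    with pytest.raises(Exception):
        ChatFireworks()  # type: ignore[call-arg]
```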