mirror of
https://github.com/hwchase17/langchain.git
synced 2026-04-06 20:34:01 +00:00
chore(core): drop gpt-3.5-turbo from docstrings (#36497)
This commit is contained in:
@@ -166,14 +166,14 @@ class InMemoryCache(BaseCache):
|
||||
# Update cache
|
||||
cache.update(
|
||||
prompt="What is the capital of France?",
|
||||
llm_string="model='gpt-3.5-turbo', temperature=0.1",
|
||||
llm_string="model='gpt-5.4-mini'",
|
||||
return_val=[Generation(text="Paris")],
|
||||
)
|
||||
|
||||
# Lookup cache
|
||||
result = cache.lookup(
|
||||
prompt="What is the capital of France?",
|
||||
llm_string="model='gpt-3.5-turbo', temperature=0.1",
|
||||
llm_string="model='gpt-5.4-mini'",
|
||||
)
|
||||
# result is [Generation(text="Paris")]
|
||||
```
|
||||
|
||||
@@ -499,7 +499,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
|
||||
# When invoking the created RunnableSequence, you can pass in the
|
||||
# value for your ConfigurableField's id which in this case will either be
|
||||
# `joke` or `poem`.
|
||||
chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
|
||||
chain = prompt | ChatOpenAI(model="gpt-5.4-mini")
|
||||
|
||||
# The `with_config` method brings in the desired Prompt Runnable in your
|
||||
# Runnable Sequence.
|
||||
@@ -525,7 +525,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
|
||||
"poem": PromptTemplate.from_template("Write a short poem about {topic}")
|
||||
},
|
||||
)
|
||||
chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
|
||||
chain = prompt | ChatOpenAI(model="gpt-5.4-mini")
|
||||
chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
|
||||
```
|
||||
"""
|
||||
|
||||
@@ -54,8 +54,8 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
|
||||
from langchain_core.chat_models.openai import ChatOpenAI
|
||||
from langchain_core.chat_models.anthropic import ChatAnthropic
|
||||
|
||||
model = ChatAnthropic(model="claude-3-haiku-20240307").with_fallbacks(
|
||||
[ChatOpenAI(model="gpt-3.5-turbo-0125")]
|
||||
model = ChatAnthropic(model="claude-sonnet-4-6").with_fallbacks(
|
||||
[ChatOpenAI(model="gpt-5.4-mini")]
|
||||
)
|
||||
# Will usually use ChatAnthropic, but fallback to ChatOpenAI
|
||||
# if ChatAnthropic fails.
|
||||
|
||||
Reference in New Issue
Block a user