diff --git a/libs/core/langchain_core/caches.py b/libs/core/langchain_core/caches.py index efeedea5252..1ac250875ee 100644 --- a/libs/core/langchain_core/caches.py +++ b/libs/core/langchain_core/caches.py @@ -166,14 +166,14 @@ class InMemoryCache(BaseCache): # Update cache cache.update( prompt="What is the capital of France?", - llm_string="model='gpt-3.5-turbo', temperature=0.1", + llm_string="model='gpt-5.4-mini'", return_val=[Generation(text="Paris")], ) # Lookup cache result = cache.lookup( prompt="What is the capital of France?", - llm_string="model='gpt-3.5-turbo', temperature=0.1", + llm_string="model='gpt-5.4-mini'", ) # result is [Generation(text="Paris")] ``` diff --git a/libs/core/langchain_core/runnables/configurable.py b/libs/core/langchain_core/runnables/configurable.py index 5552fa620ea..a03108850fa 100644 --- a/libs/core/langchain_core/runnables/configurable.py +++ b/libs/core/langchain_core/runnables/configurable.py @@ -499,7 +499,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]): # When invoking the created RunnableSequence, you can pass in the # value for your ConfigurableField's id which in this case will either be # `joke` or `poem`. - chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + chain = prompt | ChatOpenAI(model="gpt-5.4-mini") # The `with_config` method brings in the desired Prompt Runnable in your # Runnable Sequence. 
@@ -525,7 +525,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]): "poem": PromptTemplate.from_template("Write a short poem about {topic}") }, ) - chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + chain = prompt | ChatOpenAI(model="gpt-5.4-mini") chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"}) ``` """ diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py index 0fcacb4420d..72fb6b4f693 100644 --- a/libs/core/langchain_core/runnables/fallbacks.py +++ b/libs/core/langchain_core/runnables/fallbacks.py @@ -54,8 +54,8 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]): from langchain_core.chat_models.openai import ChatOpenAI from langchain_core.chat_models.anthropic import ChatAnthropic - model = ChatAnthropic(model="claude-3-haiku-20240307").with_fallbacks( - [ChatOpenAI(model="gpt-3.5-turbo-0125")] + model = ChatAnthropic(model="claude-sonnet-4-6").with_fallbacks( + [ChatOpenAI(model="gpt-5.4-mini")] ) # Will usually use ChatAnthropic, but fallback to ChatOpenAI # if ChatAnthropic fails.