diff --git a/libs/cli/langchain_cli/integration_template/README.md b/libs/cli/langchain_cli/integration_template/README.md index f8d70df8005..15741c62f85 100644 --- a/libs/cli/langchain_cli/integration_template/README.md +++ b/libs/cli/langchain_cli/integration_template/README.md @@ -19,8 +19,8 @@ And you should configure credentials by setting the following environment variab ```python from __module_name__ import Chat__ModuleName__ -llm = Chat__ModuleName__() -llm.invoke("Sing a ballad of LangChain.") +model = Chat__ModuleName__() +model.invoke("Sing a ballad of LangChain.") ``` ## Embeddings @@ -41,6 +41,6 @@ embeddings.embed_query("What is the meaning of life?") ```python from __module_name__ import __ModuleName__LLM -llm = __ModuleName__LLM() -llm.invoke("The meaning of life is") +model = __ModuleName__LLM() +model.invoke("The meaning of life is") ``` diff --git a/libs/cli/langchain_cli/integration_template/docs/chat.ipynb b/libs/cli/langchain_cli/integration_template/docs/chat.ipynb index 86221b5fc63..ff6e4d8cab2 100644 --- a/libs/cli/langchain_cli/integration_template/docs/chat.ipynb +++ b/libs/cli/langchain_cli/integration_template/docs/chat.ipynb @@ -72,7 +72,9 @@ "cell_type": "markdown", "id": "72ee0c4b-9764-423a-9dbf-95129e185210", "metadata": {}, - "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:" + "source": [ + "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:" + ] }, { "cell_type": "code", @@ -126,7 +128,7 @@ "source": [ "from __module_name__ import Chat__ModuleName__\n", "\n", - "llm = Chat__ModuleName__(\n", + "model = Chat__ModuleName__(\n", " model=\"model-name\",\n", " temperature=0,\n", " max_tokens=None,\n", @@ -162,7 +164,7 @@ " ),\n", " (\"human\", \"I love programming.\"),\n", "]\n", - "ai_msg = llm.invoke(messages)\n", + "ai_msg = model.invoke(messages)\n", "ai_msg" ] }, @@ -207,7 +209,7 @@ " ]\n", ")\n", "\n", - "chain = prompt | llm\n", + "chain = prompt | model\n", "chain.invoke(\n", " {\n", " \"input_language\": \"English\",\n", diff --git a/libs/cli/langchain_cli/integration_template/docs/llms.ipynb b/libs/cli/langchain_cli/integration_template/docs/llms.ipynb index 217929fff71..eb58d4fafbf 100644 --- a/libs/cli/langchain_cli/integration_template/docs/llms.ipynb +++ b/libs/cli/langchain_cli/integration_template/docs/llms.ipynb @@ -65,7 +65,9 @@ "cell_type": "markdown", "id": "4b6e1ca6", "metadata": {}, - "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:" + "source": [ + "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:" + ] }, { "cell_type": "code", @@ -119,7 +121,7 @@ "source": [ "from __module_name__ import __ModuleName__LLM\n", "\n", - "llm = __ModuleName__LLM(\n", + "model = __ModuleName__LLM(\n", " model=\"model-name\",\n", " temperature=0,\n", " max_tokens=None,\n", @@ -141,7 +143,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "035dea0f", "metadata": { "tags": [] @@ -150,7 +152,7 @@ "source": [ "input_text = \"__ModuleName__ is an AI company that \"\n", "\n", - "completion = llm.invoke(input_text)\n", + "completion = model.invoke(input_text)\n", "completion" ] }, @@ -177,7 +179,7 @@ "\n", "prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n", "\n", - "chain = prompt | llm\n", + "chain = prompt | model\n", 
"chain.invoke(\n", " {\n", " \"output_language\": \"German\",\n", diff --git a/libs/cli/langchain_cli/integration_template/docs/retrievers.ipynb b/libs/cli/langchain_cli/integration_template/docs/retrievers.ipynb index 300d3d87958..254633bdf23 100644 --- a/libs/cli/langchain_cli/integration_template/docs/retrievers.ipynb +++ b/libs/cli/langchain_cli/integration_template/docs/retrievers.ipynb @@ -155,7 +155,7 @@ "\n", "from langchain_openai import ChatOpenAI\n", "\n", - "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)" + "model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)" ] }, { @@ -185,7 +185,7 @@ "chain = (\n", " {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n", " | prompt\n", - " | llm\n", + " | model\n", " | StrOutputParser()\n", ")" ] diff --git a/libs/cli/langchain_cli/integration_template/docs/tools.ipynb b/libs/cli/langchain_cli/integration_template/docs/tools.ipynb index a160b95658a..97853ad8669 100644 --- a/libs/cli/langchain_cli/integration_template/docs/tools.ipynb +++ b/libs/cli/langchain_cli/integration_template/docs/tools.ipynb @@ -192,7 +192,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830", "metadata": {}, "outputs": [], @@ -203,7 +203,7 @@ "# !pip install -qU langchain langchain-openai\n", "from langchain.chat_models import init_chat_model\n", "\n", - "llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")" + "model = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")" ] }, { @@ -216,7 +216,7 @@ "from langgraph.prebuilt import create_react_agent\n", "\n", "tools = [tool]\n", - "agent = create_react_agent(llm, tools)" + "agent = create_react_agent(model, tools)" ] }, { diff --git a/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py b/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py index 0e077cd4856..fedf46fb95d 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py @@ -60,7 +60,7 @@ class Chat__ModuleName__(BaseChatModel): ```python from __module_name__ import Chat__ModuleName__ - llm = Chat__ModuleName__( + model = Chat__ModuleName__( model="...", temperature=0, max_tokens=None, @@ -77,7 +77,7 @@ class Chat__ModuleName__(BaseChatModel): ("system", "You are a helpful translator. Translate the user sentence to French."), ("human", "I love programming."), ] - llm.invoke(messages) + model.invoke(messages) ``` ```python @@ -87,7 +87,7 @@ class Chat__ModuleName__(BaseChatModel): # TODO: Delete if token-level streaming isn't supported. Stream: ```python - for chunk in llm.stream(messages): + for chunk in model.stream(messages): print(chunk.text, end="") ``` @@ -96,7 +96,7 @@ class Chat__ModuleName__(BaseChatModel): ``` ```python - stream = llm.stream(messages) + stream = model.stream(messages) full = next(stream) for chunk in stream: full += chunk @@ -110,13 +110,13 @@ class Chat__ModuleName__(BaseChatModel): # TODO: Delete if native async isn't supported. 
Async: ```python - await llm.ainvoke(messages) + await model.ainvoke(messages) # stream: - # async for chunk in (await llm.astream(messages)) + # async for chunk in (await model.astream(messages)) # batch: - # await llm.abatch([messages]) + # await model.abatch([messages]) ``` ```python @@ -137,8 +137,8 @@ class Chat__ModuleName__(BaseChatModel): location: str = Field(..., description="The city and state, e.g. San Francisco, CA") - llm_with_tools = llm.bind_tools([GetWeather, GetPopulation]) - ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") + model_with_tools = model.bind_tools([GetWeather, GetPopulation]) + ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") ai_msg.tool_calls ``` @@ -162,8 +162,8 @@ class Chat__ModuleName__(BaseChatModel): punchline: str = Field(description="The punchline to the joke") rating: int | None = Field(description="How funny the joke is, from 1 to 10") - structured_llm = llm.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats") + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats") ``` ```python @@ -176,8 +176,8 @@ class Chat__ModuleName__(BaseChatModel): JSON mode: ```python # TODO: Replace with appropriate bind arg. - json_llm = llm.bind(response_format={"type": "json_object"}) - ai_msg = json_llm.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]") + json_model = model.bind(response_format={"type": "json_object"}) + ai_msg = json_model.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]") ai_msg.content ``` @@ -204,7 +204,7 @@ class Chat__ModuleName__(BaseChatModel): }, ], ) - ai_msg = llm.invoke([message]) + ai_msg = model.invoke([message]) ai_msg.content ``` @@ -235,7 +235,7 @@ class Chat__ModuleName__(BaseChatModel): # TODO: Delete if token usage metadata isn't supported. Token usage: ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.usage_metadata ``` @@ -247,8 +247,8 @@ class Chat__ModuleName__(BaseChatModel): Logprobs: ```python # TODO: Replace with appropriate bind arg. 
- logprobs_llm = llm.bind(logprobs=True) - ai_msg = logprobs_llm.invoke(messages) + logprobs_model = model.bind(logprobs=True) + ai_msg = logprobs_model.invoke(messages) ai_msg.response_metadata["logprobs"] ``` @@ -257,7 +257,7 @@ class Chat__ModuleName__(BaseChatModel): ``` Response metadata ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.response_metadata ``` diff --git a/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py b/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py index 48c5f735788..d4c6a966bfc 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py @@ -65,7 +65,7 @@ class __ModuleName__Retriever(BaseRetriever): Question: {question}\"\"\" ) - llm = ChatOpenAI(model="gpt-3.5-turbo-0125") + model = ChatOpenAI(model="gpt-3.5-turbo-0125") def format_docs(docs): return "\\n\\n".join(doc.page_content for doc in docs) @@ -73,7 +73,7 @@ class __ModuleName__Retriever(BaseRetriever): chain = ( {"context": retriever | format_docs, "question": RunnablePassthrough()} | prompt - | llm + | model | StrOutputParser() ) diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py index d13a081520c..67ec50e8207 100644 --- a/libs/core/langchain_core/language_models/chat_models.py +++ b/libs/core/langchain_core/language_models/chat_models.py @@ -1555,10 +1555,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC): justification: str - llm = ChatModel(model="model-name", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = ChatModel(model="model-name", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) @@ -1580,12 +1580,12 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC): justification: str - llm = ChatModel(model="model-name", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatModel(model="model-name", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -1609,10 +1609,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC): dict_schema = convert_to_openai_tool(AnswerWithJustification) - llm = ChatModel(model="model-name", temperature=0) - structured_llm = llm.with_structured_output(dict_schema) + model = ChatModel(model="model-name", temperature=0) + structured_model = model.with_structured_output(dict_schema) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index 842ed35f824..91b6ef1c5f4 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -776,11 +776,11 @@ class Runnable(ABC, Generic[Input, Output]): SystemMessagePromptTemplate.from_template("You are a nice assistant.") + "{question}" ) - llm = FakeStreamingListLLM(responses=["foo-lish"]) + model = FakeStreamingListLLM(responses=["foo-lish"]) - chain: Runnable = prompt | llm | {"str": StrOutputParser()} + chain: Runnable = prompt | 
model | {"str": StrOutputParser()} - chain_with_assign = chain.assign(hello=itemgetter("str") | llm) + chain_with_assign = chain.assign(hello=itemgetter("str") | model) print(chain_with_assign.input_schema.model_json_schema()) # {'title': 'PromptInput', 'type': 'object', 'properties': @@ -1598,16 +1598,16 @@ class Runnable(ABC, Generic[Input, Output]): from langchain_ollama import ChatOllama from langchain_core.output_parsers import StrOutputParser - llm = ChatOllama(model="llama3.1") + model = ChatOllama(model="llama3.1") # Without bind - chain = llm | StrOutputParser() + chain = model | StrOutputParser() chain.invoke("Repeat quoted words exactly: 'One two three four five.'") # Output is 'One two three four five.' # With bind - chain = llm.bind(stop=["three"]) | StrOutputParser() + chain = model.bind(stop=["three"]) | StrOutputParser() chain.invoke("Repeat quoted words exactly: 'One two three four five.'") # Output is 'One two' @@ -6056,10 +6056,10 @@ def chain( @chain def my_func(fields): prompt = PromptTemplate("Hello, {name}!") - llm = OpenAI() + model = OpenAI() formatted = prompt.invoke(**fields) - for chunk in llm.stream(formatted): + for chunk in model.stream(formatted): yield chunk ``` """ diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py index 61f1753d8dd..a3794d8ebcb 100644 --- a/libs/core/langchain_core/runnables/fallbacks.py +++ b/libs/core/langchain_core/runnables/fallbacks.py @@ -594,7 +594,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]): Returns: If the attribute is anything other than a method that outputs a Runnable, returns getattr(self.runnable, name). If the attribute is a method that - does return a new Runnable (e.g. llm.bind_tools([...]) outputs a new + does return a new Runnable (e.g. model.bind_tools([...]) outputs a new RunnableBinding) then self.runnable and each of the runnables in self.fallbacks is replaced with getattr(x, name). 
@@ -605,15 +605,15 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]): gpt_4o = ChatOpenAI(model="gpt-4o") claude_3_sonnet = ChatAnthropic(model="claude-3-7-sonnet-20250219") - llm = gpt_4o.with_fallbacks([claude_3_sonnet]) + model = gpt_4o.with_fallbacks([claude_3_sonnet]) - llm.model_name + model.model_name # -> "gpt-4o" # .bind_tools() is called on both ChatOpenAI and ChatAnthropic # Equivalent to: # gpt_4o.bind_tools([...]).with_fallbacks([claude_3_sonnet.bind_tools([...])]) - llm.bind_tools([...]) + model.bind_tools([...]) # -> RunnableWithFallbacks( runnable=RunnableBinding(bound=ChatOpenAI(...), kwargs={"tools": [...]}), fallbacks=[RunnableBinding(bound=ChatAnthropic(...), kwargs={"tools": [...]})], diff --git a/libs/core/tests/unit_tests/tracers/test_run_collector.py b/libs/core/tests/unit_tests/tracers/test_run_collector.py index 95c0052cf21..17f6a973fce 100644 --- a/libs/core/tests/unit_tests/tracers/test_run_collector.py +++ b/libs/core/tests/unit_tests/tracers/test_run_collector.py @@ -7,9 +7,9 @@ from langchain_core.tracers.context import collect_runs def test_collect_runs() -> None: - llm = FakeListLLM(responses=["hello"]) + model = FakeListLLM(responses=["hello"]) with collect_runs() as cb: - llm.invoke("hi") + model.invoke("hi") assert cb.traced_runs assert len(cb.traced_runs) == 1 assert isinstance(cb.traced_runs[0].id, uuid.UUID) diff --git a/libs/langchain/langchain_classic/agents/agent_toolkits/vectorstore/base.py b/libs/langchain/langchain_classic/agents/agent_toolkits/vectorstore/base.py index a8d02d4f03e..d59458e761f 100644 --- a/libs/langchain/langchain_classic/agents/agent_toolkits/vectorstore/base.py +++ b/libs/langchain/langchain_classic/agents/agent_toolkits/vectorstore/base.py @@ -58,7 +58,7 @@ def create_vectorstore_agent( from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langgraph.prebuilt import create_react_agent - llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) + model = ChatOpenAI(model="gpt-4o-mini", temperature=0) vector_store = InMemoryVectorStore.from_texts( [ @@ -74,7 +74,7 @@ def create_vectorstore_agent( "Fetches information about pets.", ) - agent = create_react_agent(llm, [tool]) + agent = create_react_agent(model, [tool]) for step in agent.stream( {"messages": [("human", "What are dogs known for?")]}, @@ -156,7 +156,7 @@ def create_vectorstore_router_agent( from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langgraph.prebuilt import create_react_agent - llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) + model = ChatOpenAI(model="gpt-4o-mini", temperature=0) pet_vector_store = InMemoryVectorStore.from_texts( [ @@ -187,7 +187,7 @@ def create_vectorstore_router_agent( ), ] - agent = create_react_agent(llm, tools) + agent = create_react_agent(model, tools) for step in agent.stream( {"messages": [("human", "Tell me about carrots.")]}, diff --git a/libs/langchain/langchain_classic/chains/api/base.py b/libs/langchain/langchain_classic/chains/api/base.py index ee2e13b2312..830211bf21f 100644 --- a/libs/langchain/langchain_classic/chains/api/base.py +++ b/libs/langchain/langchain_classic/chains/api/base.py @@ -143,7 +143,7 @@ try: description: Limit the number of results \"\"\" - llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) + model = ChatOpenAI(model="gpt-4o-mini", temperature=0) toolkit = RequestsToolkit( requests_wrapper=TextRequestsWrapper(headers={}), # no auth required allow_dangerous_requests=ALLOW_DANGEROUS_REQUESTS, @@ -152,7 +152,7 @@ try: api_request_chain = ( 
API_URL_PROMPT.partial(api_docs=api_spec) - | llm.bind_tools(tools, tool_choice="any") + | model.bind_tools(tools, tool_choice="any") ) class ChainState(TypedDict): @@ -169,7 +169,7 @@ try: return {"messages": [response]} async def acall_model(state: ChainState, config: RunnableConfig): - response = await llm.ainvoke(state["messages"], config) + response = await model.ainvoke(state["messages"], config) return {"messages": [response]} graph_builder = StateGraph(ChainState) diff --git a/libs/langchain/langchain_classic/chains/combine_documents/map_reduce.py b/libs/langchain/langchain_classic/chains/combine_documents/map_reduce.py index 99fdf7bc1be..16cd4b9f372 100644 --- a/libs/langchain/langchain_classic/chains/combine_documents/map_reduce.py +++ b/libs/langchain/langchain_classic/chains/combine_documents/map_reduce.py @@ -53,16 +53,16 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain): input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" - llm = OpenAI() + model = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template("Summarize this content: {context}") - llm_chain = LLMChain(llm=llm, prompt=prompt) + llm_chain = LLMChain(llm=model, prompt=prompt) # We now define how to combine these summaries reduce_prompt = PromptTemplate.from_template( "Combine these summaries: {context}" ) - reduce_llm_chain = LLMChain(llm=llm, prompt=reduce_prompt) + reduce_llm_chain = LLMChain(llm=model, prompt=reduce_prompt) combine_documents_chain = StuffDocumentsChain( llm_chain=reduce_llm_chain, document_prompt=document_prompt, @@ -79,7 +79,7 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain): # which is specifically aimed at collapsing documents BEFORE # the final call. 
prompt = PromptTemplate.from_template("Collapse this content: {context}") - llm_chain = LLMChain(llm=llm, prompt=prompt) + llm_chain = LLMChain(llm=model, prompt=prompt) collapse_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, diff --git a/libs/langchain/langchain_classic/chains/combine_documents/map_rerank.py b/libs/langchain/langchain_classic/chains/combine_documents/map_rerank.py index a712ab6f06b..8d6f3ac5992 100644 --- a/libs/langchain/langchain_classic/chains/combine_documents/map_rerank.py +++ b/libs/langchain/langchain_classic/chains/combine_documents/map_rerank.py @@ -42,7 +42,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain): from langchain_classic.output_parsers.regex import RegexParser document_variable_name = "context" - llm = OpenAI() + model = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` # The actual prompt will need to be a lot more complex, this is just @@ -61,7 +61,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain): input_variables=["context"], output_parser=output_parser, ) - llm_chain = LLMChain(llm=llm, prompt=prompt) + llm_chain = LLMChain(llm=model, prompt=prompt) chain = MapRerankDocumentsChain( llm_chain=llm_chain, document_variable_name=document_variable_name, diff --git a/libs/langchain/langchain_classic/chains/combine_documents/reduce.py b/libs/langchain/langchain_classic/chains/combine_documents/reduce.py index 0f86c389ff0..6f546702dae 100644 --- a/libs/langchain/langchain_classic/chains/combine_documents/reduce.py +++ b/libs/langchain/langchain_classic/chains/combine_documents/reduce.py @@ -171,11 +171,11 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain): input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" - llm = OpenAI() + model = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template("Summarize this content: {context}") - llm_chain = LLMChain(llm=llm, prompt=prompt) + llm_chain = LLMChain(llm=model, prompt=prompt) combine_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, @@ -188,7 +188,7 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain): # which is specifically aimed at collapsing documents BEFORE # the final call. 
prompt = PromptTemplate.from_template("Collapse this content: {context}") - llm_chain = LLMChain(llm=llm, prompt=prompt) + llm_chain = LLMChain(llm=model, prompt=prompt) collapse_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, diff --git a/libs/langchain/langchain_classic/chains/combine_documents/refine.py b/libs/langchain/langchain_classic/chains/combine_documents/refine.py index a6d694c63d1..528e86a7e84 100644 --- a/libs/langchain/langchain_classic/chains/combine_documents/refine.py +++ b/libs/langchain/langchain_classic/chains/combine_documents/refine.py @@ -55,11 +55,11 @@ class RefineDocumentsChain(BaseCombineDocumentsChain): input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" - llm = OpenAI() + model = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template("Summarize this content: {context}") - initial_llm_chain = LLMChain(llm=llm, prompt=prompt) + initial_llm_chain = LLMChain(llm=model, prompt=prompt) initial_response_name = "prev_response" # The prompt here should take as an input variable the # `document_variable_name` as well as `initial_response_name` @@ -67,7 +67,7 @@ class RefineDocumentsChain(BaseCombineDocumentsChain): "Here's your first summary: {prev_response}. " "Now add to it based on the following context: {context}" ) - refine_llm_chain = LLMChain(llm=llm, prompt=prompt_refine) + refine_llm_chain = LLMChain(llm=model, prompt=prompt_refine) chain = RefineDocumentsChain( initial_llm_chain=initial_llm_chain, refine_llm_chain=refine_llm_chain, diff --git a/libs/langchain/langchain_classic/chains/combine_documents/stuff.py b/libs/langchain/langchain_classic/chains/combine_documents/stuff.py index 5e05d01114e..363d74dcb6f 100644 --- a/libs/langchain/langchain_classic/chains/combine_documents/stuff.py +++ b/libs/langchain/langchain_classic/chains/combine_documents/stuff.py @@ -68,8 +68,8 @@ def create_stuff_documents_chain( prompt = ChatPromptTemplate.from_messages( [("system", "What are everyone's favorite colors:\n\n{context}")] ) - llm = ChatOpenAI(model="gpt-3.5-turbo") - chain = create_stuff_documents_chain(llm, prompt) + model = ChatOpenAI(model="gpt-3.5-turbo") + chain = create_stuff_documents_chain(model, prompt) docs = [ Document(page_content="Jesse loves red but not yellow"), @@ -132,11 +132,11 @@ class StuffDocumentsChain(BaseCombineDocumentsChain): input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" - llm = OpenAI() + model = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template("Summarize this content: {context}") - llm_chain = LLMChain(llm=llm, prompt=prompt) + llm_chain = LLMChain(llm=model, prompt=prompt) chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, diff --git a/libs/langchain/langchain_classic/chains/constitutional_ai/base.py b/libs/langchain/langchain_classic/chains/constitutional_ai/base.py index 72bbcbb9323..af8302b34a7 100644 --- a/libs/langchain/langchain_classic/chains/constitutional_ai/base.py +++ b/libs/langchain/langchain_classic/chains/constitutional_ai/base.py @@ -58,7 +58,7 @@ class ConstitutionalChain(Chain): from langgraph.graph import END, START, StateGraph from typing_extensions import Annotated, TypedDict - llm = ChatOpenAI(model="gpt-4o-mini") + model = ChatOpenAI(model="gpt-4o-mini") class Critique(TypedDict): """Generate a 
critique, if needed.""" @@ -86,9 +86,9 @@ class ConstitutionalChain(Chain): "Revision Request: {revision_request}" ) - chain = llm | StrOutputParser() - critique_chain = critique_prompt | llm.with_structured_output(Critique) - revision_chain = revision_prompt | llm | StrOutputParser() + chain = model | StrOutputParser() + critique_chain = critique_prompt | model.with_structured_output(Critique) + revision_chain = revision_prompt | model | StrOutputParser() class State(TypedDict): @@ -170,16 +170,16 @@ class ConstitutionalChain(Chain): from langchain_classic.chains.constitutional_ai.models \ import ConstitutionalPrinciple - llm = OpenAI() + model = OpenAI() qa_prompt = PromptTemplate( template="Q: {question} A:", input_variables=["question"], ) - qa_chain = LLMChain(llm=llm, prompt=qa_prompt) + qa_chain = LLMChain(llm=model, prompt=qa_prompt) constitutional_chain = ConstitutionalChain.from_llm( - llm=llm, + llm=model, chain=qa_chain, constitutional_principles=[ ConstitutionalPrinciple( diff --git a/libs/langchain/langchain_classic/chains/conversation/base.py b/libs/langchain/langchain_classic/chains/conversation/base.py index 07c1c98ee08..f1c403787de 100644 --- a/libs/langchain/langchain_classic/chains/conversation/base.py +++ b/libs/langchain/langchain_classic/chains/conversation/base.py @@ -47,9 +47,9 @@ class ConversationChain(LLMChain): return store[session_id] - llm = ChatOpenAI(model="gpt-3.5-turbo-0125") + model = ChatOpenAI(model="gpt-3.5-turbo-0125") - chain = RunnableWithMessageHistory(llm, get_session_history) + chain = RunnableWithMessageHistory(model, get_session_history) chain.invoke( "Hi I'm Bob.", config={"configurable": {"session_id": "1"}}, @@ -85,9 +85,9 @@ class ConversationChain(LLMChain): return store[session_id] - llm = ChatOpenAI(model="gpt-3.5-turbo-0125") + model = ChatOpenAI(model="gpt-3.5-turbo-0125") - chain = RunnableWithMessageHistory(llm, get_session_history) + chain = RunnableWithMessageHistory(model, get_session_history) chain.invoke( "Hi I'm Bob.", config={"configurable": {"session_id": "1"}}, diff --git a/libs/langchain/langchain_classic/chains/conversational_retrieval/base.py b/libs/langchain/langchain_classic/chains/conversational_retrieval/base.py index d1424619ec4..f327a9d407a 100644 --- a/libs/langchain/langchain_classic/chains/conversational_retrieval/base.py +++ b/libs/langchain/langchain_classic/chains/conversational_retrieval/base.py @@ -283,7 +283,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain): retriever = ... # Your retriever - llm = ChatOpenAI() + model = ChatOpenAI() # Contextualize question contextualize_q_system_prompt = ( "Given a chat history and the latest user question " @@ -301,7 +301,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain): ] ) history_aware_retriever = create_history_aware_retriever( - llm, retriever, contextualize_q_prompt + model, retriever, contextualize_q_prompt ) # Answer question @@ -324,7 +324,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain): # Below we use create_stuff_documents_chain to feed all retrieved context # into the LLM. Note that we can also use StuffDocumentsChain and other # instances of BaseCombineDocumentsChain. 
- question_answer_chain = create_stuff_documents_chain(llm, qa_prompt) + question_answer_chain = create_stuff_documents_chain(model, qa_prompt) rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain) # Usage: @@ -371,8 +371,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain): "Follow up question: {question}" ) prompt = PromptTemplate.from_template(template) - llm = OpenAI() - question_generator_chain = LLMChain(llm=llm, prompt=prompt) + model = OpenAI() + question_generator_chain = LLMChain(llm=model, prompt=prompt) chain = ConversationalRetrievalChain( combine_docs_chain=combine_docs_chain, retriever=retriever, diff --git a/libs/langchain/langchain_classic/chains/history_aware_retriever.py b/libs/langchain/langchain_classic/chains/history_aware_retriever.py index ed16c85560b..9d193adf52e 100644 --- a/libs/langchain/langchain_classic/chains/history_aware_retriever.py +++ b/libs/langchain/langchain_classic/chains/history_aware_retriever.py @@ -38,10 +38,10 @@ def create_history_aware_retriever( from langchain_classic import hub rephrase_prompt = hub.pull("langchain-ai/chat-langchain-rephrase") - llm = ChatOpenAI() + model = ChatOpenAI() retriever = ... chat_retriever_chain = create_history_aware_retriever( - llm, retriever, rephrase_prompt + model, retriever, rephrase_prompt ) chain.invoke({"input": "...", "chat_history": }) diff --git a/libs/langchain/langchain_classic/chains/llm.py b/libs/langchain/langchain_classic/chains/llm.py index 534cd97794c..fae99da5d53 100644 --- a/libs/langchain/langchain_classic/chains/llm.py +++ b/libs/langchain/langchain_classic/chains/llm.py @@ -55,8 +55,8 @@ class LLMChain(Chain): prompt_template = "Tell me a {adjective} joke" prompt = PromptTemplate(input_variables=["adjective"], template=prompt_template) - llm = OpenAI() - chain = prompt | llm | StrOutputParser() + model = OpenAI() + chain = prompt | model | StrOutputParser() chain.invoke("your adjective here") ``` @@ -69,7 +69,7 @@ class LLMChain(Chain): prompt_template = "Tell me a {adjective} joke" prompt = PromptTemplate(input_variables=["adjective"], template=prompt_template) - llm = LLMChain(llm=OpenAI(), prompt=prompt) + model = LLMChain(llm=OpenAI(), prompt=prompt) ``` """ diff --git a/libs/langchain/langchain_classic/chains/llm_checker/base.py b/libs/langchain/langchain_classic/chains/llm_checker/base.py index 5bd53322e9c..ec630178d5b 100644 --- a/libs/langchain/langchain_classic/chains/llm_checker/base.py +++ b/libs/langchain/langchain_classic/chains/llm_checker/base.py @@ -80,8 +80,8 @@ class LLMCheckerChain(Chain): from langchain_community.llms import OpenAI from langchain_classic.chains import LLMCheckerChain - llm = OpenAI(temperature=0.7) - checker_chain = LLMCheckerChain.from_llm(llm) + model = OpenAI(temperature=0.7) + checker_chain = LLMCheckerChain.from_llm(model) ``` """ diff --git a/libs/langchain/langchain_classic/chains/llm_math/base.py b/libs/langchain/langchain_classic/chains/llm_math/base.py index 86eea13e1b7..0d1306c6925 100644 --- a/libs/langchain/langchain_classic/chains/llm_math/base.py +++ b/libs/langchain/langchain_classic/chains/llm_math/base.py @@ -84,9 +84,9 @@ class LLMMathChain(Chain): ) ) - llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) + model = ChatOpenAI(model="gpt-4o-mini", temperature=0) tools = [calculator] - llm_with_tools = llm.bind_tools(tools, tool_choice="any") + model_with_tools = model.bind_tools(tools, tool_choice="any") class ChainState(TypedDict): \"\"\"LangGraph state.\"\"\" @@ -95,11 +95,11 @@ 
class LLMMathChain(Chain): async def acall_chain(state: ChainState, config: RunnableConfig): last_message = state["messages"][-1] - response = await llm_with_tools.ainvoke(state["messages"], config) + response = await model_with_tools.ainvoke(state["messages"], config) return {"messages": [response]} async def acall_model(state: ChainState, config: RunnableConfig): - response = await llm.ainvoke(state["messages"], config) + response = await model.ainvoke(state["messages"], config) return {"messages": [response]} graph_builder = StateGraph(ChainState) diff --git a/libs/langchain/langchain_classic/chains/llm_summarization_checker/base.py b/libs/langchain/langchain_classic/chains/llm_summarization_checker/base.py index 28e745772a7..7eb3a12d0ba 100644 --- a/libs/langchain/langchain_classic/chains/llm_summarization_checker/base.py +++ b/libs/langchain/langchain_classic/chains/llm_summarization_checker/base.py @@ -83,8 +83,8 @@ class LLMSummarizationCheckerChain(Chain): from langchain_community.llms import OpenAI from langchain_classic.chains import LLMSummarizationCheckerChain - llm = OpenAI(temperature=0.0) - checker_chain = LLMSummarizationCheckerChain.from_llm(llm) + model = OpenAI(temperature=0.0) + checker_chain = LLMSummarizationCheckerChain.from_llm(model) ``` """ diff --git a/libs/langchain/langchain_classic/chains/natbot/base.py b/libs/langchain/langchain_classic/chains/natbot/base.py index 6c482bccd86..c93969bf61a 100644 --- a/libs/langchain/langchain_classic/chains/natbot/base.py +++ b/libs/langchain/langchain_classic/chains/natbot/base.py @@ -84,8 +84,8 @@ class NatBotChain(Chain): """Load with default LLMChain.""" msg = ( "This method is no longer implemented. Please use from_llm." - "llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)" - "For example, NatBotChain.from_llm(llm, objective)" + "model = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)" + "For example, NatBotChain.from_llm(model, objective)" ) raise NotImplementedError(msg) diff --git a/libs/langchain/langchain_classic/chains/openai_functions/base.py b/libs/langchain/langchain_classic/chains/openai_functions/base.py index 417ae1e2a64..f7cdce9c021 100644 --- a/libs/langchain/langchain_classic/chains/openai_functions/base.py +++ b/libs/langchain/langchain_classic/chains/openai_functions/base.py @@ -107,7 +107,7 @@ def create_openai_fn_chain( fav_food: str | None = Field(None, description="The dog's favorite food") - llm = ChatOpenAI(model="gpt-4", temperature=0) + model = ChatOpenAI(model="gpt-4", temperature=0) prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a world class algorithm for recording entities."), @@ -115,7 +115,7 @@ def create_openai_fn_chain( ("human", "Tip: Make sure to answer in the correct format"), ] ) - chain = create_openai_fn_chain([RecordPerson, RecordDog], llm, prompt) + chain = create_openai_fn_chain([RecordPerson, RecordDog], model, prompt) chain.run("Harry was a chubby brown beagle who loved chicken") # -> RecordDog(name="Harry", color="brown", fav_food="chicken") @@ -191,7 +191,7 @@ def create_structured_output_chain( color: str = Field(..., description="The dog's color") fav_food: str | None = Field(None, description="The dog's favorite food") - llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0) + model = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0) prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a world class algorithm for extracting information in structured formats."), @@ -199,7 +199,7 @@ def 
create_structured_output_chain( ("human", "Tip: Make sure to answer in the correct format"), ] ) - chain = create_structured_output_chain(Dog, llm, prompt) + chain = create_structured_output_chain(Dog, model, prompt) chain.run("Harry was a chubby brown beagle who loved chicken") # -> Dog(name="Harry", color="brown", fav_food="chicken") diff --git a/libs/langchain/langchain_classic/chains/openai_functions/citation_fuzzy_match.py b/libs/langchain/langchain_classic/chains/openai_functions/citation_fuzzy_match.py index 0a8cc3bed02..4b3fd369d39 100644 --- a/libs/langchain/langchain_classic/chains/openai_functions/citation_fuzzy_match.py +++ b/libs/langchain/langchain_classic/chains/openai_functions/citation_fuzzy_match.py @@ -83,12 +83,12 @@ def create_citation_fuzzy_match_runnable(llm: BaseChatModel) -> Runnable: from langchain_classic.chains import create_citation_fuzzy_match_runnable from langchain_openai import ChatOpenAI - llm = ChatOpenAI(model="gpt-4o-mini") + model = ChatOpenAI(model="gpt-4o-mini") context = "Alice has blue eyes. Bob has brown eyes. Charlie has green eyes." question = "What color are Bob's eyes?" - chain = create_citation_fuzzy_match_runnable(llm) + chain = create_citation_fuzzy_match_runnable(model) chain.invoke({"question": question, "context": context}) ``` diff --git a/libs/langchain/langchain_classic/chains/openai_functions/extraction.py b/libs/langchain/langchain_classic/chains/openai_functions/extraction.py index 3075bf93eaf..8d1dd05ce62 100644 --- a/libs/langchain/langchain_classic/chains/openai_functions/extraction.py +++ b/libs/langchain/langchain_classic/chains/openai_functions/extraction.py @@ -73,8 +73,8 @@ Passage: # to see an up to date list of which models support # with_structured_output. model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) - structured_llm = model.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats. + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats. Make sure to call the Joke function.") """ ), @@ -143,8 +143,8 @@ def create_extraction_chain( # to see an up to date list of which models support # with_structured_output. model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) - structured_llm = model.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats. + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats. 
Make sure to call the Joke function.") """ ), diff --git a/libs/langchain/langchain_classic/chains/openai_functions/openapi.py b/libs/langchain/langchain_classic/chains/openai_functions/openapi.py index 2cfbfc31522..c29c111e388 100644 --- a/libs/langchain/langchain_classic/chains/openai_functions/openapi.py +++ b/libs/langchain/langchain_classic/chains/openai_functions/openapi.py @@ -330,7 +330,7 @@ def get_openapi_chain( prompt = ChatPromptTemplate.from_template( "Use the provided APIs to respond to this user query:\\n\\n{query}" ) - llm = ChatOpenAI(model="gpt-4o-mini", temperature=0).bind_tools(tools) + model = ChatOpenAI(model="gpt-4o-mini", temperature=0).bind_tools(tools) def _execute_tool(message) -> Any: if tool_calls := message.tool_calls: @@ -341,7 +341,7 @@ def get_openapi_chain( else: return message.content - chain = prompt | llm | _execute_tool + chain = prompt | model | _execute_tool ``` ```python @@ -394,7 +394,7 @@ def get_openapi_chain( msg = ( "Must provide an LLM for this chain.For example,\n" "from langchain_openai import ChatOpenAI\n" - "llm = ChatOpenAI()\n" + "model = ChatOpenAI()\n" ) raise ValueError(msg) prompt = prompt or ChatPromptTemplate.from_template( diff --git a/libs/langchain/langchain_classic/chains/openai_functions/tagging.py b/libs/langchain/langchain_classic/chains/openai_functions/tagging.py index bbb21f9cb5a..2f70394b0e5 100644 --- a/libs/langchain/langchain_classic/chains/openai_functions/tagging.py +++ b/libs/langchain/langchain_classic/chains/openai_functions/tagging.py @@ -77,8 +77,8 @@ def create_tagging_chain( # to see an up to date list of which models support # with_structured_output. model = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0) - structured_llm = model.with_structured_output(Joke) - structured_llm.invoke( + structured_model = model.with_structured_output(Joke) + structured_model.invoke( "Why did the cat cross the road? To get to the other " "side... and then lay down in the middle of it!" ) @@ -153,8 +153,8 @@ def create_tagging_chain_pydantic( # to see an up to date list of which models support # with_structured_output. model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) - structured_llm = model.with_structured_output(Joke) - structured_llm.invoke( + structured_model = model.with_structured_output(Joke) + structured_model.invoke( "Why did the cat cross the road? To get to the other " "side... and then lay down in the middle of it!" ) diff --git a/libs/langchain/langchain_classic/chains/openai_tools/extraction.py b/libs/langchain/langchain_classic/chains/openai_tools/extraction.py index 36c5729cdbc..276a0548e4d 100644 --- a/libs/langchain/langchain_classic/chains/openai_tools/extraction.py +++ b/libs/langchain/langchain_classic/chains/openai_tools/extraction.py @@ -42,8 +42,8 @@ If a property is not present and is not required in the function parameters, do # to see an up to date list of which models support # with_structured_output. model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) - structured_llm = model.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats. + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats. 
Make sure to call the Joke function.") """ ), diff --git a/libs/langchain/langchain_classic/chains/qa_generation/base.py b/libs/langchain/langchain_classic/chains/qa_generation/base.py index df74669d045..fc2ca8824b8 100644 --- a/libs/langchain/langchain_classic/chains/qa_generation/base.py +++ b/libs/langchain/langchain_classic/chains/qa_generation/base.py @@ -34,7 +34,7 @@ class QAGenerationChain(Chain): - Supports async and streaming; - Surfaces prompt and text splitter for easier customization; - Use of JsonOutputParser supports JSONPatch operations in streaming mode, - as well as robustness to markdown. + as well as robustness to markdown. ```python from langchain_classic.chains.qa_generation.prompt import ( @@ -52,14 +52,14 @@ class QAGenerationChain(Chain): from langchain_openai import ChatOpenAI from langchain_text_splitters import RecursiveCharacterTextSplitter - llm = ChatOpenAI() + model = ChatOpenAI() text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500) split_text = RunnableLambda(lambda x: text_splitter.create_documents([x])) chain = RunnableParallel( text=RunnablePassthrough(), questions=( - split_text | RunnableEach(bound=prompt | llm | JsonOutputParser()) + split_text | RunnableEach(bound=prompt | model | JsonOutputParser()) ), ) ``` diff --git a/libs/langchain/langchain_classic/chains/retrieval.py b/libs/langchain/langchain_classic/chains/retrieval.py index 8eb4ff47768..f181953689b 100644 --- a/libs/langchain/langchain_classic/chains/retrieval.py +++ b/libs/langchain/langchain_classic/chains/retrieval.py @@ -46,9 +46,11 @@ def create_retrieval_chain( from langchain_classic import hub retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat") - llm = ChatOpenAI() + model = ChatOpenAI() retriever = ... - combine_docs_chain = create_stuff_documents_chain(llm, retrieval_qa_chat_prompt) + combine_docs_chain = create_stuff_documents_chain( + model, retrieval_qa_chat_prompt + ) retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain) retrieval_chain.invoke({"input": "..."}) diff --git a/libs/langchain/langchain_classic/chains/retrieval_qa/base.py b/libs/langchain/langchain_classic/chains/retrieval_qa/base.py index 39fdf46ff60..5a6bc65af88 100644 --- a/libs/langchain/langchain_classic/chains/retrieval_qa/base.py +++ b/libs/langchain/langchain_classic/chains/retrieval_qa/base.py @@ -237,7 +237,7 @@ class RetrievalQA(BaseRetrievalQA): retriever = ... # Your retriever - llm = ChatOpenAI() + model = ChatOpenAI() system_prompt = ( "Use the given context to answer the question. 
" @@ -251,7 +251,7 @@ class RetrievalQA(BaseRetrievalQA): ("human", "{input}"), ] ) - question_answer_chain = create_stuff_documents_chain(llm, prompt) + question_answer_chain = create_stuff_documents_chain(model, prompt) chain = create_retrieval_chain(retriever, question_answer_chain) chain.invoke({"input": query}) diff --git a/libs/langchain/langchain_classic/chains/router/llm_router.py b/libs/langchain/langchain_classic/chains/router/llm_router.py index 6fa390a2f12..c90685e6b5d 100644 --- a/libs/langchain/langchain_classic/chains/router/llm_router.py +++ b/libs/langchain/langchain_classic/chains/router/llm_router.py @@ -48,7 +48,7 @@ class LLMRouterChain(RouterChain): from langchain_core.runnables import RunnableLambda, RunnablePassthrough from langchain_openai import ChatOpenAI - llm = ChatOpenAI(model="gpt-4o-mini") + model = ChatOpenAI(model="gpt-4o-mini") prompt_1 = ChatPromptTemplate.from_messages( [ @@ -63,8 +63,8 @@ class LLMRouterChain(RouterChain): ] ) - chain_1 = prompt_1 | llm | StrOutputParser() - chain_2 = prompt_2 | llm | StrOutputParser() + chain_1 = prompt_1 | model | StrOutputParser() + chain_2 = prompt_2 | model | StrOutputParser() route_system = "Route the user's query to either the animal " "or vegetable expert." @@ -83,7 +83,7 @@ class LLMRouterChain(RouterChain): route_chain = ( route_prompt - | llm.with_structured_output(RouteQuery) + | model.with_structured_output(RouteQuery) | itemgetter("destination") ) diff --git a/libs/langchain/langchain_classic/chains/router/multi_prompt.py b/libs/langchain/langchain_classic/chains/router/multi_prompt.py index 2c13ec476d4..52c3eac6a1e 100644 --- a/libs/langchain/langchain_classic/chains/router/multi_prompt.py +++ b/libs/langchain/langchain_classic/chains/router/multi_prompt.py @@ -49,7 +49,7 @@ class MultiPromptChain(MultiRouteChain): from langgraph.graph import END, START, StateGraph from typing_extensions import TypedDict - llm = ChatOpenAI(model="gpt-4o-mini") + model = ChatOpenAI(model="gpt-4o-mini") # Define the prompts we will route to prompt_1 = ChatPromptTemplate.from_messages( @@ -68,8 +68,8 @@ class MultiPromptChain(MultiRouteChain): # Construct the chains we will route to. These format the input query # into the respective prompt, run it through a chat model, and cast # the result to a string. - chain_1 = prompt_1 | llm | StrOutputParser() - chain_2 = prompt_2 | llm | StrOutputParser() + chain_1 = prompt_1 | model | StrOutputParser() + chain_2 = prompt_2 | model | StrOutputParser() # Next: define the chain that selects which branch to route to. @@ -92,7 +92,7 @@ class MultiPromptChain(MultiRouteChain): destination: Literal["animal", "vegetable"] - route_chain = route_prompt | llm.with_structured_output(RouteQuery) + route_chain = route_prompt | model.with_structured_output(RouteQuery) # For LangGraph, we will define the state of the graph to hold the query, diff --git a/libs/langchain/langchain_classic/chains/router/multi_retrieval_qa.py b/libs/langchain/langchain_classic/chains/router/multi_retrieval_qa.py index bf710cd8040..7f9d743f0f8 100644 --- a/libs/langchain/langchain_classic/chains/router/multi_retrieval_qa.py +++ b/libs/langchain/langchain_classic/chains/router/multi_retrieval_qa.py @@ -117,7 +117,7 @@ class MultiRetrievalQAChain(MultiRouteChain): "default LLMs on behalf of users." 
"You can provide a conversation LLM like so:\n" "from langchain_openai import ChatOpenAI\n" - "llm = ChatOpenAI()" + "model = ChatOpenAI()" ) raise NotImplementedError(msg) _default_chain = ConversationChain( diff --git a/libs/langchain/langchain_classic/chains/sql_database/query.py b/libs/langchain/langchain_classic/chains/sql_database/query.py index f14b9672205..aa26b6bae21 100644 --- a/libs/langchain/langchain_classic/chains/sql_database/query.py +++ b/libs/langchain/langchain_classic/chains/sql_database/query.py @@ -76,8 +76,8 @@ def create_sql_query_chain( from langchain_community.utilities import SQLDatabase db = SQLDatabase.from_uri("sqlite:///Chinook.db") - llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) - chain = create_sql_query_chain(llm, db) + model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) + chain = create_sql_query_chain(model, db) response = chain.invoke({"question": "How many employees are there"}) ``` diff --git a/libs/langchain/langchain_classic/chains/structured_output/base.py b/libs/langchain/langchain_classic/chains/structured_output/base.py index 9d99cb33f9c..e7e18f51efe 100644 --- a/libs/langchain/langchain_classic/chains/structured_output/base.py +++ b/libs/langchain/langchain_classic/chains/structured_output/base.py @@ -57,8 +57,8 @@ from pydantic import BaseModel # to see an up to date list of which models support # with_structured_output. model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) - structured_llm = model.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats. + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats. Make sure to call the Joke function.") """ ), @@ -127,9 +127,9 @@ def create_openai_fn_runnable( fav_food: str | None = Field(None, description="The dog's favorite food") - llm = ChatOpenAI(model="gpt-4", temperature=0) - structured_llm = create_openai_fn_runnable([RecordPerson, RecordDog], llm) - structured_llm.invoke("Harry was a chubby brown beagle who loved chicken) + model = ChatOpenAI(model="gpt-4", temperature=0) + structured_model = create_openai_fn_runnable([RecordPerson, RecordDog], model) + structured_model.invoke("Harry was a chubby brown beagle who loved chicken) # -> RecordDog(name="Harry", color="brown", fav_food="chicken") ``` @@ -176,8 +176,8 @@ def create_openai_fn_runnable( # to see an up to date list of which models support # with_structured_output. model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) - structured_llm = model.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats. + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats. Make sure to call the Joke function.") """ ), @@ -250,21 +250,21 @@ def create_structured_output_runnable( color: str = Field(..., description="The dog's color") fav_food: str | None = Field(None, description="The dog's favorite food") - llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) prompt = ChatPromptTemplate.from_messages( [ ("system", "You are an extraction algorithm. 
Please extract every possible instance"), ('human', '{input}') ] ) - structured_llm = create_structured_output_runnable( + structured_model = create_structured_output_runnable( RecordDog, - llm, + model, mode="openai-tools", enforce_function_usage=True, return_single=True ) - structured_llm.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) + structured_model.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) # -> RecordDog(name="Harry", color="brown", fav_food="chicken") ``` @@ -303,15 +303,15 @@ def create_structured_output_runnable( } - llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) - structured_llm = create_structured_output_runnable( + model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + structured_model = create_structured_output_runnable( dog_schema, - llm, + model, mode="openai-tools", enforce_function_usage=True, return_single=True ) - structured_llm.invoke("Harry was a chubby brown beagle who loved chicken") + structured_model.invoke("Harry was a chubby brown beagle who loved chicken") # -> {'name': 'Harry', 'color': 'brown', 'fav_food': 'chicken'} ``` @@ -330,9 +330,9 @@ def create_structured_output_runnable( color: str = Field(..., description="The dog's color") fav_food: str | None = Field(None, description="The dog's favorite food") - llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) - structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-functions") - structured_llm.invoke("Harry was a chubby brown beagle who loved chicken") + model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + structured_model = create_structured_output_runnable(Dog, model, mode="openai-functions") + structured_model.invoke("Harry was a chubby brown beagle who loved chicken") # -> Dog(name="Harry", color="brown", fav_food="chicken") ``` @@ -352,13 +352,13 @@ def create_structured_output_runnable( color: str = Field(..., description="The dog's color") fav_food: str | None = Field(None, description="The dog's favorite food") - llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) - structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-functions") + model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + structured_model = create_structured_output_runnable(Dog, model, mode="openai-functions") system = '''Extract information about any dogs mentioned in the user input.''' prompt = ChatPromptTemplate.from_messages( [("system", system), ("human", "{input}"),] ) - chain = prompt | structured_llm + chain = prompt | structured_model chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) # -> Dog(name="Harry", color="brown", fav_food="chicken") ``` @@ -379,8 +379,8 @@ def create_structured_output_runnable( color: str = Field(..., description="The dog's color") fav_food: str | None = Field(None, description="The dog's favorite food") - llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) - structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-json") + model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + structured_model = create_structured_output_runnable(Dog, model, mode="openai-json") system = '''You are a world class assistant for extracting information in structured JSON formats. 
\ Extract a valid JSON blob from the user input that matches the following JSON Schema: @@ -389,7 +389,7 @@ def create_structured_output_runnable( prompt = ChatPromptTemplate.from_messages( [("system", system), ("human", "{input}"),] ) - chain = prompt | structured_llm + chain = prompt | structured_model chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) ``` diff --git a/libs/langchain/langchain_classic/evaluation/agents/trajectory_eval_chain.py b/libs/langchain/langchain_classic/evaluation/agents/trajectory_eval_chain.py index a8b364028c3..644a3902186 100644 --- a/libs/langchain/langchain_classic/evaluation/agents/trajectory_eval_chain.py +++ b/libs/langchain/langchain_classic/evaluation/agents/trajectory_eval_chain.py @@ -113,10 +113,10 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain): \"\"\"Very helpful answers to geography questions.\"\"\" return f"{country}? IDK - We may never know {question}." - llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) + model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) agent = initialize_agent( tools=[geography_answers], - llm=llm, + llm=model, agent=AgentType.OPENAI_FUNCTIONS, return_intermediate_steps=True, ) @@ -125,7 +125,7 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain): response = agent(question) eval_chain = TrajectoryEvalChain.from_llm( - llm=llm, agent_tools=[geography_answers], return_reasoning=True + llm=model, agent_tools=[geography_answers], return_reasoning=True ) result = eval_chain.evaluate_agent_trajectory( diff --git a/libs/langchain/langchain_classic/evaluation/comparison/eval_chain.py b/libs/langchain/langchain_classic/evaluation/comparison/eval_chain.py index ab83aa46c4a..01b9c6afd3d 100644 --- a/libs/langchain/langchain_classic/evaluation/comparison/eval_chain.py +++ b/libs/langchain/langchain_classic/evaluation/comparison/eval_chain.py @@ -165,10 +165,10 @@ class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain): Example: >>> from langchain_community.chat_models import ChatOpenAI >>> from langchain_classic.evaluation.comparison import PairwiseStringEvalChain - >>> llm = ChatOpenAI( + >>> model = ChatOpenAI( ... temperature=0, model_name="gpt-4", model_kwargs={"random_seed": 42} ... ) - >>> chain = PairwiseStringEvalChain.from_llm(llm=llm) + >>> chain = PairwiseStringEvalChain.from_llm(llm=model) >>> result = chain.evaluate_string_pairs( ... input = "What is the chemical formula for water?", ... 
prediction = "H2O", diff --git a/libs/langchain/langchain_classic/evaluation/criteria/__init__.py b/libs/langchain/langchain_classic/evaluation/criteria/__init__.py index c275541a06c..fce15234e2b 100644 --- a/libs/langchain/langchain_classic/evaluation/criteria/__init__.py +++ b/libs/langchain/langchain_classic/evaluation/criteria/__init__.py @@ -15,9 +15,9 @@ Using a predefined criterion: >>> from langchain_community.llms import OpenAI >>> from langchain_classic.evaluation.criteria import CriteriaEvalChain ->>> llm = OpenAI() +>>> model = OpenAI() >>> criteria = "conciseness" ->>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) +>>> chain = CriteriaEvalChain.from_llm(llm=model, criteria=criteria) >>> chain.evaluate_strings( prediction="The answer is 42.", reference="42", @@ -29,7 +29,7 @@ Using a custom criterion: >>> from langchain_community.llms import OpenAI >>> from langchain_classic.evaluation.criteria import LabeledCriteriaEvalChain ->>> llm = OpenAI() +>>> model = OpenAI() >>> criteria = { "hallucination": ( "Does this submission contain information" @@ -37,7 +37,7 @@ Using a custom criterion: ), } >>> chain = LabeledCriteriaEvalChain.from_llm( - llm=llm, + llm=model, criteria=criteria, ) >>> chain.evaluate_strings( diff --git a/libs/langchain/langchain_classic/evaluation/criteria/eval_chain.py b/libs/langchain/langchain_classic/evaluation/criteria/eval_chain.py index 80d964c451f..64c2cc0d6cf 100644 --- a/libs/langchain/langchain_classic/evaluation/criteria/eval_chain.py +++ b/libs/langchain/langchain_classic/evaluation/criteria/eval_chain.py @@ -190,9 +190,9 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): -------- >>> from langchain_anthropic import ChatAnthropic >>> from langchain_classic.evaluation.criteria import CriteriaEvalChain - >>> llm = ChatAnthropic(temperature=0) + >>> model = ChatAnthropic(temperature=0) >>> criteria = {"my-custom-criterion": "Is the submission the most amazing ever?"} - >>> evaluator = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) + >>> evaluator = CriteriaEvalChain.from_llm(llm=model, criteria=criteria) >>> evaluator.evaluate_strings( ... prediction="Imagine an ice cream flavor for the color aquamarine", ... input="Tell me an idea", @@ -205,10 +205,10 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): >>> from langchain_openai import ChatOpenAI >>> from langchain_classic.evaluation.criteria import LabeledCriteriaEvalChain - >>> llm = ChatOpenAI(model="gpt-4", temperature=0) + >>> model = ChatOpenAI(model="gpt-4", temperature=0) >>> criteria = "correctness" >>> evaluator = LabeledCriteriaEvalChain.from_llm( - ... llm=llm, + ... llm=model, ... criteria=criteria, ... 
) >>> evaluator.evaluate_strings( @@ -347,7 +347,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): -------- >>> from langchain_openai import OpenAI >>> from langchain_classic.evaluation.criteria import LabeledCriteriaEvalChain - >>> llm = OpenAI() + >>> model = OpenAI() >>> criteria = { "hallucination": ( "Does this submission contain information" @@ -355,7 +355,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): ), } >>> chain = LabeledCriteriaEvalChain.from_llm( - llm=llm, + llm=model, criteria=criteria, ) """ @@ -433,9 +433,9 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): Examples: >>> from langchain_openai import OpenAI >>> from langchain_classic.evaluation.criteria import CriteriaEvalChain - >>> llm = OpenAI() + >>> model = OpenAI() >>> criteria = "conciseness" - >>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) + >>> chain = CriteriaEvalChain.from_llm(llm=model, criteria=criteria) >>> chain.evaluate_strings( prediction="The answer is 42.", reference="42", @@ -485,9 +485,9 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): Examples: >>> from langchain_openai import OpenAI >>> from langchain_classic.evaluation.criteria import CriteriaEvalChain - >>> llm = OpenAI() + >>> model = OpenAI() >>> criteria = "conciseness" - >>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) + >>> chain = CriteriaEvalChain.from_llm(llm=model, criteria=criteria) >>> await chain.aevaluate_strings( prediction="The answer is 42.", reference="42", @@ -569,7 +569,7 @@ class LabeledCriteriaEvalChain(CriteriaEvalChain): -------- >>> from langchain_openai import OpenAI >>> from langchain_classic.evaluation.criteria import LabeledCriteriaEvalChain - >>> llm = OpenAI() + >>> model = OpenAI() >>> criteria = { "hallucination": ( "Does this submission contain information" @@ -577,7 +577,7 @@ class LabeledCriteriaEvalChain(CriteriaEvalChain): ), } >>> chain = LabeledCriteriaEvalChain.from_llm( - llm=llm, + llm=model, criteria=criteria, ) """ diff --git a/libs/langchain/langchain_classic/evaluation/scoring/__init__.py b/libs/langchain/langchain_classic/evaluation/scoring/__init__.py index 4faaefb278a..c5c08722f25 100644 --- a/libs/langchain/langchain_classic/evaluation/scoring/__init__.py +++ b/libs/langchain/langchain_classic/evaluation/scoring/__init__.py @@ -7,8 +7,8 @@ criteria and or a reference answer. Example: >>> from langchain_community.chat_models import ChatOpenAI >>> from langchain_classic.evaluation.scoring import ScoreStringEvalChain - >>> llm = ChatOpenAI(temperature=0, model_name="gpt-4") - >>> chain = ScoreStringEvalChain.from_llm(llm=llm) + >>> model = ChatOpenAI(temperature=0, model_name="gpt-4") + >>> chain = ScoreStringEvalChain.from_llm(llm=model) >>> result = chain.evaluate_strings( ... input="What is the chemical formula for water?", ... 
prediction="H2O", diff --git a/libs/langchain/langchain_classic/evaluation/scoring/eval_chain.py b/libs/langchain/langchain_classic/evaluation/scoring/eval_chain.py index 93c412af61c..fefa2041936 100644 --- a/libs/langchain/langchain_classic/evaluation/scoring/eval_chain.py +++ b/libs/langchain/langchain_classic/evaluation/scoring/eval_chain.py @@ -156,8 +156,8 @@ class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain): Example: >>> from langchain_community.chat_models import ChatOpenAI >>> from langchain_classic.evaluation.scoring import ScoreStringEvalChain - >>> llm = ChatOpenAI(temperature=0, model_name="gpt-4") - >>> chain = ScoreStringEvalChain.from_llm(llm=llm) + >>> model = ChatOpenAI(temperature=0, model_name="gpt-4") + >>> chain = ScoreStringEvalChain.from_llm(llm=model) >>> result = chain.evaluate_strings( ... input="What is the chemical formula for water?", ... prediction="H2O", diff --git a/libs/langchain/langchain_classic/indexes/vectorstore.py b/libs/langchain/langchain_classic/indexes/vectorstore.py index d23951d0daf..0680241739c 100644 --- a/libs/langchain/langchain_classic/indexes/vectorstore.py +++ b/libs/langchain/langchain_classic/indexes/vectorstore.py @@ -55,7 +55,7 @@ class VectorStoreIndexWrapper(BaseModel): "Please provide an llm to use for querying the vectorstore.\n" "For example,\n" "from langchain_openai import OpenAI\n" - "llm = OpenAI(temperature=0)" + "model = OpenAI(temperature=0)" ) raise NotImplementedError(msg) retriever_kwargs = retriever_kwargs or {} @@ -90,7 +90,7 @@ class VectorStoreIndexWrapper(BaseModel): "Please provide an llm to use for querying the vectorstore.\n" "For example,\n" "from langchain_openai import OpenAI\n" - "llm = OpenAI(temperature=0)" + "model = OpenAI(temperature=0)" ) raise NotImplementedError(msg) retriever_kwargs = retriever_kwargs or {} @@ -125,7 +125,7 @@ class VectorStoreIndexWrapper(BaseModel): "Please provide an llm to use for querying the vectorstore.\n" "For example,\n" "from langchain_openai import OpenAI\n" - "llm = OpenAI(temperature=0)" + "model = OpenAI(temperature=0)" ) raise NotImplementedError(msg) retriever_kwargs = retriever_kwargs or {} @@ -160,7 +160,7 @@ class VectorStoreIndexWrapper(BaseModel): "Please provide an llm to use for querying the vectorstore.\n" "For example,\n" "from langchain_openai import OpenAI\n" - "llm = OpenAI(temperature=0)" + "model = OpenAI(temperature=0)" ) raise NotImplementedError(msg) retriever_kwargs = retriever_kwargs or {} diff --git a/libs/langchain/langchain_classic/smith/__init__.py b/libs/langchain/langchain_classic/smith/__init__.py index 4f06ae3af34..24a75f48dae 100644 --- a/libs/langchain/langchain_classic/smith/__init__.py +++ b/libs/langchain/langchain_classic/smith/__init__.py @@ -22,8 +22,8 @@ from langchain_classic.smith import RunEvalConfig, run_on_dataset # Chains may have memory. Passing in a constructor function lets the # evaluation framework avoid cross-contamination between runs. 
def construct_chain(): - llm = ChatOpenAI(temperature=0) - chain = LLMChain.from_string(llm, "What's the answer to {your_input_key}") + model = ChatOpenAI(temperature=0) + chain = LLMChain.from_string(model, "What's the answer to {your_input_key}") return chain diff --git a/libs/langchain/langchain_classic/smith/evaluation/__init__.py b/libs/langchain/langchain_classic/smith/evaluation/__init__.py index 78e7fc70ab5..d10d6ef03b6 100644 --- a/libs/langchain/langchain_classic/smith/evaluation/__init__.py +++ b/libs/langchain/langchain_classic/smith/evaluation/__init__.py @@ -16,8 +16,8 @@ from langchain_classic.smith import EvaluatorType, RunEvalConfig, run_on_dataset def construct_chain(): - llm = ChatOpenAI(temperature=0) - chain = LLMChain.from_string(llm, "What's the answer to {your_input_key}") + model = ChatOpenAI(temperature=0) + chain = LLMChain.from_string(model, "What's the answer to {your_input_key}") return chain diff --git a/libs/langchain/langchain_classic/smith/evaluation/runner_utils.py b/libs/langchain/langchain_classic/smith/evaluation/runner_utils.py index 51fb2d776af..8db9f6fa971 100644 --- a/libs/langchain/langchain_classic/smith/evaluation/runner_utils.py +++ b/libs/langchain/langchain_classic/smith/evaluation/runner_utils.py @@ -1395,9 +1395,9 @@ async def arun_on_dataset( # Chains may have memory. Passing in a constructor function lets the # evaluation framework avoid cross-contamination between runs. def construct_chain(): - llm = ChatOpenAI(temperature=0) + model = ChatOpenAI(temperature=0) chain = LLMChain.from_string( - llm, + model, "What's the answer to {your_input_key}" ) return chain @@ -1570,9 +1570,9 @@ def run_on_dataset( # Chains may have memory. Passing in a constructor function lets the # evaluation framework avoid cross-contamination between runs. def construct_chain(): - llm = ChatOpenAI(temperature=0) + model = ChatOpenAI(temperature=0) chain = LLMChain.from_string( - llm, + model, "What's the answer to {your_input_key}" ) return chain diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py index fb494fffcab..ee8aba6225f 100644 --- a/libs/partners/anthropic/langchain_anthropic/chat_models.py +++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py @@ -609,7 +609,7 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-3-7-sonnet-20250219", temperature=0, max_tokens=1024, @@ -650,7 +650,7 @@ class ChatAnthropic(BaseChatModel): ), ("human", "I love programming."), ] - llm.invoke(messages) + model.invoke(messages) ``` ```python @@ -674,7 +674,7 @@ class ChatAnthropic(BaseChatModel): Stream: ```python - for chunk in llm.stream(messages): + for chunk in model.stream(messages): print(chunk.text, end="") ``` @@ -690,7 +690,7 @@ class ChatAnthropic(BaseChatModel): ``` ```python - stream = llm.stream(messages) + stream = model.stream(messages) full = next(stream) for chunk in stream: full += chunk @@ -703,13 +703,13 @@ class ChatAnthropic(BaseChatModel): Async: ```python - await llm.ainvoke(messages) + await model.ainvoke(messages) # stream: - # async for chunk in (await llm.astream(messages)) + # async for chunk in (await model.astream(messages)) # batch: - # await llm.abatch([messages]) + # await model.abatch([messages]) ``` ```python @@ -748,8 +748,8 @@ class ChatAnthropic(BaseChatModel): location: str = Field(..., description="The city and state, e.g. 
San Francisco, CA") - llm_with_tools = llm.bind_tools([GetWeather, GetPopulation]) - ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") + model_with_tools = model.bind_tools([GetWeather, GetPopulation]) + ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") ai_msg.tool_calls ``` @@ -795,8 +795,8 @@ class ChatAnthropic(BaseChatModel): rating: int | None = Field(description="How funny the joke is, from 1 to 10") - structured_llm = llm.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats") + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats") ``` ```python @@ -823,7 +823,7 @@ class ChatAnthropic(BaseChatModel): image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8") - llm = ChatAnthropic(model="claude-3-5-sonnet-latest") + model = ChatAnthropic(model="claude-3-5-sonnet-latest") message = HumanMessage( content=[ { @@ -841,7 +841,7 @@ class ChatAnthropic(BaseChatModel): }, ], ) - ai_msg = llm.invoke([message]) + ai_msg = model.invoke([message]) ai_msg.content ``` @@ -857,7 +857,7 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-sonnet-4-20250514", betas=["files-api-2025-04-14"], ) @@ -874,7 +874,7 @@ class ChatAnthropic(BaseChatModel): }, ], } - llm.invoke([input_message]) + model.invoke([input_message]) ``` PDF input: @@ -890,8 +890,8 @@ class ChatAnthropic(BaseChatModel): url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf" data = b64encode(requests.get(url).content).decode() - llm = ChatAnthropic(model="claude-3-5-sonnet-latest") - ai_msg = llm.invoke( + model = ChatAnthropic(model="claude-3-5-sonnet-latest") + ai_msg = model.invoke( [ HumanMessage( [ @@ -920,7 +920,7 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-sonnet-4-20250514", betas=["files-api-2025-04-14"], ) @@ -937,7 +937,7 @@ class ChatAnthropic(BaseChatModel): }, ], } - llm.invoke([input_message]) + model.invoke([input_message]) ``` Extended thinking: @@ -954,13 +954,13 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-3-7-sonnet-latest", max_tokens=5000, thinking={"type": "enabled", "budget_tokens": 2000}, ) - response = llm.invoke("What is the cube root of 50.653?") + response = model.invoke("What is the cube root of 50.653?") response.content ``` @@ -987,7 +987,7 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic(model="claude-3-5-haiku-latest") + model = ChatAnthropic(model="claude-3-5-haiku-latest") messages = [ { @@ -1008,7 +1008,7 @@ class ChatAnthropic(BaseChatModel): ], } ] - response = llm.invoke(messages) + response = model.invoke(messages) response.content ``` @@ -1050,7 +1050,7 @@ class ChatAnthropic(BaseChatModel): Token usage: ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.usage_metadata ``` @@ -1062,7 +1062,7 @@ class ChatAnthropic(BaseChatModel): default: ```python - stream = llm.stream(messages) + stream = 
model.stream(messages) full = next(stream) for chunk in stream: full += chunk @@ -1088,7 +1088,7 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic(model="claude-3-7-sonnet-20250219") + model = ChatAnthropic(model="claude-3-7-sonnet-20250219") messages = [ { @@ -1111,7 +1111,7 @@ class ChatAnthropic(BaseChatModel): }, ] - response = llm.invoke(messages) + response = model.invoke(messages) response.usage_metadata["input_token_details"] ``` @@ -1125,7 +1125,7 @@ class ChatAnthropic(BaseChatModel): cache. ```python - response = llm.invoke( + response = model.invoke( messages, cache_control={"type": "ephemeral"}, ) @@ -1137,7 +1137,7 @@ class ChatAnthropic(BaseChatModel): apply one hour caching by setting `ttl` to `'1h'`. ```python - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-3-7-sonnet-20250219", ) @@ -1154,14 +1154,14 @@ class ChatAnthropic(BaseChatModel): } ] - response = llm.invoke(messages) + response = model.invoke(messages) ``` Details of cached token counts will be included on the `InputTokenDetails` of response's `usage_metadata`: ```python - response = llm.invoke(messages) + response = model.invoke(messages) response.usage_metadata ``` @@ -1189,7 +1189,7 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-sonnet-4-20250514", betas=["context-1m-2025-08-07"], # Enable 1M context beta ) @@ -1210,7 +1210,7 @@ class ChatAnthropic(BaseChatModel): \"\"\") ] - response = llm.invoke(messages) + response = model.invoke(messages) ``` See [Claude documentation](https://docs.anthropic.com/en/docs/build-with-claude/context-windows#1m-token-context-window) @@ -1225,7 +1225,7 @@ class ChatAnthropic(BaseChatModel): from langchain_anthropic import ChatAnthropic from langchain_core.tools import tool - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-3-7-sonnet-20250219", temperature=0, model_kwargs={ @@ -1240,8 +1240,8 @@ class ChatAnthropic(BaseChatModel): \"\"\"Get the weather at a location.\"\"\" return "It's sunny." - llm_with_tools = llm.bind_tools([get_weather]) - response = llm_with_tools.invoke( + model_with_tools = model.bind_tools([get_weather]) + response = model_with_tools.invoke( "What's the weather in San Francisco?" 
) print(response.tool_calls) @@ -1263,13 +1263,13 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-sonnet-4-5-20250929", betas=["context-management-2025-06-27"], context_management={"edits": [{"type": "clear_tool_uses_20250919"}]}, ) - llm_with_tools = llm.bind_tools([{"type": "web_search_20250305", "name": "web_search"}]) - response = llm_with_tools.invoke("Search for recent developments in AI") + model_with_tools = model.bind_tools([{"type": "web_search_20250305", "name": "web_search"}]) + response = model_with_tools.invoke("Search for recent developments in AI") ``` Built-in tools: @@ -1281,16 +1281,16 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic(model="claude-3-5-haiku-latest") + model = ChatAnthropic(model="claude-3-5-haiku-latest") tool = { "type": "web_search_20250305", "name": "web_search", "max_uses": 3, } - llm_with_tools = llm.bind_tools([tool]) + model_with_tools = model.bind_tools([tool]) - response = llm_with_tools.invoke("How do I update a web app to TypeScript 5.5?") + response = model_with_tools.invoke("How do I update a web app to TypeScript 5.5?") ``` ??? note "Web fetch (beta)" @@ -1298,7 +1298,7 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-3-5-haiku-latest", betas=["web-fetch-2025-09-10"], # Enable web fetch beta ) @@ -1308,23 +1308,23 @@ class ChatAnthropic(BaseChatModel): "name": "web_fetch", "max_uses": 3, } - llm_with_tools = llm.bind_tools([tool]) + model_with_tools = model.bind_tools([tool]) - response = llm_with_tools.invoke("Please analyze the content at https://example.com/article") + response = model_with_tools.invoke("Please analyze the content at https://example.com/article") ``` ??? note "Code execution" ```python - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-sonnet-4-20250514", betas=["code-execution-2025-05-22"], ) tool = {"type": "code_execution_20250522", "name": "code_execution"} - llm_with_tools = llm.bind_tools([tool]) + model_with_tools = model.bind_tools([tool]) - response = llm_with_tools.invoke( + response = model_with_tools.invoke( "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]" ) ``` @@ -1347,13 +1347,13 @@ class ChatAnthropic(BaseChatModel): } ] - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-sonnet-4-20250514", betas=["mcp-client-2025-04-04"], mcp_servers=mcp_servers, ) - response = llm.invoke( + response = model.invoke( "What transport protocols does the 2025-03-26 version of the MCP " "spec (modelcontextprotocol/modelcontextprotocol) support?" ) @@ -1364,12 +1364,12 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic(model="claude-3-7-sonnet-20250219") + model = ChatAnthropic(model="claude-3-7-sonnet-20250219") tool = {"type": "text_editor_20250124", "name": "str_replace_editor"} - llm_with_tools = llm.bind_tools([tool]) + model_with_tools = model.bind_tools([tool]) - response = llm_with_tools.invoke( + response = model_with_tools.invoke( "There's a syntax error in my primes.py file. Can you help me fix it?" 
) print(response.text) @@ -1390,18 +1390,18 @@ class ChatAnthropic(BaseChatModel): ```python from langchain_anthropic import ChatAnthropic - llm = ChatAnthropic( + model = ChatAnthropic( model="claude-sonnet-4-5-20250929", betas=["context-management-2025-06-27"], ) - llm_with_tools = llm.bind_tools([{"type": "memory_20250818", "name": "memory"}]) - response = llm_with_tools.invoke("What are my interests?") + model_with_tools = model.bind_tools([{"type": "memory_20250818", "name": "memory"}]) + response = model_with_tools.invoke("What are my interests?") ``` Response metadata ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.response_metadata ``` @@ -1939,9 +1939,9 @@ class ChatAnthropic(BaseChatModel): product: str = Field(..., description="The product to look up.") - llm = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) - llm_with_tools = llm.bind_tools([GetWeather, GetPrice]) - llm_with_tools.invoke( + model = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) + model_with_tools = model.bind_tools([GetWeather, GetPrice]) + model_with_tools.invoke( "What is the weather like in San Francisco", ) # -> AIMessage( @@ -1973,9 +1973,9 @@ class ChatAnthropic(BaseChatModel): product: str = Field(..., description="The product to look up.") - llm = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) - llm_with_tools = llm.bind_tools([GetWeather, GetPrice], tool_choice="any") - llm_with_tools.invoke( + model = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) + model_with_tools = model.bind_tools([GetWeather, GetPrice], tool_choice="any") + model_with_tools.invoke( "what is the weather like in San Francisco", ) ``` @@ -1999,9 +1999,9 @@ class ChatAnthropic(BaseChatModel): product: str = Field(..., description="The product to look up.") - llm = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) - llm_with_tools = llm.bind_tools([GetWeather, GetPrice], tool_choice="GetWeather") - llm_with_tools.invoke("What is the weather like in San Francisco") + model = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) + model_with_tools = model.bind_tools([GetWeather, GetPrice], tool_choice="GetWeather") + model_with_tools.invoke("What is the weather like in San Francisco") ``` Example — cache specific tools: @@ -2033,12 +2033,12 @@ class ChatAnthropic(BaseChatModel): # We need to pass in extra headers to enable use of the beta cache # control API. 
- llm = ChatAnthropic( + model = ChatAnthropic( model="claude-3-5-sonnet-latest", temperature=0, ) - llm_with_tools = llm.bind_tools([GetWeather, cached_price_tool]) - llm_with_tools.invoke("What is the weather like in San Francisco") + model_with_tools = model.bind_tools([GetWeather, cached_price_tool]) + model_with_tools.invoke("What is the weather like in San Francisco") ``` This outputs: @@ -2227,10 +2227,10 @@ class ChatAnthropic(BaseChatModel): justification: str - llm = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> AnswerWithJustification( # answer='They weigh the same', @@ -2252,10 +2252,10 @@ class ChatAnthropic(BaseChatModel): justification: str - llm = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True) + model = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification, include_raw=True) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> { # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}), # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'), @@ -2280,10 +2280,10 @@ class ChatAnthropic(BaseChatModel): "required": ["answer", "justification"], }, } - llm = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) - structured_llm = llm.with_structured_output(schema) + model = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) + structured_model = model.with_structured_output(schema) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> { # 'answer': 'They weigh the same', # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' 
@@ -2356,13 +2356,13 @@ class ChatAnthropic(BaseChatModel): from langchain_anthropic import ChatAnthropic from langchain_core.messages import HumanMessage, SystemMessage - llm = ChatAnthropic(model="claude-3-5-sonnet-20241022") + model = ChatAnthropic(model="claude-3-5-sonnet-20241022") messages = [ SystemMessage(content="You are a scientist"), HumanMessage(content="Hello, Claude"), ] - llm.get_num_tokens_from_messages(messages) + model.get_num_tokens_from_messages(messages) ``` ```txt @@ -2376,7 +2376,7 @@ class ChatAnthropic(BaseChatModel): from langchain_core.messages import HumanMessage from langchain_core.tools import tool - llm = ChatAnthropic(model="claude-3-5-sonnet-20241022") + model = ChatAnthropic(model="claude-3-5-sonnet-20241022") @tool(parse_docstring=True) def get_weather(location: str) -> str: @@ -2390,7 +2390,7 @@ class ChatAnthropic(BaseChatModel): messages = [ HumanMessage(content="What's the weather like in San Francisco?"), ] - llm.get_num_tokens_from_messages(messages, tools=[get_weather]) + model.get_num_tokens_from_messages(messages, tools=[get_weather]) ``` ```txt diff --git a/libs/partners/deepseek/langchain_deepseek/chat_models.py b/libs/partners/deepseek/langchain_deepseek/chat_models.py index 7abfd3ebd63..bc185d366fc 100644 --- a/libs/partners/deepseek/langchain_deepseek/chat_models.py +++ b/libs/partners/deepseek/langchain_deepseek/chat_models.py @@ -59,7 +59,7 @@ class ChatDeepSeek(BaseChatOpenAI): ```python from langchain_deepseek import ChatDeepSeek - llm = ChatDeepSeek( + model = ChatDeepSeek( model="...", temperature=0, max_tokens=None, @@ -76,16 +76,16 @@ class ChatDeepSeek(BaseChatOpenAI): ("system", "You are a helpful translator. Translate the user sentence to French."), ("human", "I love programming."), ] - llm.invoke(messages) + model.invoke(messages) ``` Stream: ```python - for chunk in llm.stream(messages): + for chunk in model.stream(messages): print(chunk.text, end="") ``` ```python - stream = llm.stream(messages) + stream = model.stream(messages) full = next(stream) for chunk in stream: full += chunk @@ -94,13 +94,13 @@ class ChatDeepSeek(BaseChatOpenAI): Async: ```python - await llm.ainvoke(messages) + await model.ainvoke(messages) # stream: - # async for chunk in (await llm.astream(messages)) + # async for chunk in (await model.astream(messages)) # batch: - # await llm.abatch([messages]) + # await model.abatch([messages]) ``` Tool calling: @@ -120,8 +120,8 @@ class ChatDeepSeek(BaseChatOpenAI): location: str = Field(..., description="The city and state, e.g. San Francisco, CA") - llm_with_tools = llm.bind_tools([GetWeather, GetPopulation]) - ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") + model_with_tools = model.bind_tools([GetWeather, GetPopulation]) + ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") ai_msg.tool_calls ``` @@ -142,15 +142,15 @@ class ChatDeepSeek(BaseChatOpenAI): rating: int | None = Field(description="How funny the joke is, from 1 to 10") - structured_llm = llm.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats") + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats") ``` See `ChatDeepSeek.with_structured_output()` for more. 
Token usage: ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.usage_metadata ``` ```python @@ -158,7 +158,7 @@ class ChatDeepSeek(BaseChatOpenAI): ``` Response metadata ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.response_metadata ``` """ # noqa: E501 diff --git a/libs/partners/fireworks/langchain_fireworks/chat_models.py b/libs/partners/fireworks/langchain_fireworks/chat_models.py index 52be161d902..9c242b1eaf5 100644 --- a/libs/partners/fireworks/langchain_fireworks/chat_models.py +++ b/libs/partners/fireworks/langchain_fireworks/chat_models.py @@ -748,13 +748,13 @@ class ChatFireworks(BaseChatModel): ) - llm = ChatFireworks( + model = ChatFireworks( model="accounts/fireworks/models/firefunction-v1", temperature=0, ) - structured_llm = llm.with_structured_output(AnswerWithJustification) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) @@ -778,15 +778,15 @@ class ChatFireworks(BaseChatModel): justification: str - llm = ChatFireworks( + model = ChatFireworks( model="accounts/fireworks/models/firefunction-v1", temperature=0, ) - structured_llm = llm.with_structured_output( + structured_model = model.with_structured_output( AnswerWithJustification, include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -813,13 +813,13 @@ class ChatFireworks(BaseChatModel): ] - llm = ChatFireworks( + model = ChatFireworks( model="accounts/fireworks/models/firefunction-v1", temperature=0, ) - structured_llm = llm.with_structured_output(AnswerWithJustification) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -849,13 +849,13 @@ class ChatFireworks(BaseChatModel): }, } - llm = ChatFireworks( + model = ChatFireworks( model="accounts/fireworks/models/firefunction-v1", temperature=0, ) - structured_llm = llm.with_structured_output(oai_schema) + structured_model = model.with_structured_output(oai_schema) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -876,14 +876,14 @@ class ChatFireworks(BaseChatModel): justification: str - llm = ChatFireworks( + model = ChatFireworks( model="accounts/fireworks/models/firefunction-v1", temperature=0 ) - structured_llm = llm.with_structured_output( + structured_model = model.with_structured_output( AnswerWithJustification, method="json_mode", include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "Answer the following question. " "Make sure to return a JSON blob with keys 'answer' and 'justification'. " "What's heavier a pound of bricks or a pound of feathers?" @@ -898,11 +898,11 @@ class ChatFireworks(BaseChatModel): Example: schema=None, method="json_mode", include_raw=True: ```python - structured_llm = llm.with_structured_output( + structured_model = model.with_structured_output( method="json_mode", include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "Answer the following question. " "Make sure to return a JSON blob with keys 'answer' and 'justification'. " "What's heavier a pound of bricks or a pound of feathers?" 
diff --git a/libs/partners/groq/langchain_groq/chat_models.py b/libs/partners/groq/langchain_groq/chat_models.py index 7f4bb628638..64c72fbdd8d 100644 --- a/libs/partners/groq/langchain_groq/chat_models.py +++ b/libs/partners/groq/langchain_groq/chat_models.py @@ -124,7 +124,7 @@ class ChatGroq(BaseChatModel): ```python from langchain_groq import ChatGroq - llm = ChatGroq( + model = ChatGroq( model="llama-3.1-8b-instant", temperature=0.0, max_retries=2, @@ -138,7 +138,7 @@ class ChatGroq(BaseChatModel): ("system", "You are a helpful translator. Translate the user sentence to French."), ("human", "I love programming."), ] - llm.invoke(messages) + model.invoke(messages) ``` ```python AIMessage(content='The English sentence "I love programming" can @@ -155,7 +155,7 @@ class ChatGroq(BaseChatModel): Stream: ```python # Streaming `text` for each content chunk received - for chunk in llm.stream(messages): + for chunk in model.stream(messages): print(chunk.text, end="") ``` @@ -173,7 +173,7 @@ class ChatGroq(BaseChatModel): ```python # Reconstructing a full response - stream = llm.stream(messages) + stream = model.stream(messages) full = next(stream) for chunk in stream: full += chunk @@ -195,7 +195,7 @@ class ChatGroq(BaseChatModel): Async: ```python - await llm.ainvoke(messages) + await model.ainvoke(messages) ``` ```python @@ -228,7 +228,7 @@ class ChatGroq(BaseChatModel): location: str = Field(..., description="The city and state, e.g. San Francisco, CA") - model_with_tools = llm.bind_tools([GetWeather, GetPopulation]) + model_with_tools = model.bind_tools([GetWeather, GetPopulation]) ai_msg = model_with_tools.invoke("What is the population of NY?") ai_msg.tool_calls ``` @@ -260,7 +260,7 @@ class ChatGroq(BaseChatModel): rating: int | None = Field(description="How funny the joke is, from 1 to 10") - structured_model = llm.with_structured_output(Joke) + structured_model = model.with_structured_output(Joke) structured_model.invoke("Tell me a joke about cats") ``` @@ -276,7 +276,7 @@ class ChatGroq(BaseChatModel): Response metadata: ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.response_metadata ``` @@ -921,10 +921,10 @@ class ChatGroq(BaseChatModel): justification: str | None = Field(default=None, description="A justification for the answer.") - llm = ChatGroq(model="openai/gpt-oss-120b", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = ChatGroq(model="openai/gpt-oss-120b", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> AnswerWithJustification( # answer='They weigh the same', @@ -945,13 +945,13 @@ class ChatGroq(BaseChatModel): justification: str - llm = ChatGroq(model="openai/gpt-oss-120b", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatGroq(model="openai/gpt-oss-120b", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, include_raw=True, ) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> { # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound 
of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}), # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'), @@ -973,10 +973,10 @@ class ChatGroq(BaseChatModel): justification: Annotated[str | None, None, "A justification for the answer."] - llm = ChatGroq(model="openai/gpt-oss-120b", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = ChatGroq(model="openai/gpt-oss-120b", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> { # 'answer': 'They weigh the same', # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' @@ -999,10 +999,10 @@ class ChatGroq(BaseChatModel): 'required': ['answer'] } - llm = ChatGroq(model="openai/gpt-oss-120b", temperature=0) - structured_llm = llm.with_structured_output(oai_schema) + model = ChatGroq(model="openai/gpt-oss-120b", temperature=0) + structured_model = model.with_structured_output(oai_schema) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -1029,13 +1029,13 @@ class ChatGroq(BaseChatModel): justification: str | None = Field(default=None, description="A justification for the answer.") - llm = ChatGroq(model="openai/gpt-oss-120b", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatGroq(model="openai/gpt-oss-120b", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, method="json_schema", ) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> AnswerWithJustification( # answer='They weigh the same', @@ -1054,12 +1054,12 @@ class ChatGroq(BaseChatModel): justification: str - llm = ChatGroq(model="openai/gpt-oss-120b", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatGroq(model="openai/gpt-oss-120b", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, method="json_mode", include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "Answer the following question. " "Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n" "What's heavier a pound of bricks or a pound of feathers?" 
diff --git a/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py b/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py index 2f65ee79c53..fe9f2111855 100644 --- a/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py +++ b/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py @@ -352,7 +352,7 @@ class ChatHuggingFace(BaseChatModel): from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace - llm = HuggingFaceEndpoint( + model = HuggingFaceEndpoint( repo_id="microsoft/Phi-3-mini-4k-instruct", task="text-generation", max_new_tokens=512, @@ -360,7 +360,7 @@ class ChatHuggingFace(BaseChatModel): repetition_penalty=1.03, ) - chat = ChatHuggingFace(llm=llm, verbose=True) + chat = ChatHuggingFace(llm=model, verbose=True) ``` Invoke: diff --git a/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py b/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py index 4ee76521c36..1fa41ffed3d 100644 --- a/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py +++ b/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py @@ -36,7 +36,7 @@ class HuggingFaceEndpoint(LLM): Example: ```python # Basic Example (no streaming) - llm = HuggingFaceEndpoint( + model = HuggingFaceEndpoint( endpoint_url="http://localhost:8010/", max_new_tokens=512, top_k=10, @@ -46,13 +46,13 @@ class HuggingFaceEndpoint(LLM): repetition_penalty=1.03, huggingfacehub_api_token="my-api-key", ) - print(llm.invoke("What is Deep Learning?")) + print(model.invoke("What is Deep Learning?")) # Streaming response example from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler callbacks = [StreamingStdOutCallbackHandler()] - llm = HuggingFaceEndpoint( + model = HuggingFaceEndpoint( endpoint_url="http://localhost:8010/", max_new_tokens=512, top_k=10, @@ -64,17 +64,17 @@ class HuggingFaceEndpoint(LLM): streaming=True, huggingfacehub_api_token="my-api-key", ) - print(llm.invoke("What is Deep Learning?")) + print(model.invoke("What is Deep Learning?")) # Basic Example (no streaming) with Mistral-Nemo-Base-2407 model using a third-party provider (Novita). 
- llm = HuggingFaceEndpoint( + model = HuggingFaceEndpoint( repo_id="mistralai/Mistral-Nemo-Base-2407", provider="novita", max_new_tokens=100, do_sample=False, huggingfacehub_api_token="my-api-key", ) - print(llm.invoke("What is Deep Learning?")) + print(model.invoke("What is Deep Learning?")) ``` """ # noqa: E501 diff --git a/libs/partners/mistralai/langchain_mistralai/chat_models.py b/libs/partners/mistralai/langchain_mistralai/chat_models.py index 88e999db270..578f0acdaa5 100644 --- a/libs/partners/mistralai/langchain_mistralai/chat_models.py +++ b/libs/partners/mistralai/langchain_mistralai/chat_models.py @@ -827,10 +827,10 @@ class ChatMistralAI(BaseChatModel): ) - llm = ChatMistralAI(model="mistral-large-latest", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = ChatMistralAI(model="mistral-large-latest", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) @@ -853,12 +853,12 @@ class ChatMistralAI(BaseChatModel): justification: str - llm = ChatMistralAI(model="mistral-large-latest", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatMistralAI(model="mistral-large-latest", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -884,10 +884,10 @@ class ChatMistralAI(BaseChatModel): ] - llm = ChatMistralAI(model="mistral-large-latest", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = ChatMistralAI(model="mistral-large-latest", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -912,10 +912,10 @@ class ChatMistralAI(BaseChatModel): 'required': ['answer'] } - llm = ChatMistralAI(model="mistral-large-latest", temperature=0) - structured_llm = llm.with_structured_output(oai_schema) + model = ChatMistralAI(model="mistral-large-latest", temperature=0) + structured_model = model.with_structured_output(oai_schema) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -935,12 +935,12 @@ class ChatMistralAI(BaseChatModel): justification: str - llm = ChatMistralAI(model="mistral-large-latest", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatMistralAI(model="mistral-large-latest", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, method="json_mode", include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "Answer the following question. " "Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n" "What's heavier a pound of bricks or a pound of feathers?" @@ -954,11 +954,11 @@ class ChatMistralAI(BaseChatModel): Example: schema=None, method="json_mode", include_raw=True: ```python - structured_llm = llm.with_structured_output( + structured_model = model.with_structured_output( method="json_mode", include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "Answer the following question. 
" "Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n" "What's heavier a pound of bricks or a pound of feathers?" diff --git a/libs/partners/ollama/langchain_ollama/chat_models.py b/libs/partners/ollama/langchain_ollama/chat_models.py index 2e63b51d310..9eefcb04e3d 100644 --- a/libs/partners/ollama/langchain_ollama/chat_models.py +++ b/libs/partners/ollama/langchain_ollama/chat_models.py @@ -294,7 +294,7 @@ class ChatOllama(BaseChatModel): ```python from langchain_ollama import ChatOllama - llm = ChatOllama( + model = ChatOllama( model="gpt-oss:20b", validate_model_on_init=True, temperature=0.8, @@ -309,7 +309,7 @@ class ChatOllama(BaseChatModel): ("system", "You are a helpful translator. Translate the user sentence to French."), ("human", "I love programming."), ] - llm.invoke(messages) + model.invoke(messages) ``` ```python @@ -318,7 +318,7 @@ class ChatOllama(BaseChatModel): Stream: ```python - for chunk in llm.stream("Return the words Hello World!"): + for chunk in model.stream("Return the words Hello World!"): print(chunk.text, end="") ``` @@ -331,7 +331,7 @@ class ChatOllama(BaseChatModel): ``` ```python - stream = llm.stream(messages) + stream = model.stream(messages) full = next(stream) for chunk in stream: full += chunk @@ -359,7 +359,7 @@ class ChatOllama(BaseChatModel): Async: ```python - await llm.ainvoke("Hello how are you!") + await model.ainvoke("Hello how are you!") ``` ```python @@ -383,7 +383,7 @@ class ChatOllama(BaseChatModel): ``` ```python - async for chunk in llm.astream("Say hello world!"): + async for chunk in model.astream("Say hello world!"): print(chunk.content) ``` @@ -396,7 +396,7 @@ class ChatOllama(BaseChatModel): ```python messages = [("human", "Say hello world!"), ("human", "Say goodbye world!")] - await llm.abatch(messages) + await model.abatch(messages) ``` ```python @@ -440,8 +440,8 @@ class ChatOllama(BaseChatModel): JSON mode: ```python - json_llm = ChatOllama(format="json") - llm.invoke( + json_model = ChatOllama(format="json") + json_model.invoke( "Return a query for the weather in a random location and time of day with two keys: location and time_of_day. " "Respond using JSON only." ).content @@ -495,18 +495,18 @@ class ChatOllama(BaseChatModel): ```python from langchain_ollama import ChatOllama - llm = ChatOllama( + model = ChatOllama( model="deepseek-r1:8b", validate_model_on_init=True, reasoning=True, ) - llm.invoke("how many r in the word strawberry?") + model.invoke("how many r in the word strawberry?") # or, on an invocation basis: - llm.invoke("how many r in the word strawberry?", reasoning=True) - # or llm.stream("how many r in the word strawberry?", reasoning=True) + model.invoke("how many r in the word strawberry?", reasoning=True) + # or model.stream("how many r in the word strawberry?", reasoning=True) # If not provided, the invocation will default to the ChatOllama reasoning # param provided (None by default). 
@@ -1327,10 +1327,10 @@ class ChatOllama(BaseChatModel): ) - llm = ChatOllama(model="llama3.1", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = ChatOllama(model="llama3.1", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> AnswerWithJustification( # answer='They weigh the same', @@ -1352,13 +1352,13 @@ class ChatOllama(BaseChatModel): justification: str - llm = ChatOllama(model="llama3.1", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatOllama(model="llama3.1", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, include_raw=True, ) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> { # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}), # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'), @@ -1385,13 +1385,13 @@ class ChatOllama(BaseChatModel): ) - llm = ChatOllama(model="llama3.1", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatOllama(model="llama3.1", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, method="function_calling", ) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> AnswerWithJustification( # answer='They weigh the same', @@ -1414,10 +1414,10 @@ class ChatOllama(BaseChatModel): justification: Annotated[str | None, None, "A justification for the answer."] - llm = ChatOllama(model="llama3.1", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = ChatOllama(model="llama3.1", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") # -> { # 'answer': 'They weigh the same', # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' 
@@ -1441,10 +1441,10 @@ class ChatOllama(BaseChatModel): 'required': ['answer'] } - llm = ChatOllama(model="llama3.1", temperature=0) - structured_llm = llm.with_structured_output(oai_schema) + model = ChatOllama(model="llama3.1", temperature=0) + structured_model = model.with_structured_output(oai_schema) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -1465,12 +1465,12 @@ class ChatOllama(BaseChatModel): justification: str - llm = ChatOllama(model="llama3.1", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatOllama(model="llama3.1", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, method="json_mode", include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "Answer the following question. " "Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n" "What's heavier a pound of bricks or a pound of feathers?" diff --git a/libs/partners/ollama/langchain_ollama/llms.py b/libs/partners/ollama/langchain_ollama/llms.py index 45a0a79fece..42b387a381c 100644 --- a/libs/partners/ollama/langchain_ollama/llms.py +++ b/libs/partners/ollama/langchain_ollama/llms.py @@ -66,7 +66,7 @@ class OllamaLLM(BaseLLM): ```python from langchain_ollama import OllamaLLM - llm = OllamaLLM( + model = OllamaLLM( model="llama3.1", temperature=0.7, num_predict=256, @@ -78,7 +78,7 @@ class OllamaLLM(BaseLLM): Invoke: ```python input_text = "The meaning of life is " - response = llm.invoke(input_text) + response = model.invoke(input_text) print(response) ``` ```txt @@ -88,7 +88,7 @@ class OllamaLLM(BaseLLM): Stream: ```python - for chunk in llm.stream(input_text): + for chunk in model.stream(input_text): print(chunk, end="") ``` ```txt @@ -98,10 +98,10 @@ class OllamaLLM(BaseLLM): Async: ```python - response = await llm.ainvoke(input_text) + response = await model.ainvoke(input_text) # stream: - # async for chunk in llm.astream(input_text): + # async for chunk in model.astream(input_text): # print(chunk, end="") ``` """ diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py index 602b91f6980..215a4b691d8 100644 --- a/libs/partners/openai/langchain_openai/chat_models/azure.py +++ b/libs/partners/openai/langchain_openai/chat_models/azure.py @@ -82,7 +82,7 @@ class AzureChatOpenAI(BaseChatOpenAI): ```python from langchain_openai import AzureChatOpenAI - llm = AzureChatOpenAI( + model = AzureChatOpenAI( azure_deployment="your-deployment", api_version="2024-05-01-preview", temperature=0, @@ -127,7 +127,7 @@ class AzureChatOpenAI(BaseChatOpenAI): ), ("human", "I love programming."), ] - llm.invoke(messages) + model.invoke(messages) ``` ```python @@ -172,7 +172,7 @@ class AzureChatOpenAI(BaseChatOpenAI): Stream: ```python - for chunk in llm.stream(messages): + for chunk in model.stream(messages): print(chunk.text, end="") ``` @@ -200,7 +200,7 @@ class AzureChatOpenAI(BaseChatOpenAI): ``` ```python - stream = llm.stream(messages) + stream = model.stream(messages) full = next(stream) for chunk in stream: full += chunk @@ -221,13 +221,13 @@ class AzureChatOpenAI(BaseChatOpenAI): Async: ```python - await llm.ainvoke(messages) + await model.ainvoke(messages) # stream: - # async for chunk in (await llm.astream(messages)) + # async for chunk in (await model.astream(messages)) # batch: - # await llm.abatch([messages]) + # await model.abatch([messages]) ``` Tool calling: @@ -251,8 +251,8 
@@ class AzureChatOpenAI(BaseChatOpenAI): ) - llm_with_tools = llm.bind_tools([GetWeather, GetPopulation]) - ai_msg = llm_with_tools.invoke( + model_with_tools = model.bind_tools([GetWeather, GetPopulation]) + ai_msg = model_with_tools.invoke( "Which city is hotter today and which is bigger: LA or NY?" ) ai_msg.tool_calls @@ -300,8 +300,8 @@ class AzureChatOpenAI(BaseChatOpenAI): ) - structured_llm = llm.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats") + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats") ``` ```python @@ -316,8 +316,8 @@ class AzureChatOpenAI(BaseChatOpenAI): JSON mode: ```python - json_llm = llm.bind(response_format={"type": "json_object"}) - ai_msg = json_llm.invoke( + json_model = model.bind(response_format={"type": "json_object"}) + ai_msg = json_model.invoke( "Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]" ) ai_msg.content @@ -344,7 +344,7 @@ class AzureChatOpenAI(BaseChatOpenAI): }, ] ) - ai_msg = llm.invoke([message]) + ai_msg = model.invoke([message]) ai_msg.content ``` @@ -354,7 +354,7 @@ class AzureChatOpenAI(BaseChatOpenAI): Token usage: ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.usage_metadata ``` @@ -363,8 +363,8 @@ class AzureChatOpenAI(BaseChatOpenAI): ``` Logprobs: ```python - logprobs_llm = llm.bind(logprobs=True) - ai_msg = logprobs_llm.invoke(messages) + logprobs_model = model.bind(logprobs=True) + ai_msg = logprobs_model.invoke(messages) ai_msg.response_metadata["logprobs"] ``` @@ -422,7 +422,7 @@ class AzureChatOpenAI(BaseChatOpenAI): Response metadata ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.response_metadata ``` @@ -911,16 +911,16 @@ class AzureChatOpenAI(BaseChatOpenAI): \"\"\"Get weather at a location.\"\"\" pass - llm = init_chat_model("openai:gpt-4o-mini") + model = init_chat_model("openai:gpt-4o-mini") - structured_llm = llm.with_structured_output( + structured_model = model.with_structured_output( ResponseSchema, tools=[get_weather], strict=True, include_raw=True, ) - structured_llm.invoke("What's the weather in Boston?") + structured_model.invoke("What's the weather in Boston?") ``` ```python @@ -986,10 +986,12 @@ class AzureChatOpenAI(BaseChatOpenAI): ) - llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = AzureChatOpenAI( + azure_deployment="...", model="gpt-4o", temperature=0 + ) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) @@ -1017,12 +1019,14 @@ class AzureChatOpenAI(BaseChatOpenAI): ) - llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0) - structured_llm = llm.with_structured_output( + model = AzureChatOpenAI( + azure_deployment="...", model="gpt-4o", temperature=0 + ) + structured_model = model.with_structured_output( AnswerWithJustification, method="function_calling" ) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) @@ -1046,12 +1050,14 @@ class AzureChatOpenAI(BaseChatOpenAI): justification: str - llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0) - structured_llm = llm.with_structured_output( + model = AzureChatOpenAI( + azure_deployment="...", model="gpt-4o", 
temperature=0 + ) + structured_model = model.with_structured_output( AnswerWithJustification, include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -1078,10 +1084,12 @@ class AzureChatOpenAI(BaseChatOpenAI): ] - llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = AzureChatOpenAI( + azure_deployment="...", model="gpt-4o", temperature=0 + ) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -1107,14 +1115,14 @@ class AzureChatOpenAI(BaseChatOpenAI): 'required': ['answer'] } - llm = AzureChatOpenAI( + model = AzureChatOpenAI( azure_deployment="...", model="gpt-4o", temperature=0, ) - structured_llm = llm.with_structured_output(oai_schema) + structured_model = model.with_structured_output(oai_schema) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) # -> { @@ -1135,16 +1143,16 @@ class AzureChatOpenAI(BaseChatOpenAI): justification: str - llm = AzureChatOpenAI( + model = AzureChatOpenAI( azure_deployment="...", model="gpt-4o", temperature=0, ) - structured_llm = llm.with_structured_output( + structured_model = model.with_structured_output( AnswerWithJustification, method="json_mode", include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "Answer the following question. " "Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n" "What's heavier a pound of bricks or a pound of feathers?" @@ -1159,11 +1167,11 @@ class AzureChatOpenAI(BaseChatOpenAI): ??? note "Example: `schema=None`, `method='json_mode'`, `include_raw=True`" ```python - structured_llm = llm.with_structured_output( + structured_model = model.with_structured_output( method="json_mode", include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "Answer the following question. " "Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n" "What's heavier a pound of bricks or a pound of feathers?" diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 8fffd34e6ae..20bbeac10f5 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -654,11 +654,11 @@ class BaseChatOpenAI(BaseChatModel): For example, the following two are equivalent: ```python - llm = ChatOpenAI( + model = ChatOpenAI( model="...", use_previous_response_id=True, ) - llm.invoke( + model.invoke( [ HumanMessage("Hello"), AIMessage("Hi there!", response_metadata={"id": "resp_123"}), @@ -668,8 +668,8 @@ class BaseChatOpenAI(BaseChatModel): ``` ```python - llm = ChatOpenAI(model="...", use_responses_api=True) - llm.invoke([HumanMessage("How are you?")], previous_response_id="resp_123") + model = ChatOpenAI(model="...", use_responses_api=True) + model.invoke([HumanMessage("How are you?")], previous_response_id="resp_123") ``` !!! 
version-added "Added in version 0.3.26" @@ -1801,16 +1801,16 @@ class BaseChatOpenAI(BaseChatModel): \"\"\"Get weather at a location.\"\"\" pass - llm = init_chat_model("openai:gpt-4o-mini") + model = init_chat_model("openai:gpt-4o-mini") - structured_llm = llm.with_structured_output( + structured_model = model.with_structured_output( ResponseSchema, tools=[get_weather], strict=True, include_raw=True, ) - structured_llm.invoke("What's the weather in Boston?") + structured_model.invoke("What's the weather in Boston?") ``` ```python @@ -2057,7 +2057,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ```python from langchain_openai import ChatOpenAI - llm = ChatOpenAI( + model = ChatOpenAI( model="...", temperature=0, max_tokens=None, @@ -2104,7 +2104,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ), ("human", "I love programming."), ] - llm.invoke(messages) + model.invoke(messages) ``` Results in an `AIMessage` response: @@ -2133,7 +2133,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] Stream a response from the model: ```python - for chunk in llm.stream(messages): + for chunk in model.stream(messages): print(chunk.text, end="") ``` @@ -2158,7 +2158,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] To collect the full message, you can concatenate the chunks: ```python - stream = llm.stream(messages) + stream = model.stream(messages) full = next(stream) for chunk in stream: full += chunk @@ -2178,13 +2178,13 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ```python # Invoke - await llm.ainvoke(messages) + await model.ainvoke(messages) # Stream - async for chunk in (await llm.astream(messages)) + async for chunk in (await model.astream(messages)) # Batch - await llm.abatch([messages]) + await model.abatch([messages]) ``` Results in an `AIMessage` response: @@ -2236,11 +2236,11 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ) - llm_with_tools = llm.bind_tools( + model_with_tools = model.bind_tools( [GetWeather, GetPopulation] # strict = True # Enforce tool args schema is respected ) - ai_msg = llm_with_tools.invoke( + ai_msg = model_with_tools.invoke( "Which city is hotter today and which is bigger: LA or NY?" ) ai_msg.tool_calls @@ -2277,7 +2277,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] be set to `False` to disable parallel tool calls: ```python - ai_msg = llm_with_tools.invoke( + ai_msg = model_with_tools.invoke( "What is the weather in LA and NY?", parallel_tool_calls=False ) ai_msg.tool_calls @@ -2294,7 +2294,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ``` Like other runtime parameters, `parallel_tool_calls` can be bound to a model - using `llm.bind(parallel_tool_calls=False)` or during instantiation by + using `model.bind(parallel_tool_calls=False)` or during instantiation by setting `model_kwargs`. See `bind_tools` for more. 
@@ -2308,12 +2308,12 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ```python from langchain_openai import ChatOpenAI - llm = ChatOpenAI(model="...", output_version="responses/v1") + model = ChatOpenAI(model="...", output_version="responses/v1") tool = {"type": "web_search"} - llm_with_tools = llm.bind_tools([tool]) + model_with_tools = model.bind_tools([tool]) - response = llm_with_tools.invoke("What was a positive news story from today?") + response = model_with_tools.invoke("What was a positive news story from today?") response.content ``` @@ -2354,12 +2354,12 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ```python from langchain_openai import ChatOpenAI - llm = ChatOpenAI( + model = ChatOpenAI( model="...", use_responses_api=True, output_version="responses/v1", ) - response = llm.invoke("Hi, I'm Bob.") + response = model.invoke("Hi, I'm Bob.") response.text ``` @@ -2368,7 +2368,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ``` ```python - second_response = llm.invoke( + second_response = model.invoke( "What is my name?", previous_response_id=response.response_metadata["id"], ) @@ -2388,7 +2388,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] recent response. ```python - llm = ChatOpenAI(model="...", use_previous_response_id=True) + model = ChatOpenAI(model="...", use_previous_response_id=True) ``` ??? info "Reasoning output" @@ -2404,10 +2404,10 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] "summary": "auto", # 'detailed', 'auto', or None } - llm = ChatOpenAI( + model = ChatOpenAI( model="...", reasoning=reasoning, output_version="responses/v1" ) - response = llm.invoke("What is 3^3?") + response = model.invoke("What is 3^3?") # Response text print(f"Output: {response.text}") @@ -2448,8 +2448,8 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ) - structured_llm = llm.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats") + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats") ``` ```python @@ -2465,8 +2465,8 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ??? info "JSON mode" ```python - json_llm = llm.bind(response_format={"type": "json_object"}) - ai_msg = json_llm.invoke( + json_model = model.bind(response_format={"type": "json_object"}) + ai_msg = json_model.invoke( "Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]" ) ai_msg.content @@ -2495,7 +2495,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ] ) - ai_msg = llm.invoke([message]) + ai_msg = model.invoke([message]) ai_msg.content ``` @@ -2506,7 +2506,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ??? info "Token usage" ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.usage_metadata ```txt @@ -2516,7 +2516,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] When streaming, set the `stream_usage` kwarg: ```python - stream = llm.stream(messages, stream_usage=True) + stream = model.stream(messages, stream_usage=True) full = next(stream) for chunk in stream: full += chunk @@ -2530,8 +2530,8 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ??? 
info "Logprobs" ```python - logprobs_llm = llm.bind(logprobs=True) - ai_msg = logprobs_llm.invoke(messages) + logprobs_model = model.bind(logprobs=True) + ai_msg = logprobs_model.invoke(messages) ai_msg.response_metadata["logprobs"] ``` @@ -2590,7 +2590,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ??? info "Response metadata" ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.response_metadata ``` @@ -2621,7 +2621,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ```python from langchain_openai import ChatOpenAI - llm = ChatOpenAI(model="...", service_tier="flex") + model = ChatOpenAI(model="...", service_tier="flex") ``` Note that this is a beta feature that is only available for a subset of models. @@ -2639,7 +2639,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ```python title="LM Studio example with TTL (auto-eviction)" from langchain_openai import ChatOpenAI - llm = ChatOpenAI( + model = ChatOpenAI( base_url="http://localhost:1234/v1", api_key="lm-studio", # Can be any string model="mlx-community/QwQ-32B-4bit", @@ -2649,7 +2649,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ``` ```python title="vLLM example with custom parameters" - llm = ChatOpenAI( + model = ChatOpenAI( base_url="http://localhost:8000/v1", api_key="EMPTY", model="meta-llama/Llama-2-7b-chat-hf", @@ -2669,7 +2669,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ```python # Standard OpenAI parameters - llm = ChatOpenAI( + model = ChatOpenAI( model="...", model_kwargs={ "stream_options": {"include_usage": True}, @@ -2689,7 +2689,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ```python # Custom provider parameters - llm = ChatOpenAI( + model = ChatOpenAI( base_url="http://localhost:8000/v1", model="custom-model", extra_body={ @@ -2715,19 +2715,19 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] per-invocation to improve cache hit rates and reduce costs: ```python - llm = ChatOpenAI(model="...") + model = ChatOpenAI(model="...") - response = llm.invoke( + response = model.invoke( messages, prompt_cache_key="example-key-a", # Routes to same machine for cache hits ) - customer_response = llm.invoke(messages, prompt_cache_key="example-key-b") - support_response = llm.invoke(messages, prompt_cache_key="example-key-c") + customer_response = model.invoke(messages, prompt_cache_key="example-key-b") + support_response = model.invoke(messages, prompt_cache_key="example-key-c") # Dynamic cache keys based on context cache_key = f"example-key-{dynamic_suffix}" - response = llm.invoke(messages, prompt_cache_key=cache_key) + response = model.invoke(messages, prompt_cache_key=cache_key) ``` Cache keys help ensure requests with the same prompt prefix are routed to @@ -2905,16 +2905,16 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] \"\"\"Get weather at a location.\"\"\" pass - llm = init_chat_model("openai:gpt-4o-mini") + model = init_chat_model("openai:gpt-4o-mini") - structured_llm = llm.with_structured_output( + structured_model = model.with_structured_output( ResponseSchema, tools=[get_weather], strict=True, include_raw=True, ) - structured_llm.invoke("What's the weather in Boston?") + structured_model.invoke("What's the weather in Boston?") ``` ```python @@ -2971,10 +2971,10 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ) - llm = ChatOpenAI(model="...", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = 
ChatOpenAI(model="...", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) ``` @@ -3002,12 +3002,12 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ) - llm = ChatOpenAI(model="...", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatOpenAI(model="...", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, method="function_calling" ) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) ``` @@ -3033,12 +3033,12 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] justification: str - llm = ChatOpenAI(model="...", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatOpenAI(model="...", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) ``` @@ -3085,10 +3085,10 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ] - llm = ChatOpenAI(model="...", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) + model = ChatOpenAI(model="...", temperature=0) + structured_model = model.with_structured_output(AnswerWithJustification) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) ``` @@ -3121,10 +3121,10 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] }, } - llm = ChatOpenAI(model="...", temperature=0) - structured_llm = llm.with_structured_output(oai_schema) + model = ChatOpenAI(model="...", temperature=0) + structured_model = model.with_structured_output(oai_schema) - structured_llm.invoke( + structured_model.invoke( "What weighs more a pound of bricks or a pound of feathers" ) ``` @@ -3148,12 +3148,12 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] justification: str - llm = ChatOpenAI(model="...", temperature=0) - structured_llm = llm.with_structured_output( + model = ChatOpenAI(model="...", temperature=0) + structured_model = model.with_structured_output( AnswerWithJustification, method="json_mode", include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "Answer the following question. " "Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n" "What's heavier a pound of bricks or a pound of feathers?" @@ -3176,11 +3176,11 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] ??? note "Example: `schema=None`, `method='json_mode'`, `include_raw=True`" ```python - structured_llm = llm.with_structured_output( + structured_model = model.with_structured_output( method="json_mode", include_raw=True ) - structured_llm.invoke( + structured_model.invoke( "Answer the following question. " "Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n" "What's heavier a pound of bricks or a pound of feathers?" 
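The structured-output hunks above all follow the same `structured_model = model.with_structured_output(...)` pattern. Below is a minimal sketch of that pattern with `include_raw=True`, not part of the diff, assuming an `OPENAI_API_KEY` is set and that `gpt-4o-mini` is an available model name.

```python
from pydantic import BaseModel

from langchain_openai import ChatOpenAI


class AnswerWithJustification(BaseModel):
    """An answer to the user question along with justification for the answer."""

    answer: str
    justification: str


# Assumption: OPENAI_API_KEY is set and "gpt-4o-mini" is an available model name.
model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
structured_model = model.with_structured_output(
    AnswerWithJustification, include_raw=True
)

result = structured_model.invoke(
    "What weighs more a pound of bricks or a pound of feathers"
)
# With include_raw=True the result is a dict with "raw", "parsed", and
# "parsing_error" keys rather than a bare AnswerWithJustification instance.
print(result["parsed"])
```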
diff --git a/libs/partners/openai/langchain_openai/llms/base.py b/libs/partners/openai/langchain_openai/llms/base.py index 1668854bba2..b835526552b 100644 --- a/libs/partners/openai/langchain_openai/llms/base.py +++ b/libs/partners/openai/langchain_openai/llms/base.py @@ -110,7 +110,7 @@ class BaseOpenAI(BaseLLM): ```python from langchain_openai.llms.base import BaseOpenAI - llm = BaseOpenAI( + model = BaseOpenAI( model_name="gpt-3.5-turbo-instruct", temperature=0.7, max_tokens=256, @@ -127,7 +127,7 @@ class BaseOpenAI(BaseLLM): Invoke: ```python input_text = "The meaning of life is " - response = llm.invoke(input_text) + response = model.invoke(input_text) print(response) ``` @@ -138,7 +138,7 @@ class BaseOpenAI(BaseLLM): Stream: ```python - for chunk in llm.stream(input_text): + for chunk in model.stream(input_text): print(chunk, end="") ``` ```txt @@ -148,14 +148,14 @@ class BaseOpenAI(BaseLLM): Async: ```python - response = await llm.ainvoke(input_text) + response = await model.ainvoke(input_text) # stream: - # async for chunk in llm.astream(input_text): + # async for chunk in model.astream(input_text): # print(chunk, end="") # batch: - # await llm.abatch([input_text]) + # await model.abatch([input_text]) ``` ``` "a philosophical question that has been debated by thinkers and @@ -739,7 +739,7 @@ class OpenAI(BaseOpenAI): ```python from langchain_openai import OpenAI - llm = OpenAI( + model = OpenAI( model="gpt-3.5-turbo-instruct", temperature=0, max_retries=2, @@ -753,7 +753,7 @@ class OpenAI(BaseOpenAI): Invoke: ```python input_text = "The meaning of life is " - llm.invoke(input_text) + model.invoke(input_text) ``` ```txt "a philosophical question that has been debated by thinkers and scholars for centuries." @@ -761,7 +761,7 @@ class OpenAI(BaseOpenAI): Stream: ```python - for chunk in llm.stream(input_text): + for chunk in model.stream(input_text): print(chunk, end="|") ``` ```txt @@ -769,7 +769,7 @@ class OpenAI(BaseOpenAI): ``` ```python - "".join(llm.stream(input_text)) + "".join(model.stream(input_text)) ``` ```txt "a philosophical question that has been debated by thinkers and scholars for centuries." @@ -777,14 +777,14 @@ class OpenAI(BaseOpenAI): Async: ```python - await llm.ainvoke(input_text) + await model.ainvoke(input_text) # stream: - # async for chunk in (await llm.astream(input_text)): + # async for chunk in (await model.astream(input_text)): # print(chunk) # batch: - # await llm.abatch([input_text]) + # await model.abatch([input_text]) ``` ```txt "a philosophical question that has been debated by thinkers and scholars for centuries." 
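For the completions-style `OpenAI` wrapper the renamed variable reads the same way. A minimal sketch, not part of the diff, assuming an `OPENAI_API_KEY` is set and that `gpt-3.5-turbo-instruct` remains an available completions model:

```python
from langchain_openai import OpenAI

# Assumption: OPENAI_API_KEY is set and "gpt-3.5-turbo-instruct" is still an
# available completions model.
model = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0, max_retries=2)

input_text = "The meaning of life is "

# LLM-class models return a plain string from invoke().
print(model.invoke(input_text))

# stream() yields string chunks that can be joined back together.
print("".join(model.stream(input_text)))
```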
diff --git a/libs/partners/openai/langchain_openai/tools/custom_tool.py b/libs/partners/openai/langchain_openai/tools/custom_tool.py index 0724d56277c..bf8b1ad1bcf 100644 --- a/libs/partners/openai/langchain_openai/tools/custom_tool.py +++ b/libs/partners/openai/langchain_openai/tools/custom_tool.py @@ -38,9 +38,9 @@ def custom_tool(*args: Any, **kwargs: Any) -> Any: return "27" - llm = ChatOpenAI(model="gpt-5", output_version="responses/v1") + model = ChatOpenAI(model="gpt-5", output_version="responses/v1") - agent = create_react_agent(llm, [execute_code]) + agent = create_react_agent(model, [execute_code]) input_message = {"role": "user", "content": "Use the tool to calculate 3^3."} for step in agent.stream( @@ -79,9 +79,9 @@ def custom_tool(*args: Any, **kwargs: Any) -> Any: return "27" - llm = ChatOpenAI(model="gpt-5", output_version="responses/v1") + model = ChatOpenAI(model="gpt-5", output_version="responses/v1") - agent = create_react_agent(llm, [do_math]) + agent = create_react_agent(model, [do_math]) input_message = {"role": "user", "content": "Use the tool to calculate 3^3."} for step in agent.stream( diff --git a/libs/partners/perplexity/langchain_perplexity/chat_models.py b/libs/partners/perplexity/langchain_perplexity/chat_models.py index 3cd1b1a76d7..7566792fe9e 100644 --- a/libs/partners/perplexity/langchain_perplexity/chat_models.py +++ b/libs/partners/perplexity/langchain_perplexity/chat_models.py @@ -96,14 +96,14 @@ class ChatPerplexity(BaseChatModel): ```python from langchain_perplexity import ChatPerplexity - llm = ChatPerplexity(model="sonar", temperature=0.7) + model = ChatPerplexity(model="sonar", temperature=0.7) ``` Invoke: ```python messages = [("system", "You are a chatbot."), ("user", "Hello!")] - llm.invoke(messages) + model.invoke(messages) ``` Invoke with structured output: @@ -117,31 +117,31 @@ class ChatPerplexity(BaseChatModel): content: str - llm.with_structured_output(StructuredOutput) - llm.invoke(messages) + model.with_structured_output(StructuredOutput) + model.invoke(messages) ``` Invoke with perplexity-specific params: ```python - llm.invoke(messages, extra_body={"search_recency_filter": "week"}) + model.invoke(messages, extra_body={"search_recency_filter": "week"}) ``` Stream: ```python - for chunk in llm.stream(messages): + for chunk in model.stream(messages): print(chunk.content) ``` Token usage: ```python - response = llm.invoke(messages) + response = model.invoke(messages) response.usage_metadata ``` Response metadata: ```python - response = llm.invoke(messages) + response = model.invoke(messages) response.response_metadata ``` """ # noqa: E501 diff --git a/libs/partners/xai/langchain_xai/chat_models.py b/libs/partners/xai/langchain_xai/chat_models.py index 2f0224bfa21..5bfd2fb62d7 100644 --- a/libs/partners/xai/langchain_xai/chat_models.py +++ b/libs/partners/xai/langchain_xai/chat_models.py @@ -62,7 +62,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] ```python from langchain_xai import ChatXAI - llm = ChatXAI( + model = ChatXAI( model="grok-4", temperature=0, max_tokens=None, @@ -82,7 +82,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] ), ("human", "I love programming."), ] - llm.invoke(messages) + model.invoke(messages) ``` ```python @@ -110,7 +110,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] Stream: ```python - for chunk in llm.stream(messages): + for chunk in model.stream(messages): print(chunk.text, end="") ``` @@ -129,13 +129,13 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] Async: 
```python - await llm.ainvoke(messages) + await model.ainvoke(messages) # stream: - # async for chunk in (await llm.astream(messages)) + # async for chunk in (await model.astream(messages)) # batch: - # await llm.abatch([messages]) + # await model.abatch([messages]) ``` ```python @@ -192,7 +192,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] ```python from pydantic import BaseModel, Field - llm = ChatXAI(model="grok-4") + model = ChatXAI(model="grok-4") class GetWeather(BaseModel): @@ -207,8 +207,8 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] location: str = Field(..., description="The city and state, e.g. San Francisco, CA") - llm_with_tools = llm.bind_tools([GetWeather, GetPopulation]) - ai_msg = llm_with_tools.invoke("Which city is bigger: LA or NY?") + model_with_tools = model.bind_tools([GetWeather, GetPopulation]) + ai_msg = model_with_tools.invoke("Which city is bigger: LA or NY?") ai_msg.tool_calls ``` @@ -237,12 +237,12 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] constructor's `extra_body` argument. For example, to disable tool / function calling: ```python - llm = ChatXAI(model="grok-4", extra_body={"tool_choice": "none"}) + model = ChatXAI(model="grok-4", extra_body={"tool_choice": "none"}) ``` To require that the model always calls a tool / function, set `tool_choice` to `'required'`: ```python - llm = ChatXAI(model="grok-4", extra_body={"tool_choice": "required"}) + model = ChatXAI(model="grok-4", extra_body={"tool_choice": "required"}) ``` To specify a tool / function to call, set `tool_choice` to the name of the tool / function: @@ -250,7 +250,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] ```python from pydantic import BaseModel, Field - llm = ChatXAI( + model = ChatXAI( model="grok-4", extra_body={ "tool_choice": {"type": "function", "function": {"name": "GetWeather"}} @@ -269,8 +269,8 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] location: str = Field(..., description='The city and state, e.g. San Francisco, CA') - llm_with_tools = llm.bind_tools([GetWeather, GetPopulation]) - ai_msg = llm_with_tools.invoke( + model_with_tools = model.bind_tools([GetWeather, GetPopulation]) + ai_msg = model_with_tools.invoke( "Which city is bigger: LA or NY?", ) ai_msg.tool_calls @@ -309,8 +309,8 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] rating: int | None = Field(description="How funny the joke is, from 1 to 10") - structured_llm = llm.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats") + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats") ``` ```python @@ -328,7 +328,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] ```python from langchain_xai import ChatXAI - llm = ChatXAI( + model = ChatXAI( model="grok-4", search_parameters={ "mode": "auto", @@ -339,7 +339,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] }, ) - llm.invoke("Provide me a digest of world news in the last 24 hours.") + model.invoke("Provide me a digest of world news in the last 24 hours.") ``` !!! note @@ -348,7 +348,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] Token usage: ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.usage_metadata ``` @@ -358,9 +358,9 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] Logprobs: ```python - logprobs_llm = llm.bind(logprobs=True) + logprobs_model = model.bind(logprobs=True) messages = [("human", "Say Hello World! 
Do not return anything else.")] - ai_msg = logprobs_llm.invoke(messages) + ai_msg = logprobs_model.invoke(messages) ai_msg.response_metadata["logprobs"] ``` @@ -375,7 +375,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] Response metadata ```python - ai_msg = llm.invoke(messages) + ai_msg = model.invoke(messages) ai_msg.response_metadata ``` diff --git a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py index e20085e1306..07303e5a61f 100644 --- a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py +++ b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py @@ -1498,8 +1498,8 @@ class ChatModelIntegrationTests(ChatModelTests): prompt = ChatPromptTemplate.from_messages( [("human", "Hello. Please respond in the style of {answer_style}.")] ) - llm = GenericFakeChatModel(messages=iter(["hello matey"])) - chain = prompt | llm | StrOutputParser() + model = GenericFakeChatModel(messages=iter(["hello matey"])) + chain = prompt | model | StrOutputParser() tool_ = chain.as_tool( name="greeting_generator", description="Generate a greeting in a particular style of speaking.",
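The standard-tests hunk above builds a prompt | model | parser chain from `GenericFakeChatModel` and exposes it as a tool. A minimal, self-contained version of that chain under the new naming, assuming `GenericFakeChatModel` is importable from `langchain_core.language_models`:

```python
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages(
    [("human", "Hello. Please respond in the style of {answer_style}.")]
)
model = GenericFakeChatModel(messages=iter(["hello matey"]))
chain = prompt | model | StrOutputParser()

tool_ = chain.as_tool(
    name="greeting_generator",
    description="Generate a greeting in a particular style of speaking.",
)

# The fake model replays its queued message, so the requested style does not
# change the output here.
print(tool_.invoke({"answer_style": "pirate"}))
```

Because the fake model simply replays its queued message, the tool returns `"hello matey"` regardless of the requested style, which is what makes it suitable for the standard integration tests.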