Mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-16 06:53:16 +00:00)
patch: remove usage of llm, chat model __call__ (#20788)
- `llm(prompt)` -> `llm.invoke(prompt)`
- `llm(prompt=prompt)` -> `llm.invoke(prompt)` (same with `messages=`)
- `llm(prompt, callbacks=callbacks)` -> `llm.invoke(prompt, config={"callbacks": callbacks})`
- `llm(prompt, **kwargs)` -> `llm.invoke(prompt, **kwargs)`
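As a quick illustration of these four rewrites, here is a minimal sketch (not taken from the patch itself): it assumes `langchain-core` and `langchain-community` are installed, and uses the fake `FakeListLLM` plus a placeholder `StdOutCallbackHandler` so it runs without any API keys.

```python
from langchain_community.llms import FakeListLLM
from langchain_core.callbacks import StdOutCallbackHandler

# A fake LLM that cycles through canned responses, so the snippet runs offline.
llm = FakeListLLM(responses=["joke one", "joke two", "joke three", "joke four"])
prompt = "Tell me a joke"
callbacks = [StdOutCallbackHandler()]

# Old: llm(prompt)                       -> new: positional invoke
print(llm.invoke(prompt))

# Old: llm(prompt=prompt)                -> new: still a single positional argument
print(llm.invoke(prompt))

# Old: llm(prompt, callbacks=callbacks)  -> new: callbacks move into the config dict
print(llm.invoke(prompt, config={"callbacks": callbacks}))

# Old: llm(prompt, **kwargs)             -> new: extra generation kwargs pass through unchanged
print(llm.invoke(prompt, stop=["\n"]))
```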
@@ -194,7 +194,7 @@
 "llm = OpenAI(\n",
 " temperature=0, callbacks=[LabelStudioCallbackHandler(project_name=\"My Project\")]\n",
 ")\n",
-"print(llm(\"Tell me a joke\"))"
+"print(llm.invoke(\"Tell me a joke\"))"
 ]
 },
 {
@@ -270,7 +270,7 @@
 " )\n",
 " ]\n",
 ")\n",
-"llm_results = chat_llm(\n",
+"llm_results = chat_llm.invoke(\n",
 " [\n",
 " SystemMessage(content=\"Always use a lot of emojis\"),\n",
 " HumanMessage(content=\"Tell me a joke\"),\n",
@@ -107,7 +107,7 @@ User tracking allows you to identify your users, track their cost, conversations
 from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler, identify

 with identify("user-123"):
-    llm("Tell me a joke")
+    llm.invoke("Tell me a joke")

 with identify("user-456", user_props={"email": "user456@test.com"}):
     agen.run("Who is Leo DiCaprio's girlfriend?")
@@ -103,7 +103,7 @@
 " temperature=0,\n",
 " callbacks=[PromptLayerCallbackHandler(pl_tags=[\"chatopenai\"])],\n",
 ")\n",
-"llm_results = chat_llm(\n",
+"llm_results = chat_llm.invoke(\n",
 " [\n",
 " HumanMessage(content=\"What comes after 1,2,3 ?\"),\n",
 " HumanMessage(content=\"Tell me another joke?\"),\n",
@@ -129,10 +129,11 @@
 "from langchain_community.llms import GPT4All\n",
 "\n",
 "model = GPT4All(model=\"./models/gpt4all-model.bin\", n_ctx=512, n_threads=8)\n",
+"callbacks = [PromptLayerCallbackHandler(pl_tags=[\"langchain\", \"gpt4all\"])]\n",
 "\n",
-"response = model(\n",
+"response = model.invoke(\n",
 " \"Once upon a time, \",\n",
-" callbacks=[PromptLayerCallbackHandler(pl_tags=[\"langchain\", \"gpt4all\"])],\n",
+" config={\"callbacks\": callbacks},\n",
 ")"
 ]
 },
@@ -181,7 +182,7 @@
 ")\n",
 "\n",
 "example_prompt = promptlayer.prompts.get(\"example\", version=1, langchain=True)\n",
-"openai_llm(example_prompt.format(product=\"toasters\"))"
+"openai_llm.invoke(example_prompt.format(product=\"toasters\"))"
 ]
 },
 {
@@ -315,7 +315,7 @@
 }
 ],
 "source": [
-"chat_res = chat_llm(\n",
+"chat_res = chat_llm.invoke(\n",
 " [\n",
 " SystemMessage(content=\"Every answer of yours must be about OpenAI.\"),\n",
 " HumanMessage(content=\"Tell me a joke\"),\n",
@@ -72,7 +72,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"output = chat([HumanMessage(content=\"write a funny joke\")])\n",
+"output = chat.invoke([HumanMessage(content=\"write a funny joke\")])\n",
 "print(\"output:\", output)"
 ]
 },
@@ -90,7 +90,7 @@
 "outputs": [],
 "source": [
 "kwargs = {\"temperature\": 0.8, \"top_p\": 0.8, \"top_k\": 5}\n",
-"output = chat([HumanMessage(content=\"write a funny joke\")], **kwargs)\n",
+"output = chat.invoke([HumanMessage(content=\"write a funny joke\")], **kwargs)\n",
 "print(\"output:\", output)"
 ]
 },
@@ -62,7 +62,7 @@
 "messages = [system_message, user_message]\n",
 "\n",
 "# chat with wasm-chat service\n",
-"response = chat(messages)\n",
+"response = chat.invoke(messages)\n",
 "\n",
 "print(f\"[Bot] {response.content}\")"
 ]
@@ -119,7 +119,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"response = chat(messages)\n",
+"response = chat.invoke(messages)\n",
 "print(response.content) # Displays the AI-generated poem"
 ]
 },
@@ -147,7 +147,7 @@
 "\n",
 "@ray.remote(num_cpus=0.1)\n",
 "def send_query(llm, prompt):\n",
-" resp = llm(prompt)\n",
+" resp = llm.invoke(prompt)\n",
 " return resp\n",
 "\n",
 "\n",
@@ -96,7 +96,7 @@
 ")\n",
 "\n",
 "print(\n",
-" llm(\n",
+" llm.invoke(\n",
 " '<|system|>Enter RP mode. You are Ayumu \"Osaka\" Kasuga.<|user|>Hey Osaka. Tell me about yourself.<|model|>'\n",
 " )\n",
 ")"
@@ -45,7 +45,7 @@
 "# Load the model\n",
 "llm = BaichuanLLM()\n",
 "\n",
-"res = llm(\"What's your name?\")\n",
+"res = llm.invoke(\"What's your name?\")\n",
 "print(res)"
 ]
 },
@@ -80,7 +80,7 @@
 "os.environ[\"QIANFAN_SK\"] = \"your_sk\"\n",
 "\n",
 "llm = QianfanLLMEndpoint(streaming=True)\n",
-"res = llm(\"hi\")\n",
+"res = llm.invoke(\"hi\")\n",
 "print(res)"
 ]
 },
@@ -185,7 +185,7 @@
 " model=\"ERNIE-Bot-turbo\",\n",
 " endpoint=\"eb-instant\",\n",
 ")\n",
-"res = llm(\"hi\")"
+"res = llm.invoke(\"hi\")"
 ]
 },
 {
@@ -62,7 +62,7 @@
 " } \"\"\"\n",
 "\n",
 "multi_response_llm = NIBittensorLLM(top_responses=10)\n",
-"multi_resp = multi_response_llm(\"What is Neural Network Feeding Mechanism?\")\n",
+"multi_resp = multi_response_llm.invoke(\"What is Neural Network Feeding Mechanism?\")\n",
 "json_multi_resp = json.loads(multi_resp)\n",
 "pprint(json_multi_resp)"
 ]
@@ -62,7 +62,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(llm(\"AI is going to\"))"
+"print(llm.invoke(\"AI is going to\"))"
 ]
 },
 {
@@ -85,7 +85,7 @@
 " model=\"marella/gpt-2-ggml\", callbacks=[StreamingStdOutCallbackHandler()]\n",
 ")\n",
 "\n",
-"response = llm(\"AI is going to\")"
+"response = llm.invoke(\"AI is going to\")"
 ]
 },
 {
@@ -97,7 +97,7 @@
 ],
 "source": [
 "print(\n",
-" llm(\n",
+" llm.invoke(\n",
 " \"He presented me with plausible evidence for the existence of unicorns: \",\n",
 " max_length=256,\n",
 " sampling_topk=50,\n",
@@ -32,7 +32,7 @@
 " model=\"zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none\"\n",
 ")\n",
 "\n",
-"print(llm(\"def fib():\"))"
+"print(llm.invoke(\"def fib():\"))"
 ]
 },
 {
@@ -203,7 +203,7 @@
 "User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?\n",
 "Assistant:\n",
 "\"\"\"\n",
-"print(llm(prompt))"
+"print(llm.invoke(prompt))"
 ]
 },
 {
@@ -359,7 +359,7 @@
 "}\n",
 "message = HumanMessage(content=[text_message, image_message])\n",
 "\n",
-"output = llm([message])\n",
+"output = llm.invoke([message])\n",
 "print(output.content)"
 ]
 },
@@ -432,7 +432,7 @@
 "}\n",
 "message = HumanMessage(content=[text_message, image_message])\n",
 "\n",
-"output = llm([message])\n",
+"output = llm.invoke([message])\n",
 "print(output.content)"
 ]
 },
@@ -457,7 +457,7 @@
 "outputs": [],
 "source": [
 "message2 = HumanMessage(content=\"And where the image is taken?\")\n",
-"output2 = llm([message, output, message2])\n",
+"output2 = llm.invoke([message, output, message2])\n",
 "print(output2.content)"
 ]
 },
@@ -486,7 +486,7 @@
 "}\n",
 "message = HumanMessage(content=[text_message, image_message])\n",
 "\n",
-"output = llm([message])\n",
+"output = llm.invoke([message])\n",
 "print(output.content)"
 ]
 },
@@ -57,7 +57,9 @@
 },
 "outputs": [],
 "source": [
-"response = llm(\"### Instruction:\\nWhat is the first book of the bible?\\n### Response:\")"
+"response = llm.invoke(\n",
+" \"### Instruction:\\nWhat is the first book of the bible?\\n### Response:\"\n",
+")"
 ]
 }
 ],
@@ -90,7 +90,7 @@
 "llm = Konko(model=\"mistralai/mistral-7b-v0.1\", temperature=0.1, max_tokens=128)\n",
 "\n",
 "input_ = \"\"\"You are a helpful assistant. Explain Big Bang Theory briefly.\"\"\"\n",
-"print(llm(input_))"
+"print(llm.invoke(input_))"
 ]
 },
 {
@@ -1020,7 +1020,7 @@
 "source": [
 "%%time\n",
 "\n",
-"print(llm(\"Why is the Moon always showing the same side?\"))"
+"print(llm.invoke(\"Why is the Moon always showing the same side?\"))"
 ]
 },
 {
@@ -1044,7 +1044,7 @@
 "source": [
 "%%time\n",
 "\n",
-"print(llm(\"Why is the Moon always showing the same side?\"))"
+"print(llm.invoke(\"Why is the Moon always showing the same side?\"))"
 ]
 },
 {
@@ -1109,7 +1109,7 @@
 "source": [
 "%%time\n",
 "\n",
-"print(llm(\"Why is the Moon always showing the same side?\"))"
+"print(llm.invoke(\"Why is the Moon always showing the same side?\"))"
 ]
 },
 {
@@ -1133,7 +1133,7 @@
 "source": [
 "%%time\n",
 "\n",
-"print(llm(\"How come we always see one face of the moon?\"))"
+"print(llm.invoke(\"How come we always see one face of the moon?\"))"
 ]
 },
 {
@@ -1238,7 +1238,7 @@
 "source": [
 "%%time\n",
 "\n",
-"print(llm(\"Is a true fakery the same as a fake truth?\"))"
+"print(llm.invoke(\"Is a true fakery the same as a fake truth?\"))"
 ]
 },
 {
@@ -1262,7 +1262,7 @@
 "source": [
 "%%time\n",
 "\n",
-"print(llm(\"Is a true fakery the same as a fake truth?\"))"
+"print(llm.invoke(\"Is a true fakery the same as a fake truth?\"))"
 ]
 },
 {
@@ -1327,7 +1327,7 @@
 "source": [
 "%%time\n",
 "\n",
-"print(llm(\"Are there truths that are false?\"))"
+"print(llm.invoke(\"Are there truths that are false?\"))"
 ]
 },
 {
@@ -1351,7 +1351,7 @@
 "source": [
 "%%time\n",
 "\n",
-"print(llm(\"Is is possible that something false can be also true?\"))"
+"print(llm.invoke(\"Is is possible that something false can be also true?\"))"
 ]
 },
 {
@@ -96,7 +96,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"response = model(\"Can you recommend me a nice dry wine?\")\n",
+"response = model.invoke(\"Can you recommend me a nice dry wine?\")\n",
 "print(response)"
 ]
 },
@@ -269,7 +269,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# response = model(\"Can you help categorize the following emails into positive, negative, and neutral?\")"
+"# response = model.invoke(\"Can you help categorize the following emails into positive, negative, and neutral?\")"
 ]
 }
 ],
@@ -323,7 +323,7 @@
 "User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?\n",
 "Assistant:\n",
 "\"\"\"\n",
-"_ = llm(prompt)"
+"_ = llm.invoke(prompt)"
 ]
 },
 {
@@ -376,13 +376,13 @@
 "Assistant:\n",
 "\"\"\"\n",
 "start_time = time.perf_counter()\n",
-"raw_output = llm(prompt) # raw output, no stop\n",
+"raw_output = llm.invoke(prompt) # raw output, no stop\n",
 "end_time = time.perf_counter()\n",
 "print(f\"Raw output:\\n {raw_output}\")\n",
 "print(f\"Raw output runtime: {end_time - start_time} seconds\")\n",
 "\n",
 "start_time = time.perf_counter()\n",
-"stopped_output = llm(prompt, stop=[\"\\n\\n\"]) # stop on double newlines\n",
+"stopped_output = llm.invoke(prompt, stop=[\"\\n\\n\"]) # stop on double newlines\n",
 "end_time = time.perf_counter()\n",
 "print(f\"Stopped output:\\n {stopped_output}\")\n",
 "print(f\"Stopped output runtime: {end_time - start_time} seconds\")"
@@ -65,7 +65,7 @@
 "# Load the model\n",
 "llm = SparkLLM()\n",
 "\n",
-"res = llm(\"What's your name?\")\n",
+"res = llm.invoke(\"What's your name?\")\n",
 "print(res)"
 ]
 },
@@ -23,7 +23,7 @@ It provides a unified interface for all models:
 ```python
 llm = CTransformers(model='/path/to/ggml-gpt-2.bin', model_type='gpt2')

-print(llm('AI is going to'))
+print(llm.invoke('AI is going to'))
 ```

 If you are getting `illegal instruction` error, try using `lib='avx'` or `lib='basic'`:
@@ -22,7 +22,7 @@ It provides a unified interface for all models:
 ```python
 llm = DeepSparse(model='zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none')

-print(llm('def fib():'))
+print(llm.invoke('def fib():'))
 ```

 Additional parameters can be passed using the `config` parameter:
@@ -83,7 +83,7 @@ def langchain_llm() -> str:
         temperature=0.2,
         callbacks=[FlyteCallbackHandler()],
     )
-    return llm([HumanMessage(content="Tell me a joke")]).content
+    return llm.invoke([HumanMessage(content="Tell me a joke")]).content
 ```

 ### Chain
@@ -27,7 +27,7 @@ from langchain_community.llms import GPT4All
 model = GPT4All(model="./models/mistral-7b-openorca.Q4_0.gguf", n_threads=8)

 # Generate text
-response = model("Once upon a time, ")
+response = model.invoke("Once upon a time, ")
 ```

 You can also customize the generation parameters, such as n_predict, temp, top_p, top_k, and others.
@@ -29,7 +29,7 @@ openai.api_base = "https://oai.hconeai.com/v1"

 llm = OpenAI(temperature=0.9, headers={"Helicone-Cache-Enabled": "true"})
 text = "What is a helicone?"
-print(llm(text))
+print(llm.invoke(text))
 ```

 [Helicone caching docs](https://docs.helicone.ai/advanced-usage/caching)
@@ -47,7 +47,7 @@ llm = OpenAI(temperature=0.9, headers={
 "Helicone-Property-App": "mobile",
 })
 text = "What is a helicone?"
-print(llm(text))
+print(llm.invoke(text))
 ```

 [Helicone property docs](https://docs.helicone.ai/advanced-usage/custom-properties)
@@ -44,7 +44,7 @@ See a usage [example](/docs/integrations/llms/konko).
 from langchain.llms import Konko
 llm = Konko(max_tokens=800, model='mistralai/Mistral-7B-v0.1')
 prompt = "Generate a Product Description for Apple Iphone 15"
-response = llm(prompt)
+response = llm.invoke(prompt)
 ```

 ## Chat Models
@@ -23,7 +23,7 @@ model = Predibase(
 predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)
 )

-response = model("Can you recommend me a nice dry wine?")
+response = model.invoke("Can you recommend me a nice dry wine?")
 print(response)
 ```

@@ -44,7 +44,7 @@ model = Predibase(
 adapter_version=1,
 )

-response = model("Can you recommend me a nice dry wine?")
+response = model.invoke("Can you recommend me a nice dry wine?")
 print(response)
 ```

@@ -64,6 +64,6 @@ model = Predibase(
 adapter_id="predibase/e2e_nlg",
 )

-response = model("Can you recommend me a nice dry wine?")
+response = model.invoke("Can you recommend me a nice dry wine?")
 print(response)
 ```
@@ -44,7 +44,7 @@ def generate_prompt(instruction, input=None):


 model = RWKV(model="./models/RWKV-4-Raven-3B-v7-Eng-20230404-ctx4096.pth", strategy="cpu fp32", tokens_path="./rwkv/20B_tokenizer.json")
-response = model(generate_prompt("Once upon a time, "))
+response = model.invoke(generate_prompt("Once upon a time, "))
 ```
 ## Model File

@@ -545,7 +545,7 @@
 ")\n",
 "\n",
 "_input = prompt.format_prompt(text=dataset[0][\"text\"])\n",
-"output = llm(_input.to_string())\n",
+"output = llm.invoke(_input.to_string())\n",
 "\n",
 "parsed = parser.parse(output)\n",
 "parsed"