mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-21 06:14:37 +00:00)
parent f97e1825b7
commit 21d6f1fc6a
@@ -53,7 +53,7 @@
    "id": "f5ccda4e-7af5-4355-b9c4-25547edf33f9",
    "metadata": {},
    "source": [
-    "Lets first load up this paper, and split into text chunks of size 1000."
+    "Let's first load up this paper, and split into text chunks of size 1000."
    ]
   },
   {
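For context, the cell this hunk touches loads a paper and splits it into 1000-character chunks. A minimal sketch of that step, assuming a PDF loaded with `PyPDFLoader` (the notebook's actual loader and file path may differ):

```python
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Load the paper (the path is illustrative, not the one used in the notebook).
docs = PyPDFLoader("paper.pdf").load()

# Split into chunks of roughly 1000 characters, as the cell describes.
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
chunks = splitter.split_documents(docs)
```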
@@ -241,7 +241,7 @@
    "id": "360b2837-8024-47e0-a4ba-592505a9a5c8",
    "metadata": {},
    "source": [
-    "With our embedder in place, lets define our retriever:"
+    "With our embedder in place, let's define our retriever:"
    ]
   },
   {
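The cell that follows this hunk defines a retriever on top of the embedder. A hedged sketch of the usual pattern, using an in-memory vector store and an OpenAI embedder as stand-ins for whatever the notebook actually configures:

```python
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import OpenAIEmbeddings  # stand-in for the notebook's embedder

embedder = OpenAIEmbeddings()
vector_store = InMemoryVectorStore(embedding=embedder)

# Expose the store as a retriever that returns the top 4 matching chunks.
retriever = vector_store.as_retriever(search_kwargs={"k": 4})
```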
@@ -312,7 +312,7 @@
    "id": "d84ea8f4-a5de-4d76-b44d-85e56583f489",
    "metadata": {},
    "source": [
-    "Lets write our documents into our new store. This will use our embedder on each document."
+    "Let's write our documents into our new store. This will use our embedder on each document."
    ]
   },
   {
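Writing the documents into the store is a single call; the store invokes the embedder once per document. A sketch, reusing the `vector_store` and `chunks` names assumed in the earlier sketches:

```python
# Index the chunks; embeddings are computed here, one per document.
ids = vector_store.add_documents(chunks)
print(f"Indexed {len(ids)} chunks")
```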
@@ -339,7 +339,7 @@
    "id": "580bc212-8ecd-4d28-8656-b96fcd0d7eb6",
    "metadata": {},
    "source": [
-    "Great! Our retriever is good to go. Lets load up an LLM, that will reason over the retrieved documents:"
+    "Great! Our retriever is good to go. Let's load up an LLM, that will reason over the retrieved documents:"
    ]
   },
   {
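A hedged sketch of wiring a chat model to the retriever with a simple RAG chain; the notebook's actual model and chain construction may differ, and `retriever` is the object from the sketch above:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model choice

prompt = ChatPromptTemplate.from_template(
    "Answer using only this context:\n\n{context}\n\nQuestion: {question}"
)

def format_docs(docs):
    # Concatenate the retrieved chunks into one context string.
    return "\n\n".join(doc.page_content for doc in docs)

# Retrieved documents are stuffed into the prompt before the model sees the question.
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
```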
@@ -430,7 +430,7 @@
    "id": "3bc53602-86d6-420f-91b1-fc2effa7e986",
    "metadata": {},
    "source": [
-    "Excellent! lets ask it a question.\n",
+    "Excellent! Let's ask it a question.\n",
     "We will also use a verbose and debug, to check which documents were used by the model to produce the answer."
    ]
   },
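The "verbose and debug" mentioned in this cell map to LangChain's global logging flags. A short sketch, assuming the `rag_chain` built in the previous sketch:

```python
from langchain.globals import set_debug, set_verbose

# Print prompts, retrieved documents and intermediate steps while the chain runs.
set_verbose(True)
set_debug(True)

print(rag_chain.invoke("What problem does the paper address?"))  # illustrative question
```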
@@ -40,7 +40,7 @@
    "from langchain_core.globals import set_llm_cache\n",
    "from langchain_openai import OpenAI\n",
    "\n",
-    "# To make the caching really obvious, lets use a slower and older model.\n",
+    "# To make the caching really obvious, let's use a slower and older model.\n",
    "# Caching supports newer chat models as well.\n",
    "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
   ]
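For context, this cell sets up the LLM whose calls get cached. A minimal sketch of the surrounding caching demo, using the standard in-memory cache (the notebook may also demonstrate other cache backends):

```python
from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache
from langchain_openai import OpenAI

# To make the caching really obvious, use a slower and older model.
# Caching supports newer chat models as well.
llm = OpenAI(model="gpt-3.5-turbo-instruct", n=2, best_of=2)

set_llm_cache(InMemoryCache())

llm.invoke("Tell me a joke")  # first call goes to the API
llm.invoke("Tell me a joke")  # identical prompt is served from the cache
```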
@@ -51,7 +51,7 @@
    "from langchain.globals import set_llm_cache\n",
    "from langchain_openai import OpenAI\n",
    "\n",
-    "# To make the caching really obvious, lets use a slower and older model.\n",
+    "# To make the caching really obvious, let's use a slower and older model.\n",
    "# Caching supports newer chat models as well.\n",
    "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
   ]
@@ -211,7 +211,7 @@
    "id": "b6e7b9cf-8ce5-4f87-b4bf-100321ad2dd1",
    "metadata": {},
    "source": [
-    "***The result is usually closer to the JSON object of the schema definition, rather than a json object conforming to the schema. Lets try to enforce proper output.***"
+    "***The result is usually closer to the JSON object of the schema definition, rather than a json object conforming to the schema. Let's try to enforce proper output.***"
    ]
   },
   {
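The cell this hunk edits is about making the model emit output that actually conforms to a schema rather than echoing the schema itself. One common way to enforce that in LangChain, not necessarily the approach this particular notebook takes, is a Pydantic-backed structured output call; the schema below is purely illustrative:

```python
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI

class Person(BaseModel):
    """Illustrative schema; the notebook's real schema is not shown in this diff."""
    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age")

llm = ChatOpenAI(model="gpt-4o-mini")
structured_llm = llm.with_structured_output(Person)

result = structured_llm.invoke("Anna is 29 years old.")
# `result` is a Person instance, so the output is guaranteed to match the schema.
print(result)
```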
@@ -49,7 +49,7 @@ The power of the AI gateway comes when you're able to use the above code snippet
 
 Let's modify the code above to make a call to Anthropic's `claude-3-opus-20240229` model.
 
-Portkey supports **[Virtual Keys](https://docs.portkey.ai/docs/product/ai-gateway-streamline-llm-integrations/virtual-keys)** which are an easy way to store and manage API keys in a secure vault. Lets try using a Virtual Key to make LLM calls. You can navigate to the Virtual Keys tab in Portkey and create a new key for Anthropic.
+Portkey supports **[Virtual Keys](https://docs.portkey.ai/docs/product/ai-gateway-streamline-llm-integrations/virtual-keys)** which are an easy way to store and manage API keys in a secure vault. Let's try using a Virtual Key to make LLM calls. You can navigate to the Virtual Keys tab in Portkey and create a new key for Anthropic.
 
 The `virtual_key` parameter sets the authentication and provider for the AI provider being used. In our case we're using the Anthropic Virtual key.
 
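For reference, routing a LangChain call through Portkey with a Virtual Key looks roughly like the sketch below, based on Portkey's public integration docs; the key values are placeholders and the exact header options may differ from what this doc page shows:

```python
from langchain_openai import ChatOpenAI
from portkey_ai import PORTKEY_GATEWAY_URL, createHeaders

# The Virtual Key stored in Portkey resolves to the Anthropic credentials,
# so no Anthropic API key is passed directly here.
portkey_headers = createHeaders(
    api_key="PORTKEY_API_KEY",            # placeholder
    virtual_key="ANTHROPIC_VIRTUAL_KEY",  # placeholder
)

llm = ChatOpenAI(
    api_key="X",  # dummy value; auth is handled by the gateway headers
    base_url=PORTKEY_GATEWAY_URL,
    default_headers=portkey_headers,
    model="claude-3-opus-20240229",
)

print(llm.invoke("What is the meaning of life, the universe and everything?").content)
```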
@@ -61,7 +61,7 @@
    "id": "34318164-7a6f-47b6-8690-3b1d71e1fcfc",
    "metadata": {},
    "source": [
-    "Lets ask a question, and compare to 2 documents. The first contains the answer to the question, and the second one does not. \n",
+    "Let's ask a question, and compare to 2 documents. The first contains the answer to the question, and the second one does not. \n",
    "\n",
    "We can check better suits our query."
   ]
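The cell behind this hunk scores a question against two candidate documents, only one of which contains the answer. A hedged sketch of one way to do that comparison with embeddings and cosine similarity; the notebook may use a different embedder or a cross-encoder, and the texts below are illustrative:

```python
import numpy as np
from langchain_openai import OpenAIEmbeddings  # illustrative embedder

embedder = OpenAIEmbeddings()

question = "What colour is the sky on a clear day?"
doc_with_answer = "On a clear day the sky appears blue because of Rayleigh scattering."
doc_without_answer = "The 2022 World Cup final was played in Qatar."

q_vec = np.array(embedder.embed_query(question))
doc_vecs = [np.array(v) for v in embedder.embed_documents([doc_with_answer, doc_without_answer])]

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

# The document containing the answer should score higher against the query.
for label, vec in zip(["with answer", "without answer"], doc_vecs):
    print(label, cosine(q_vec, vec))
```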
@@ -25,7 +25,7 @@
    "source": [
    "## PremEmbeddings\n",
    "\n",
-    "In this section we are going to dicuss how we can get access to different embedding model using `PremEmbeddings` with LangChain. Lets start by importing our modules and setting our API Key. "
+    "In this section we are going to dicuss how we can get access to different embedding model using `PremEmbeddings` with LangChain. Let's start by importing our modules and setting our API Key. "
    ]
   },
   {
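The edited cell introduces `PremEmbeddings` and then imports modules and sets an API key. A sketch of that setup; the class name follows the notebook's wording (newer langchain_community releases may expose it as `PremAIEmbeddings`), and the `project_id` and model name are placeholders, not the notebook's actual values:

```python
import getpass
import os

# Class name taken from the notebook text; may be PremAIEmbeddings in current releases.
from langchain_community.embeddings import PremEmbeddings

# PremAI authenticates via an API key in the environment.
if "PREMAI_API_KEY" not in os.environ:
    os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key: ")

# project_id and model are placeholders; pick them from your PremAI project.
embedder = PremEmbeddings(project_id=8, model="text-embedding-3-large")

vector = embedder.embed_query("Hello, PremEmbeddings!")
print(len(vector))
```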