mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-05 04:55:14 +00:00)
community: extend Predibase integration to support fine-tuned LLM adapters (#19979)
- [x] **PR title**: "package: description" - Where "package" is whichever of langchain, community, core, experimental, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes. Example: "community: add foobar LLM"
- [x] **PR message**:
  - **Description:** The Langchain-Predibase integration was failing because it was not current with the Predibase SDK; in addition, the Predibase integration tests were instantiating the Langchain Community `Predibase` class with one required argument (`model`) missing. This change updates the Predibase SDK usage and fixes the integration tests.
  - **Twitter handle:** `@alexsherstinsky`
- [x] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See contribution guidelines for more: https://python.langchain.com/docs/contributing/

Additional guidelines:
- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in langchain.

If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, hwchase17.

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
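For context, here is a minimal sketch of the usage pattern this change documents. It is an illustrative example rather than the exact test code: it assumes the optional `predibase` SDK is installed and that `PREDIBASE_API_TOKEN` is set in the environment; the `mistral-7b` base model and the `predibase/e2e_nlg` adapter are the ones used in the updated docs.

```python
import os

from langchain_community.llms import Predibase

# Base-model-only usage: `model` is a required argument.
model = Predibase(
    model="mistral-7b",
    predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"),
)

# Optionally attach an adapter fine-tuned on the specified base model.
model_with_adapter = Predibase(
    model="mistral-7b",
    adapter_id="predibase/e2e_nlg",
    predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"),
)

response = model_with_adapter("Can you recommend me a nice dry wine?")
print(response)
```

The `adapter_id` argument is optional; omitting it queries the base model directly.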
@@ -50,7 +50,24 @@
 "from langchain_community.llms import Predibase\n",
 "\n",
 "model = Predibase(\n",
-" model=\"vicuna-13b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
+" model=\"mistral-7b\",\n",
+" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
 ")"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from langchain_community.llms import Predibase\n",
+"\n",
+"# With an adapter, fine-tuned on the specified model\n",
+"model = Predibase(\n",
+" model=\"mistral-7b\",\n",
+" adapter_id=\"predibase/e2e_nlg\",\n",
+" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
+")"
+]
+},
@@ -66,19 +83,43 @@
 },
 {
 "cell_type": "markdown",
-"metadata": {},
+"metadata": {
+"vscode": {
+"languageId": "plaintext"
+}
+},
 "source": [
 "## Chain Call Setup"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"vscode": {
+"languageId": "plaintext"
+}
+},
+"outputs": [],
+"source": [
+"from langchain_community.llms import Predibase\n",
+"\n",
+"model = Predibase(\n",
+" model=\"mistral-7b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
+")"
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
+"# With an adapter, fine-tuned on the specified model\n",
 "llm = Predibase(\n",
-" model=\"vicuna-13b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
+" model=\"mistral-7b\",\n",
+" adapter_id=\"predibase/e2e_nlg\",\n",
+" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
 ")"
 ]
 },
@@ -169,7 +210,11 @@
 "from langchain_community.llms import Predibase\n",
 "\n",
 "model = Predibase(\n",
-" model=\"my-finetuned-LLM\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
+" model=\"my-base-LLM\",\n",
+" adapter_id=\"my-finetuned-adapter-id\",\n",
+" predibase_api_key=os.environ.get(\n",
+" \"PREDIBASE_API_TOKEN\"\n",
+" ), # Adapter argument is optional.\n",
 ")\n",
 "# replace my-finetuned-LLM with the name of your model in Predibase"
 ]
@@ -17,7 +17,21 @@ os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}"
 
 from langchain_community.llms import Predibase
 
-model = Predibase(model = 'vicuna-13b', predibase_api_key=os.environ.get('PREDIBASE_API_TOKEN'))
+model = Predibase(model="mistral-7b", predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"))
 
 response = model("Can you recommend me a nice dry wine?")
 print(response)
+```
+
+Predibase also supports adapters that are fine-tuned on the base model given by the `model` argument:
+
+```python
+import os
+os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}"
+
+from langchain_community.llms import Predibase
+
+model = Predibase(model="mistral-7b", adapter_id="predibase/e2e_nlg", predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"))
+
+response = model("Can you recommend me a nice dry wine?")
+print(response)