Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-03 20:16:52 +00:00

Add Writer, Banana, Modal, StochasticAI (#1270)

Add LLM wrappers and examples for Banana, Writer, Modal, and StochasticAI. Added a rigid JSON response format for Banana and Modal.
74  docs/ecosystem/bananadev.md  Normal file

@@ -0,0 +1,74 @@
# Banana

This page covers how to use the Banana ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Banana wrappers.

## Installation and Setup

- Install with `pip3 install banana-dev`
- Get a Banana API key and set it as an environment variable (`BANANA_API_KEY`), as shown in the snippet below
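
For example (mirroring the `banana.ipynb` notebook added in this commit; the key value is a placeholder), the variable can be set from Python:

```python
import os

# Placeholder value; substitute your real Banana API key
os.environ["BANANA_API_KEY"] = "YOUR_API_KEY"
```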

## Define your Banana Template

If you want to use an available language model template you can find one [here](https://app.banana.dev/templates/conceptofmind/serverless-template-palmyra-base).
This template uses the Palmyra-Base model by [Writer](https://writer.com/product/api/).
You can check out an example Banana repository [here](https://github.com/conceptofmind/serverless-template-palmyra-base).

## Build the Banana app

You must include an `output` key in the returned result; the response structure is rigid:
```python
# Return the results as a dictionary
result = {'output': result}
```

An example inference function would be:

```python
def inference(model_inputs: dict) -> dict:
    global model
    global tokenizer

    # Parse out your arguments
    prompt = model_inputs.get('prompt', None)
    if prompt is None:
        return {'message': "No prompt provided"}

    # Run the model
    input_ids = tokenizer.encode(prompt, return_tensors='pt').cuda()
    output = model.generate(
        input_ids,
        max_length=100,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        num_return_sequences=1,
        temperature=0.9,
        early_stopping=True,
        no_repeat_ngram_size=3,
        num_beams=5,
        length_penalty=1.5,
        repetition_penalty=1.5,
        bad_words_ids=[[tokenizer.encode(' ', add_prefix_space=True)[0]]]
    )

    result = tokenizer.decode(output[0], skip_special_tokens=True)
    # Return the results as a dictionary
    result = {'output': result}
    return result
```

You can find a full example of a Banana app [here](https://github.com/conceptofmind/serverless-template-palmyra-base/blob/main/app.py).

## Wrappers

### LLM

There exists a Banana LLM wrapper, which you can access with

```python
from langchain.llms import Banana
```

You need to provide a model key, located in the dashboard:

```python
llm = Banana(model_key="YOUR_MODEL_KEY")
```
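
Once instantiated, the wrapper can be used like any other LangChain LLM. A minimal end-to-end sketch, following the `banana.ipynb` notebook added in this commit (the model key is a placeholder):

```python
from langchain import PromptTemplate, LLMChain
from langchain.llms import Banana

# Simple question-answering prompt
template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

llm = Banana(model_key="YOUR_MODEL_KEY")  # placeholder key
llm_chain = LLMChain(prompt=prompt, llm=llm)

llm_chain.run("What NFL team won the Super Bowl in the year Justin Bieber was born?")
```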

66  docs/ecosystem/modal.md  Normal file

@@ -0,0 +1,66 @@
# Modal

This page covers how to use the Modal ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Modal wrappers.

## Installation and Setup

- Install with `pip install modal-client`
- Run `modal token new`

## Define your Modal Functions and Webhooks

You must include a `prompt` key, and the response structure is rigid:

```python
class Item(BaseModel):
    prompt: str

@stub.webhook(method="POST")
def my_webhook(item: Item):
    return {"prompt": my_function.call(item.prompt)}
```

An example with GPT2:

```python
from pydantic import BaseModel

import modal

stub = modal.Stub("example-get-started")

volume = modal.SharedVolume().persist("gpt2_model_vol")
CACHE_PATH = "/root/model_cache"

@stub.function(
    gpu="any",
    image=modal.Image.debian_slim().pip_install(
        "tokenizers", "transformers", "torch", "accelerate"
    ),
    shared_volumes={CACHE_PATH: volume},
    retries=3,
)
def run_gpt2(text: str):
    from transformers import GPT2Tokenizer, GPT2LMHeadModel
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')
    encoded_input = tokenizer(text, return_tensors='pt').input_ids
    output = model.generate(encoded_input, max_length=50, do_sample=True)
    return tokenizer.decode(output[0], skip_special_tokens=True)


class Item(BaseModel):
    prompt: str

@stub.webhook(method="POST")
def get_text(item: Item):
    return {"prompt": run_gpt2.call(item.prompt)}
```

## Wrappers

### LLM

There exists a Modal LLM wrapper, which you can access with

```python
from langchain.llms import Modal
```
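
The wrapper is then pointed at your deployed webhook. A minimal sketch, following the `modal.ipynb` notebook added in this commit (the endpoint URL is a placeholder):

```python
from langchain.llms import Modal

# URL of the Modal webhook deployed above; placeholder value
llm = Modal(endpoint_url="YOUR_ENDPOINT_URL")
```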

17  docs/ecosystem/stochasticai.md  Normal file

@@ -0,0 +1,17 @@
# StochasticAI

This page covers how to use the StochasticAI ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific StochasticAI wrappers.

## Installation and Setup

- Install with `pip install stochasticx`
- Get a StochasticAI API key and set it as an environment variable (`STOCHASTICAI_API_KEY`)

## Wrappers

### LLM

There exists a StochasticAI LLM wrapper, which you can access with

```python
from langchain.llms import StochasticAI
```
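
The wrapper is pointed at your deployed model's API URL. A minimal sketch, following the `stochasticai.ipynb` notebook added in this commit (the URL is a placeholder):

```python
from langchain.llms import StochasticAI

# API URL of your deployed StochasticAI model; placeholder value
llm = StochasticAI(api_url="YOUR_API_URL")
```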

16  docs/ecosystem/writer.md  Normal file

@@ -0,0 +1,16 @@
# Writer

This page covers how to use the Writer ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Writer wrappers.

## Installation and Setup

- Get a Writer API key and set it as an environment variable (`WRITER_API_KEY`)

## Wrappers

### LLM

There exists a Writer LLM wrapper, which you can access with

```python
from langchain.llms import Writer
```
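
Per the `writer.ipynb` notebook added in this commit, the wrapper takes no required constructor arguments; it is expected to pick up `WRITER_API_KEY` from the environment set above:

```python
from langchain.llms import Writer

# Assumes WRITER_API_KEY is set in the environment (see setup above)
llm = Writer()
```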

@@ -17,6 +17,14 @@ The examples here are all "how-to" guides for how to integrate with various LLM
 
 `Goose AI <./integrations/gooseai_example.html>`_: Covers how to utilize the Goose AI wrapper.
 
+`Writer <./integrations/writer.html>`_: Covers how to utilize the Writer wrapper.
+
+`Banana <./integrations/banana.html>`_: Covers how to utilize the Banana wrapper.
+
+`Modal <./integrations/modal.html>`_: Covers how to utilize the Modal wrapper.
+
+`StochasticAI <./integrations/stochasticai.html>`_: Covers how to utilize the Stochastic AI wrapper.
+
 `Cerebrium <./integrations/cerebriumai_example.html>`_: Covers how to utilize the Cerebrium AI wrapper.
 
 `Petals <./integrations/petals_example.html>`_: Covers how to utilize the Petals wrapper.
85  docs/modules/llms/integrations/banana.ipynb  Normal file

@@ -0,0 +1,85 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Banana\n",
    "This example goes over how to use LangChain to interact with Banana models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from langchain.llms import Banana\n",
    "from langchain import PromptTemplate, LLMChain\n",
    "os.environ[\"BANANA_API_KEY\"] = \"YOUR_API_KEY\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "template = \"\"\"Question: {question}\n",
    "\n",
    "Answer: Let's think step by step.\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm = Banana(model_key=\"YOUR_MODEL_KEY\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm_chain = LLMChain(prompt=prompt, llm=llm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
    "\n",
    "llm_chain.run(question)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9.12 ('palm')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.9.12"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
83  docs/modules/llms/integrations/modal.ipynb  Normal file

@@ -0,0 +1,83 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Modal\n",
    "This example goes over how to use LangChain to interact with Modal models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.llms import Modal\n",
    "from langchain import PromptTemplate, LLMChain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "template = \"\"\"Question: {question}\n",
    "\n",
    "Answer: Let's think step by step.\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm = Modal(endpoint_url=\"YOUR_ENDPOINT_URL\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm_chain = LLMChain(prompt=prompt, llm=llm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
    "\n",
    "llm_chain.run(question)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9.12 ('palm')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.9.12"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -88,7 +88,7 @@
   ],
   "metadata": {
    "kernelspec": {
-    "display_name": "Python 3 (ipykernel)",
+    "display_name": "Python 3.9.12 ('palm')",
     "language": "python",
     "name": "python3"
    },
@@ -102,7 +102,12 @@
     "name": "python",
     "nbconvert_exporter": "python",
     "pygments_lexer": "ipython3",
-    "version": "3.9.1"
+    "version": "3.9.12"
    },
+   "vscode": {
+    "interpreter": {
+     "hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
+    }
+   }
  },
  "nbformat": 4,
83  docs/modules/llms/integrations/stochasticai.ipynb  Normal file

@@ -0,0 +1,83 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# StochasticAI\n",
    "This example goes over how to use LangChain to interact with StochasticAI models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.llms import StochasticAI\n",
    "from langchain import PromptTemplate, LLMChain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "template = \"\"\"Question: {question}\n",
    "\n",
    "Answer: Let's think step by step.\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm = StochasticAI(api_url=\"YOUR_API_URL\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm_chain = LLMChain(prompt=prompt, llm=llm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
    "\n",
    "llm_chain.run(question)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9.12 ('palm')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.9.12"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
83  docs/modules/llms/integrations/writer.ipynb  Normal file

@@ -0,0 +1,83 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Writer\n",
    "This example goes over how to use LangChain to interact with Writer models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.llms import Writer\n",
    "from langchain import PromptTemplate, LLMChain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "template = \"\"\"Question: {question}\n",
    "\n",
    "Answer: Let's think step by step.\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm = Writer()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm_chain = LLMChain(prompt=prompt, llm=llm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
    "\n",
    "llm_chain.run(question)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9.12 ('palm')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.9.12"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}