experimental[minor]: Add bind_tools and with_structured_output functions to OllamaFunctions (#20881)

Implemented bind_tools for OllamaFunctions.
Made OllamaFunctions a subclass of ChatOllama.
Implemented with_structured_output for OllamaFunctions.

The integration unit test has been updated.
The notebook has been updated.
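
A minimal sketch of the new `bind_tools` surface, drawn from the updated notebook (it assumes a local Ollama server with the llama3 model pulled; the `parameters` JSON schema and the invocation prompt below are illustrative, since the diff only shows the tool's name and description):

```python
from langchain_experimental.llms.ollama_functions import OllamaFunctions

model = OllamaFunctions(model="llama3", format="json")

# bind_tools() accepts OpenAI-style tool definitions, as in the notebook's
# get_current_weather example. The parameters schema here is illustrative.
model_with_tools = model.bind_tools(
    tools=[
        {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. Boston, MA",
                    }
                },
                "required": ["location"],
            },
        }
    ]
)

# The bound model returns an AIMessage whose additional_kwargs carry a
# function_call such as:
# {'name': 'get_current_weather', 'arguments': '{"location": "Boston, MA"}'}
response = model_with_tools.invoke("What is the weather in Boston?")
```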

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
Author: Karim Lalani
Date: 2024-04-29 09:13:33 -05:00
Committed by: GitHub
Parent: d781560722
Commit: 2ddac9a7c3
3 changed files with 401 additions and 70 deletions


@@ -17,7 +17,7 @@
"\n",
"This notebook shows how to use an experimental wrapper around Ollama that gives it the same API as OpenAI Functions.\n",
"\n",
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use Mistral.\n",
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use llama3 and phi3 models.\n",
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
"\n",
"## Setup\n",
@@ -32,12 +32,18 @@
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"metadata": {
"ExecuteTime": {
"end_time": "2024-04-28T00:53:25.276543Z",
"start_time": "2024-04-28T00:53:24.881202Z"
},
"scrolled": true
},
"outputs": [],
"source": [
"from langchain_experimental.llms.ollama_functions import OllamaFunctions\n",
"\n",
"model = OllamaFunctions(model=\"mistral\")"
"model = OllamaFunctions(model=\"llama3\", format=\"json\")"
]
},
{
@@ -50,11 +56,16 @@
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"metadata": {
"ExecuteTime": {
"end_time": "2024-04-26T04:59:17.270931Z",
"start_time": "2024-04-26T04:59:17.263347Z"
}
},
"outputs": [],
"source": [
"model = model.bind(\n",
" functions=[\n",
"model = model.bind_tools(\n",
" tools=[\n",
" {\n",
" \"name\": \"get_current_weather\",\n",
" \"description\": \"Get the current weather in a given location\",\n",
@@ -88,12 +99,17 @@
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"metadata": {
"ExecuteTime": {
"end_time": "2024-04-26T04:59:26.092428Z",
"start_time": "2024-04-26T04:59:17.272627Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_weather', 'arguments': '{\"location\": \"Boston, MA\", \"unit\": \"celsius\"}'}})"
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_weather', 'arguments': '{\"location\": \"Boston, MA\"}'}}, id='run-1791f9fe-95ad-4ca4-bdf7-9f73eab31e6f-0')"
]
},
"execution_count": 3,
@@ -111,54 +127,119 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using for extraction\n",
"## Structured Output\n",
"\n",
"One useful thing you can do with function calling here is extracting properties from a given input in a structured format:"
"One useful thing you can do with function calling using `with_structured_output()` function is extracting properties from a given input in a structured format:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"ExecuteTime": {
"end_time": "2024-04-26T04:59:26.098828Z",
"start_time": "2024-04-26T04:59:26.094021Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"# Schema for structured response\n",
"class Person(BaseModel):\n",
" name: str = Field(description=\"The person's name\", required=True)\n",
" height: float = Field(description=\"The person's height\", required=True)\n",
" hair_color: str = Field(description=\"The person's hair color\")\n",
"\n",
"\n",
"# Prompt template\n",
"prompt = PromptTemplate.from_template(\n",
" \"\"\"Alex is 5 feet tall. \n",
"Claudia is 1 feet taller than Alex and jumps higher than him. \n",
"Claudia is a brunette and Alex is blonde.\n",
"\n",
"Human: {question}\n",
"AI: \"\"\"\n",
")\n",
"\n",
"# Chain\n",
"llm = OllamaFunctions(model=\"phi3\", format=\"json\", temperature=0)\n",
"structured_llm = llm.with_structured_output(Person)\n",
"chain = prompt | structured_llm"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Extracting data about Alex"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"ExecuteTime": {
"end_time": "2024-04-26T04:59:30.164955Z",
"start_time": "2024-04-26T04:59:26.099790Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'Alex', 'height': 5, 'hair_color': 'blonde'},\n",
" {'name': 'Claudia', 'height': 6, 'hair_color': 'brunette'}]"
"Person(name='Alex', height=5.0, hair_color='blonde')"
]
},
"execution_count": 4,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.chains import create_extraction_chain\n",
"\n",
"# Schema\n",
"schema = {\n",
" \"properties\": {\n",
" \"name\": {\"type\": \"string\"},\n",
" \"height\": {\"type\": \"integer\"},\n",
" \"hair_color\": {\"type\": \"string\"},\n",
" },\n",
" \"required\": [\"name\", \"height\"],\n",
"}\n",
"\n",
"# Input\n",
"input = \"\"\"Alex is 5 feet tall. Claudia is 1 feet taller than Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\"\"\"\n",
"\n",
"# Run chain\n",
"llm = OllamaFunctions(model=\"mistral\", temperature=0)\n",
"chain = create_extraction_chain(schema, llm)\n",
"chain.run(input)"
"alex = chain.invoke(\"Describe Alex\")\n",
"alex"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Extracting data about Claudia"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"ExecuteTime": {
"end_time": "2024-04-26T04:59:31.509846Z",
"start_time": "2024-04-26T04:59:30.165662Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Person(name='Claudia', height=6.0, hair_color='brunette')"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"claudia = chain.invoke(\"Describe Claudia\")\n",
"claudia"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -172,9 +253,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
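
For reference, a condensed, runnable sketch of the structured-output flow the updated notebook walks through (it assumes a local Ollama server with the phi3 model pulled):

```python
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_experimental.llms.ollama_functions import OllamaFunctions


# Schema for the structured response, as defined in the notebook.
class Person(BaseModel):
    name: str = Field(description="The person's name", required=True)
    height: float = Field(description="The person's height", required=True)
    hair_color: str = Field(description="The person's hair color")


# The context is embedded in the prompt; the question is supplied at invoke time.
prompt = PromptTemplate.from_template(
    """Alex is 5 feet tall.
Claudia is 1 foot taller than Alex and jumps higher than him.
Claudia is a brunette and Alex is blonde.

Human: {question}
AI: """
)

# with_structured_output() wraps the model so its JSON output is parsed into
# the Person schema, and the chain yields Person instances directly.
llm = OllamaFunctions(model="phi3", format="json", temperature=0)
structured_llm = llm.with_structured_output(Person)
chain = prompt | structured_llm

alex = chain.invoke("Describe Alex")        # Person(name='Alex', height=5.0, hair_color='blonde')
claudia = chain.invoke("Describe Claudia")  # Person(name='Claudia', height=6.0, hair_color='brunette')
```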