General anthropic functions, steps towards experimental integration tests (#11727)

This matches the corresponding change in LangChain.js:
https://github.com/langchain-ai/langchainjs/pull/2892

Some integration tests need a bit more work in experimental:
![Screenshot 2023-10-12 at 12 02 49 PM](https://github.com/langchain-ai/langchain/assets/9557659/262d7d22-c405-40e9-afef-669e8d585307)

Pretty sure the sqldatabase failures are an actual regression or an interface change, because the chain is returning a placeholder.

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
Erick Friis, 2023-10-13 09:48:24 -07:00, committed by GitHub
commit 1861cc7100, parent 98c8516ef1
7 changed files with 128 additions and 7 deletions


@@ -18,6 +18,9 @@ test_watch:
 extended_tests:
 	poetry run pytest --only-extended tests/unit_tests
 
+integration_tests:
+	poetry run pytest tests/integration_tests
+
 ######################
 # LINTING AND FORMATTING


@@ -124,11 +124,17 @@ def _destrip(tool_input: Any) -> Any:
 class AnthropicFunctions(BaseChatModel):
-    model: ChatAnthropic
+    llm: BaseChatModel
 
     @root_validator(pre=True)
     def validate_environment(cls, values: Dict) -> Dict:
-        return {"model": ChatAnthropic(**values)}
+        values["llm"] = values.get("llm") or ChatAnthropic(**values)
+        return values
+
+    @property
+    def model(self) -> BaseChatModel:
+        """For backwards compatibility."""
+        return self.llm
 
     def _generate(
         self,
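With this change `AnthropicFunctions` wraps an arbitrary chat model through the new `llm` field instead of being hard-wired to `ChatAnthropic`, and `model` survives as a read-only alias. A minimal sketch of the two construction paths, based on the diff and the new integration test (it assumes Anthropic and AWS Bedrock credentials are already configured in the environment):

```python
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.chat_models.bedrock import BedrockChat

from langchain_experimental.llms.anthropic_functions import AnthropicFunctions

# Default path: constructor kwargs are forwarded to ChatAnthropic by the root validator.
anthropic_fn = AnthropicFunctions(model="claude-2")
assert isinstance(anthropic_fn.llm, ChatAnthropic)

# New path: pass any BaseChatModel explicitly via `llm`, e.g. Claude on Bedrock.
bedrock_fn = AnthropicFunctions(
    llm=BedrockChat(model_id="anthropic.claude-v2", region_name="us-east-1")
)

# `model` is now a property that simply returns `llm`, kept for backwards compatibility.
assert bedrock_fn.model is bedrock_fn.llm
```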


@@ -84,5 +84,6 @@ addopts = "--strict-markers --strict-config --durations=5"
 # Registering custom markers.
 # https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers
 markers = [
-    "requires: mark tests as requiring a specific library"
+    "requires: mark tests as requiring a specific library",
+    "asyncio: mark tests as requiring asyncio"
 ]
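Because the pytest config runs with `--strict-markers`, any marker used in a test has to be registered in this list first. A hypothetical async test using both registered markers might look like the sketch below (the test name and body are illustrative, not part of this PR):

```python
import pytest


@pytest.mark.requires("anthropic")  # assumes the repo's conftest skips the test when the package is missing
@pytest.mark.asyncio  # needs the pytest-asyncio plugin to drive the event loop
async def test_anthropic_functions_async() -> None:
    """Illustrative placeholder for an async test guarded by both markers."""
    ...
```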


@@ -39,7 +39,7 @@ from langchain_experimental.cpal.templates.univariate.narrative import (
 from langchain_experimental.cpal.templates.univariate.query import (
     template as query_template,
 )
-from tests.unit_tests.llms.fake_llm import FakeLLM
+from tests.unit_tests.fake_llm import FakeLLM
 
 class TestUnitCPALChain_MathWordProblems(unittest.TestCase):


@@ -1,8 +1,9 @@
 """Test PAL chain."""
-from langchain.chains.pal.base import PALChain
 from langchain.llms import OpenAI
 
+from langchain_experimental.pal_chain.base import PALChain
+
 
 def test_math_prompt() -> None:
     """Test math prompt."""


@@ -1,11 +1,12 @@
 """Test SQL Database Chain."""
 from langchain.llms.openai import OpenAI
 from langchain.utilities.sql_database import SQLDatabase
-from libs.experimental.langchain_experimental.sql.base import (
+from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, insert
+
+from langchain_experimental.sql.base import (
     SQLDatabaseChain,
     SQLDatabaseSequentialChain,
 )
-from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, insert
 
 metadata_obj = MetaData()


@@ -0,0 +1,109 @@
"""Test AnthropicFunctions"""

import unittest

from langchain.chat_models.anthropic import ChatAnthropic
from langchain.chat_models.bedrock import BedrockChat

from langchain_experimental.llms.anthropic_functions import AnthropicFunctions


class TestAnthropicFunctions(unittest.TestCase):
    """
    Test AnthropicFunctions with default llm (ChatAnthropic) as well as a passed-in llm
    """

    def test_default_chat_anthropic(self) -> None:
        base_model = AnthropicFunctions(model="claude-2")
        self.assertIsInstance(base_model.model, ChatAnthropic)

        # bind functions
        model = base_model.bind(
            functions=[
                {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, "
                                "e.g. San Francisco, CA",
                            },
                            "unit": {
                                "type": "string",
                                "enum": ["celsius", "fahrenheit"],
                            },
                        },
                        "required": ["location"],
                    },
                }
            ],
            function_call={"name": "get_current_weather"},
        )

        res = model.invoke("What's the weather in San Francisco?")

        function_call = res.additional_kwargs.get("function_call")
        assert function_call
        self.assertEqual(function_call.get("name"), "get_current_weather")
        self.assertEqual(
            function_call.get("arguments"),
            '{"location": "San Francisco, CA", "unit": "fahrenheit"}',
        )

    def test_bedrock_chat_anthropic(self) -> None:
        """
        const chatBedrock = new ChatBedrock({
          region: process.env.BEDROCK_AWS_REGION ?? "us-east-1",
          model: "anthropic.claude-v2",
          temperature: 0.1,
          credentials: {
            secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
            accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
          },
        });"""
        llm = BedrockChat(
            model_id="anthropic.claude-v2",
            model_kwargs={"temperature": 0.1},
            region_name="us-east-1",
        )
        base_model = AnthropicFunctions(llm=llm)
        assert isinstance(base_model.model, BedrockChat)

        # bind functions
        model = base_model.bind(
            functions=[
                {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, "
                                "e.g. San Francisco, CA",
                            },
                            "unit": {
                                "type": "string",
                                "enum": ["celsius", "fahrenheit"],
                            },
                        },
                        "required": ["location"],
                    },
                }
            ],
            function_call={"name": "get_current_weather"},
        )

        res = model.invoke("What's the weather in San Francisco?")

        function_call = res.additional_kwargs.get("function_call")
        assert function_call
        self.assertEqual(function_call.get("name"), "get_current_weather")
        self.assertEqual(
            function_call.get("arguments"),
            '{"location": "San Francisco, CA", "unit": "fahrenheit"}',
        )