Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-20 05:43:55 +00:00
experimental[patch]: prompts import fix (#20534)
Replaced `from langchain.prompts` with `from langchain_core.prompts` where it is appropriate. Most of the changes go to `langchain_experimental`. Similar to #20348.
This commit is contained in:
parent 2542a09abc
commit 95dc90609e
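The substitution is mechanical: the prompt classes exported from `langchain.prompts` are re-exports of the same objects defined in `langchain_core.prompts`, so only the import path changes. A minimal sketch of an updated call site (illustrative only, not taken from the diff below; assumes `langchain-core` is installed):

# Before: from langchain.prompts.prompt import PromptTemplate
# After: import the same class from its canonical location in langchain-core.
from langchain_core.prompts.prompt import PromptTemplate

# Illustrative prompt; existing PromptTemplate usage keeps working unchanged.
prompt = PromptTemplate(
    input_variables=["question"],
    template="Q: {question}\nA:",
)
print(prompt.format(question="Which package provides PromptTemplate?"))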
@@ -1,11 +1,11 @@
 import time
 from typing import Any, Callable, List, cast
 
-from langchain.prompts.chat import (
-    BaseChatPromptTemplate,
-)
 from langchain.tools.base import BaseTool
 from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
+from langchain_core.prompts.chat import (
+    BaseChatPromptTemplate,
+)
 from langchain_core.vectorstores import VectorStoreRetriever
 
 from langchain_experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
@@ -5,14 +5,14 @@ from typing import Any, Dict, List, Optional, Union
 
 from langchain.base_language import BaseLanguageModel
 from langchain.chains import LLMChain
-from langchain.prompts.chat import (
+from langchain.tools.base import BaseTool
+from langchain_core.callbacks.manager import Callbacks
+from langchain_core.prompts.chat import (
     AIMessagePromptTemplate,
     ChatPromptTemplate,
     HumanMessagePromptTemplate,
     SystemMessagePromptTemplate,
 )
-from langchain.tools.base import BaseTool
-from langchain_core.callbacks.manager import Callbacks
 
 from langchain_experimental.pydantic_v1 import BaseModel
 
@@ -10,8 +10,8 @@ from langchain.base_language import BaseLanguageModel
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
 from langchain.output_parsers import PydanticOutputParser
-from langchain.prompts.prompt import PromptTemplate
 from langchain_core.callbacks.manager import CallbackManagerForChainRun
+from langchain_core.prompts.prompt import PromptTemplate
 
 from langchain_experimental import pydantic_v1 as pydantic
 from langchain_experimental.cpal.constants import Constant
@@ -1,5 +1,5 @@
-from langchain.prompts.few_shot import FewShotPromptTemplate
-from langchain.prompts.prompt import PromptTemplate
+from langchain_core.prompts.few_shot import FewShotPromptTemplate
+from langchain_core.prompts.prompt import PromptTemplate
 
 fallacy_critique_example = PromptTemplate(
     template="""Human: {input_prompt}
@@ -4,7 +4,7 @@ from __future__ import annotations
 import re
 from typing import List
 
-from langchain.prompts.prompt import PromptTemplate
+from langchain_core.prompts.prompt import PromptTemplate
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.exceptions import OutputParserException
 
@@ -7,11 +7,11 @@ from typing import Any, Dict, List, Optional
 from langchain.base_language import BaseLanguageModel
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
-from langchain.prompts.base import BasePromptTemplate
 from langchain_core.callbacks.manager import (
     AsyncCallbackManagerForChainRun,
     CallbackManagerForChainRun,
 )
+from langchain_core.prompts.base import BasePromptTemplate
 
 from langchain_experimental.llm_symbolic_math.prompt import PROMPT
 from langchain_experimental.pydantic_v1 import Extra
@@ -1,5 +1,5 @@
 # flake8: noqa
-from langchain.prompts.prompt import PromptTemplate
+from langchain_core.prompts.prompt import PromptTemplate
 
 _PROMPT_TEMPLATE = """Translate a math problem into a expression that can be executed using Python's SymPy library. Use the output of running this code to answer the question.
 
@@ -1,5 +1,5 @@
 # flake8: noqa
-from langchain.prompts.prompt import PromptTemplate
+from langchain_core.prompts.prompt import PromptTemplate
 
 template = (
     """
@@ -1,5 +1,5 @@
 # flake8: noqa
-from langchain.prompts.prompt import PromptTemplate
+from langchain_core.prompts.prompt import PromptTemplate
 
 template = (
     '''
@@ -1,9 +1,9 @@
 import re
 
 from langchain.chains import LLMChain
-from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import SystemMessage
+from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
 
 from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
 from langchain_experimental.plan_and_execute.schema import (
@@ -1,3 +1,3 @@
-from langchain.prompts.loading import load_prompt
+from langchain_core.prompts.loading import load_prompt
 
 __all__ = ["load_prompt"]
@@ -4,11 +4,11 @@ from typing import Any, Dict, List, Mapping, Optional, cast
 
 from langchain.chains import LLMChain
 from langchain.chains.base import Chain
-from langchain.prompts.prompt import PromptTemplate
 from langchain.schema.language_model import BaseLanguageModel
 from langchain_core.callbacks.manager import (
     CallbackManagerForChainRun,
 )
+from langchain_core.prompts.prompt import PromptTemplate
 
 from langchain_experimental.recommenders.amazon_personalize import AmazonPersonalize
 
@@ -18,13 +18,13 @@ from typing import (
 
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
-from langchain.prompts import (
+from langchain_core.callbacks.manager import CallbackManagerForChainRun
+from langchain_core.prompts import (
     BasePromptTemplate,
     ChatPromptTemplate,
     HumanMessagePromptTemplate,
     SystemMessagePromptTemplate,
 )
-from langchain_core.callbacks.manager import CallbackManagerForChainRun
 
 from langchain_experimental.pydantic_v1 import BaseModel, Extra, root_validator
 from langchain_experimental.rl_chain.metrics import (
@@ -5,8 +5,8 @@ from typing import Any, Dict, List, Optional, Tuple, Type, Union
 
 from langchain.base_language import BaseLanguageModel
 from langchain.chains.llm import LLMChain
-from langchain.prompts import BasePromptTemplate
 from langchain_core.callbacks.manager import CallbackManagerForChainRun
+from langchain_core.prompts import BasePromptTemplate
 
 import langchain_experimental.rl_chain.base as base
 
@@ -4,15 +4,15 @@ from typing import Any, Dict, List, Optional, Tuple, Type
 from langchain.base_language import BaseLanguageModel
 from langchain.chains.base import Chain
 from langchain.input import get_colored_text
-from langchain.prompts.base import BasePromptTemplate
-from langchain.prompts.chat import (
+from langchain.schema import LLMResult, PromptValue
+from langchain_core.callbacks.manager import CallbackManagerForChainRun
+from langchain_core.prompts.base import BasePromptTemplate
+from langchain_core.prompts.chat import (
     AIMessagePromptTemplate,
     BaseMessagePromptTemplate,
     ChatPromptTemplate,
     HumanMessagePromptTemplate,
 )
-from langchain.schema import LLMResult, PromptValue
-from langchain_core.callbacks.manager import CallbackManagerForChainRun
 
 from langchain_experimental.pydantic_v1 import Extra, root_validator
 
@@ -7,12 +7,12 @@ from typing import Any, Dict, List, Optional
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
 from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
-from langchain.prompts.prompt import PromptTemplate
 from langchain.schema import BasePromptTemplate
 from langchain_community.tools.sql_database.prompt import QUERY_CHECKER
 from langchain_community.utilities.sql_database import SQLDatabase
 from langchain_core.callbacks.manager import CallbackManagerForChainRun
 from langchain_core.language_models import BaseLanguageModel
+from langchain_core.prompts.prompt import PromptTemplate
 
 from langchain_experimental.pydantic_v1 import Extra, Field, root_validator
 
@@ -1,5 +1,5 @@
 # flake8: noqa
-from langchain.prompts.prompt import PromptTemplate
+from langchain_core.prompts.prompt import PromptTemplate
 
 
 PROMPT_SUFFIX = """Only use the following tables:
@@ -6,7 +6,6 @@ from typing import Any, Dict, List, Optional, Sequence, Union
 
 from langchain.chains.llm import LLMChain
 from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS
-from langchain.prompts.prompt import PromptTemplate
 from langchain_community.tools.sql_database.prompt import QUERY_CHECKER
 from langchain_community.utilities.sql_database import SQLDatabase
 from langchain_core.callbacks.manager import CallbackManagerForChainRun
@@ -14,6 +13,7 @@ from langchain_core.embeddings import Embeddings
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts import BasePromptTemplate
+from langchain_core.prompts.prompt import PromptTemplate
 
 from langchain_experimental.sql.base import INTERMEDIATE_STEPS_KEY, SQLDatabaseChain
 
@@ -1,4 +1,4 @@
-from langchain.prompts.prompt import PromptTemplate
+from langchain_core.prompts.prompt import PromptTemplate
 
 sentence_template = """Given the following fields, create a sentence about them.
 Make the sentence detailed and interesting. Use every given field.
@@ -3,9 +3,9 @@ from typing import Any, Dict, List, Optional, Union
 
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
-from langchain.prompts.few_shot import FewShotPromptTemplate
 from langchain.pydantic_v1 import BaseModel, root_validator
 from langchain_core.language_models import BaseLanguageModel
+from langchain_core.prompts.few_shot import FewShotPromptTemplate
 
 
 class SyntheticDataGenerator(BaseModel):
@@ -1,4 +1,4 @@
-from langchain.prompts.prompt import PromptTemplate
+from langchain_core.prompts.prompt import PromptTemplate
 
 DEFAULT_INPUT_KEY = "example"
 DEFAULT_PROMPT = PromptTemplate(
@@ -10,7 +10,7 @@ from abc import abstractmethod
 from typing import Any, Dict, List, Tuple
 
 from langchain.chains.llm import LLMChain
-from langchain.prompts.base import BasePromptTemplate
+from langchain_core.prompts.base import BasePromptTemplate
 
 from langchain_experimental.pydantic_v1 import Field
 from langchain_experimental.tot.prompts import get_cot_prompt, get_propose_prompt
@@ -7,8 +7,8 @@ from unittest import mock
 
 import pytest
 from langchain.output_parsers import PydanticOutputParser
-from langchain.prompts.prompt import PromptTemplate
 from langchain_community.llms import OpenAI
+from langchain_core.prompts.prompt import PromptTemplate
 
 from langchain_experimental import pydantic_v1 as pydantic
 from langchain_experimental.cpal.base import (
@@ -1,7 +1,7 @@
 import pytest
-from langchain.prompts.few_shot import FewShotPromptTemplate
 from langchain.pydantic_v1 import BaseModel
 from langchain_community.chat_models import ChatOpenAI
+from langchain_core.prompts.few_shot import FewShotPromptTemplate
 
 from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator
 from langchain_experimental.tabular_synthetic_data.openai import (
@@ -1,8 +1,8 @@
 from typing import Any, Dict
 
 import pytest
-from langchain.prompts.prompt import PromptTemplate
 from langchain_community.chat_models import FakeListChatModel
+from langchain_core.prompts.prompt import PromptTemplate
 from test_utils import MockEncoder, MockEncoderReturnsList
 
 import langchain_experimental.rl_chain.base as rl_chain
@@ -1,7 +1,7 @@
 """Test SmartLLM."""
-from langchain.prompts.prompt import PromptTemplate
 from langchain_community.chat_models import FakeListChatModel
 from langchain_community.llms import FakeListLLM
+from langchain_core.prompts.prompt import PromptTemplate
 
 from langchain_experimental.smart_llm import SmartLLMChain
 
@@ -240,7 +240,7 @@ def create_ernie_fn_runnable(
 
 from langchain.chains.ernie_functions import create_ernie_fn_chain
 from langchain_community.chat_models import ErnieBotChat
-from langchain.prompts import ChatPromptTemplate
+from langchain_core.prompts import ChatPromptTemplate
 from langchain.pydantic_v1 import BaseModel, Field
 
 
@@ -314,7 +314,7 @@ def create_structured_output_runnable(
 
 from langchain.chains.ernie_functions import create_structured_output_chain
 from langchain_community.chat_models import ErnieBotChat
-from langchain.prompts import ChatPromptTemplate
+from langchain_core.prompts import ChatPromptTemplate
 from langchain.pydantic_v1 import BaseModel, Field
 
 class Dog(BaseModel):
@@ -411,7 +411,7 @@ def create_ernie_fn_chain(
 
 from langchain.chains.ernie_functions import create_ernie_fn_chain
 from langchain_community.chat_models import ErnieBotChat
-from langchain.prompts import ChatPromptTemplate
+from langchain_core.prompts import ChatPromptTemplate
 
 from langchain.pydantic_v1 import BaseModel, Field
 
@@ -498,7 +498,7 @@ def create_structured_output_chain(
 
 from langchain.chains.ernie_functions import create_structured_output_chain
 from langchain_community.chat_models import ErnieBotChat
-from langchain.prompts import ChatPromptTemplate
+from langchain_core.prompts import ChatPromptTemplate
 
 from langchain.pydantic_v1 import BaseModel, Field
 
@@ -19,7 +19,7 @@ from langchain_core.messages import (
     HumanMessage,
     ToolCall,
 )
-from langchain_core.prompts import MessagesPlaceholder
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.runnables.utils import add
 from langchain_core.tools import Tool
 from langchain_core.tracers import RunLog, RunLogPatch
@@ -33,7 +33,6 @@ from langchain.agents import (
     initialize_agent,
 )
 from langchain.agents.output_parsers.openai_tools import OpenAIToolAgentAction
-from langchain.prompts import ChatPromptTemplate
 from langchain.tools import tool
 from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
 from tests.unit_tests.llms.fake_chat_model import GenericFakeChatModel