move prompts to core

This commit is contained in:
Harrison Chase
2023-07-09 11:50:33 -04:00
parent 5c74c93c61
commit b100633103
83 changed files with 175 additions and 166 deletions

View File

@@ -17,7 +17,7 @@ from langchain.agents.tools import InvalidTool
from langchain.chains.llm import LLMChain
from langchain.input import get_color_mapping
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import (
AgentAction,
AgentFinish,

View File

@@ -1,6 +1,6 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
API_PLANNER_PROMPT = """You are a planner that plans a sequence of API calls to assist with user queries against an API.

View File

@@ -7,7 +7,7 @@ from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,

View File

@@ -13,7 +13,7 @@ from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.chains.llm import LLMChain
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,

View File

@@ -12,7 +12,7 @@ from langchain.agents.chat.prompt import (
)
from langchain.agents.utils import validate_tools_single_input
from langchain.chains.llm import LLMChain
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,

View File

@@ -14,7 +14,7 @@ from langchain.agents.conversational_chat.prompt import (
)
from langchain.agents.utils import validate_tools_single_input
from langchain.chains import LLMChain
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,

View File

@@ -8,7 +8,7 @@ from pydantic import root_validator
from langchain.agents import BaseSingleActionAgent
from langchain.chat_models.openai import ChatOpenAI
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
BaseMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,

View File

@@ -8,7 +8,7 @@ from pydantic import root_validator
from langchain.agents import BaseMultiActionAgent
from langchain.chat_models.openai import ChatOpenAI
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
BaseMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
EXAMPLES = [
"""Setup: You are now playing a fast paced round of TextWorld! Here is your task for

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
EXAMPLES = [
"""Question: What is the elevation range for the area that the eastern sector of the Colorado orogeny extends into?

View File

@@ -1,6 +1,6 @@
from typing import Any, Dict, List, Tuple
from langchain.prompts.chat import ChatPromptTemplate
from langchain.core.prompts.chat import ChatPromptTemplate
from langchain.schema import AgentAction

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_DEFAULT_TEMPLATE = """Question: Who lived longer, Muhammad Ali or Alan Turing?
Are follow up questions needed here: Yes.

View File

@@ -9,7 +9,7 @@ from langchain.agents.structured_chat.output_parser import (
)
from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.chains.llm import LLMChain
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,

View File

@@ -6,7 +6,7 @@ from typing import Any
from langchain.chains.api.openapi.prompts import REQUEST_TEMPLATE
from langchain.chains.llm import LLMChain
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser
from langchain.schema.language_model import BaseLanguageModel

View File

@@ -6,7 +6,7 @@ from typing import Any
from langchain.chains.api.openapi.prompts import RESPONSE_TEMPLATE
from langchain.chains.llm import LLMChain
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser
from langchain.schema.language_model import BaseLanguageModel

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
API_URL_PROMPT_TEMPLATE = """You are given the below API Documentation:
{api_docs}

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.

View File

@@ -11,7 +11,7 @@ from langchain.chains.combine_documents.base import (
)
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate, format_document
from langchain.schema.callbacks.manager import Callbacks

View File

@@ -9,7 +9,7 @@ from langchain.chains.combine_documents.base import (
)
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate, format_document
from langchain.schema.callbacks.manager import Callbacks

View File

@@ -1,8 +1,7 @@
# flake8: noqa
from copy import deepcopy
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
critique_example = PromptTemplate(
template="""Human: {input_prompt}

View File

@@ -6,7 +6,7 @@ from langchain.memory.prompt import (
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
SUMMARY_PROMPT,
)
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """Extract all entities from the following text. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.

View File

@@ -15,7 +15,7 @@ from langchain.chains.graph_qa.prompts import (
)
from langchain.chains.llm import LLMChain
from langchain.graphs.rdf_graph import RdfGraph
from langchain.prompts.base import BasePromptTemplate
from langchain.core.prompts.base import BasePromptTemplate
from langchain.schema.callbacks.manager import CallbackManagerForChainRun
from langchain.schema.chain import Chain
from langchain.schema.language_model import BaseLanguageModel

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
web_search_template = """Please write a passage to answer the question
Question: {QUESTION}

View File

@@ -8,7 +8,7 @@ from pydantic import Extra, Field
from langchain.input import get_colored_text
from langchain.load.dump import dumpd
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import (
BaseLLMOutputParser,
BasePromptTemplate,

View File

@@ -4,7 +4,7 @@ from __future__ import annotations
import re
from typing import List
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser, OutputParserException
_PROMPT_TEMPLATE = """If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format:

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_CREATE_DRAFT_ANSWER_TEMPLATE = """{question}\n\n"""
CREATE_DRAFT_ANSWER_PROMPT = PromptTemplate(

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_PROMPT_TEMPLATE = """Translate a math problem into a expression that can be executed using Python's numexpr library. Use the output of running this code to answer the question.

View File

@@ -10,7 +10,7 @@ from pydantic import Extra, root_validator
from langchain.chains.llm import LLMChain
from langchain.chains.sequential import SequentialChain
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema.callbacks.manager import CallbackManagerForChainRun
from langchain.schema.chain import Chain
from langchain.schema.language_model import BaseLanguageModel

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_PROMPT_TEMPLATE = """
You are an agents controlling a browser. You are given:

View File

@@ -7,7 +7,7 @@ from langchain.chains.openai_functions.utils import get_llm_kwargs
from langchain.output_parsers.openai_functions import (
PydanticOutputFunctionsParser,
)
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import HumanMessage, SystemMessage

View File

@@ -9,7 +9,7 @@ from langchain.output_parsers.openai_functions import (
PydanticOutputFunctionsParser,
)
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema import BaseLLMOutputParser
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import HumanMessage, SystemMessage

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
template = (
"""

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
template = (
'''

View File

@@ -1,11 +1,11 @@
# flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
templ1 = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.

View File

@@ -1,11 +1,11 @@
# flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question.
Return any relevant text verbatim.

View File

@@ -1,12 +1,12 @@
# flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
AIMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
DEFAULT_REFINE_PROMPT_TMPL = (
"The original question is as follows: {question}\n"

View File

@@ -1,7 +1,7 @@
# flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,

View File

@@ -8,7 +8,7 @@ from pydantic import Extra, Field, root_validator
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.callbacks.manager import CallbackManagerForChainRun
from langchain.schema.chain import Chain

View File

@@ -1,6 +1,6 @@
# flake8: noqa
from langchain.output_parsers.list import CommaSeparatedListOutputParser
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
PROMPT_SUFFIX = """Only use the following tables:

View File

View File

@@ -0,0 +1,38 @@
"""Utilities for formatting strings."""
from string import Formatter
from typing import Any, List, Mapping, Sequence, Union
class StrictFormatter(Formatter):
    """A subclass of formatter that checks for extra keys."""

    def check_unused_args(
        self,
        used_args: Sequence[Union[int, str]],
        args: Sequence,
        kwargs: Mapping[str, Any],
    ) -> None:
        """Check to see if extra parameters are passed."""
        # Any keyword the format string never consumed is treated as an error.
        unused = set(kwargs) - set(used_args)
        if unused:
            raise KeyError(unused)

    def vformat(
        self, format_string: str, args: Sequence, kwargs: Mapping[str, Any]
    ) -> str:
        """Check that no arguments are provided."""
        # Positional arguments are rejected outright; callers must be explicit.
        if args:
            raise ValueError(
                "No arguments should be provided, "
                "everything should be passed as keyword arguments."
            )
        return super().vformat(format_string, args, kwargs)

    def validate_input_variables(
        self, format_string: str, input_variables: List[str]
    ) -> None:
        """Verify the template can be formatted with exactly these variables."""
        # Formatting with placeholder values raises if any variable is
        # missing from, or extra with respect to, the format string.
        super().format(format_string, **dict.fromkeys(input_variables, "foo"))


formatter = StrictFormatter()

42
langchain/core/input.py Normal file
View File

@@ -0,0 +1,42 @@
"""Handle chained inputs."""
from typing import Dict, List, Optional, TextIO
# ANSI SGR parameter strings for each supported highlight color.
_TEXT_COLOR_MAPPING = {
    "blue": "36;1",
    "yellow": "33;1",
    "pink": "38;5;200",
    "green": "32;1",
    "red": "31;1",
}


def get_color_mapping(
    items: List[str], excluded_colors: Optional[List] = None
) -> Dict[str, str]:
    """Get mapping for items to a support color."""
    palette = [
        name
        for name in _TEXT_COLOR_MAPPING
        if excluded_colors is None or name not in excluded_colors
    ]
    # Cycle through the palette so every item receives a color even when
    # there are more items than available colors.
    return {item: palette[idx % len(palette)] for idx, item in enumerate(items)}


def get_colored_text(text: str, color: str) -> str:
    """Get colored text."""
    code = _TEXT_COLOR_MAPPING[color]
    return "\u001b[" + code + "m\033[1;3m" + text + "\u001b[0m"


def get_bolded_text(text: str) -> str:
    """Get bolded text."""
    return "\033[1m" + text + "\033[0m"


def print_text(
    text: str, color: Optional[str] = None, end: str = "", file: Optional[TextIO] = None
) -> None:
    """Print text with highlighting and no end characters."""
    if color is None:
        rendered = text
    else:
        rendered = get_colored_text(text, color)
    print(rendered, end=end, file=file)
    if file:
        # Ensure all printed content is written out immediately.
        file.flush()

View File

View File

@@ -5,7 +5,7 @@ import warnings
from abc import ABC
from typing import Any, Callable, Dict, List, Set
from langchain.formatting import formatter
from langchain.core.formatting import formatter
from langchain.schema import BasePromptTemplate
from langchain.schema.messages import BaseMessage, HumanMessage
from langchain.schema.prompt import PromptValue

View File

@@ -8,8 +8,8 @@ from typing import Any, Callable, List, Sequence, Tuple, Type, TypeVar, Union
from pydantic import Field, root_validator
from langchain.load.serializable import Serializable
from langchain.prompts.base import StringPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.base import StringPromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import (
BasePromptTemplate,
PromptValue,

View File

@@ -7,7 +7,7 @@ from typing import Any, Dict, List, Union
from pydantic import root_validator
from langchain.prompts.base import (
from langchain.core.prompts.base import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
_get_jinja2_variables_from_template,

View File

@@ -2,7 +2,7 @@
# flake8: noqa
from langchain.schema.messages import HumanMessage, AIMessage, SystemMessage
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)

View File

@@ -8,7 +8,7 @@ from pydantic import Extra, Field
from langchain.chains.llm import LLMChain
from langchain.evaluation.comparison.prompt import PROMPT, PROMPT_WITH_REFERENCE
from langchain.evaluation.schema import LLMEvalChain, PairwiseStringEvaluator
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser
from langchain.schema.callbacks.manager import Callbacks
from langchain.schema.language_model import BaseLanguageModel

View File

@@ -21,7 +21,7 @@ from langchain.evaluation.run_evaluators.base import (
RunEvaluatorInputMapper,
RunEvaluatorOutputParser,
)
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.base import BaseTool

View File

@@ -3,7 +3,7 @@ from typing import List
from langchain.chains.llm import LLMChain
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."

View File

@@ -4,7 +4,7 @@ from typing import Any, Callable, List
from pydantic import BaseModel
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
BaseChatPromptTemplate,
)
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage

View File

@@ -1,38 +1 @@
"""Utilities for formatting strings."""
from string import Formatter
from typing import Any, List, Mapping, Sequence, Union
class StrictFormatter(Formatter):
"""A subclass of formatter that checks for extra keys."""
def check_unused_args(
self,
used_args: Sequence[Union[int, str]],
args: Sequence,
kwargs: Mapping[str, Any],
) -> None:
"""Check to see if extra parameters are passed."""
extra = set(kwargs).difference(used_args)
if extra:
raise KeyError(extra)
def vformat(
self, format_string: str, args: Sequence, kwargs: Mapping[str, Any]
) -> str:
"""Check that no arguments are provided."""
if len(args) > 0:
raise ValueError(
"No arguments should be provided, "
"everything should be passed as keyword arguments."
)
return super().vformat(format_string, args, kwargs)
def validate_input_variables(
self, format_string: str, input_variables: List[str]
) -> None:
dummy_inputs = {input_variable: "foo" for input_variable in input_variables}
super().format(format_string, **dummy_inputs)
formatter = StrictFormatter()

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE = """You are an AI assistant helping a human keep track of facts about relevant people, places, and concepts in their life. Update the summary of the provided entity in the "Entity" section based on the last line of your conversation with the human. If you are writing the summary for the first time, return a single sentence.
The update should only include facts that are relayed in the last line of conversation about the provided entity, and should only contain facts about the provided entity.

View File

@@ -1,7 +1,7 @@
# flake8: noqa
from langchain.graphs.networkx_graph import KG_TRIPLE_DELIMITER
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = (
"You are a networked intelligence helping a human track knowledge triples"

View File

@@ -1,42 +1,9 @@
"""Handle chained inputs."""
from typing import Dict, List, Optional, TextIO
"""Purely for backwards compatibility."""
from langchain.core.input import get_colored_text, get_bolded_text, get_color_mapping, print_text
_TEXT_COLOR_MAPPING = {
"blue": "36;1",
"yellow": "33;1",
"pink": "38;5;200",
"green": "32;1",
"red": "31;1",
}
def get_color_mapping(
items: List[str], excluded_colors: Optional[List] = None
) -> Dict[str, str]:
"""Get mapping for items to a support color."""
colors = list(_TEXT_COLOR_MAPPING.keys())
if excluded_colors is not None:
colors = [c for c in colors if c not in excluded_colors]
color_mapping = {item: colors[i % len(colors)] for i, item in enumerate(items)}
return color_mapping
def get_colored_text(text: str, color: str) -> str:
"""Get colored text."""
color_str = _TEXT_COLOR_MAPPING[color]
return f"\u001b[{color_str}m\033[1;3m{text}\u001b[0m"
def get_bolded_text(text: str) -> str:
"""Get bolded text."""
return f"\033[1m{text}\033[0m"
def print_text(
text: str, color: Optional[str] = None, end: str = "", file: Optional[TextIO] = None
) -> None:
"""Print text with highlighting and no end characters."""
text_to_print = get_colored_text(text, color) if color else text
print(text_to_print, end=end, file=file)
if file:
file.flush() # ensure all printed content are written to file
__all__ = [
"get_bolded_text",
"get_color_mapping",
"get_colored_text",
"print_text"
]

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_MEMORY_CONVERSATION_TEMPLATE = """You are an assistant to a human, powered by a large language model trained by OpenAI.

View File

@@ -6,7 +6,7 @@ from typing import List, Optional, Sequence
from langchain.chains.llm import LLMChain
from langchain.input import get_color_mapping, print_text
from langchain.llms.base import BaseLLM
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema.chain import Chain

View File

@@ -1,5 +1,5 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
NAIVE_FIX = """Instructions:
--------------

View File

@@ -3,7 +3,7 @@ from __future__ import annotations
from typing import TypeVar
from langchain.chains.llm import LLMChain
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import (
BaseOutputParser,
BasePromptTemplate,

View File

@@ -1,6 +1,6 @@
"""Prompt template classes."""
from langchain.prompts.base import StringPromptTemplate
from langchain.prompts.chat import (
from langchain.core.prompts.base import StringPromptTemplate
from langchain.core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
ChatMessagePromptTemplate,
@@ -19,7 +19,7 @@ from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.few_shot_with_templates import FewShotPromptWithTemplates
from langchain.prompts.loading import load_prompt
from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.prompts.prompt import Prompt, PromptTemplate
from langchain.core.prompts.prompt import Prompt, PromptTemplate
from langchain.schema.prompt_template import BasePromptTemplate
__all__ = [

View File

@@ -5,7 +5,7 @@ from typing import Callable, Dict, List
from pydantic import BaseModel, validator
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
def _get_length_based(text: str) -> int:

View File

@@ -9,7 +9,7 @@ import numpy as np
from pydantic import BaseModel, root_validator
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
def ngram_overlap_score(source: List[str], example: List[str]) -> float:

View File

@@ -3,13 +3,13 @@ from typing import Any, Dict, List, Optional
from pydantic import Extra, root_validator
from langchain.prompts.base import (
from langchain.core.prompts.base import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
check_valid_template,
)
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
class FewShotPromptTemplate(StringPromptTemplate):

View File

@@ -3,9 +3,9 @@ from typing import Any, Dict, List, Optional
from pydantic import Extra, root_validator
from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate
from langchain.core.prompts.base import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
class FewShotPromptWithTemplates(StringPromptTemplate):

View File

@@ -9,7 +9,7 @@ import yaml
from langchain.output_parsers.regex import RegexParser
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BaseLLMOutputParser, BasePromptTemplate, NoOpOutputParser
from langchain.utilities.loading import try_load_from_hub

View File

@@ -2,7 +2,7 @@ from typing import Any, Dict, List, Tuple
from pydantic import root_validator
from langchain.prompts.chat import BaseChatPromptTemplate
from langchain.core.prompts.chat import BaseChatPromptTemplate
from langchain.schema import BasePromptTemplate, PromptValue

View File

@@ -6,7 +6,7 @@ from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BaseRetriever, Document
from langchain.schema.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,

View File

@@ -6,7 +6,7 @@ from langchain.prompts.example_selector.ngram_overlap import (
NGramOverlapExampleSelector,
ngram_overlap_score,
)
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
EXAMPLES = [
{"input": "See Spot run.", "output": "foo1"},

View File

@@ -7,7 +7,7 @@ from langchain.agents.tools import Tool
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
from langchain.llms.fake import FakeListLLM
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import AgentAction
_PAGE_CONTENT = """This is a page about LangChain.

View File

@@ -5,7 +5,7 @@ from langchain.chains.conversation.base import ConversationChain
from langchain.memory.buffer import ConversationBufferMemory
from langchain.memory.buffer_window import ConversationBufferWindowMemory
from langchain.memory.summary import ConversationSummaryMemory
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BaseMemory
from tests.unit_tests.llms.fake_llm import FakeLLM

View File

@@ -7,7 +7,7 @@ import pytest
from langchain.chains.llm import LLMChain
from langchain.chains.loading import load_chain
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser
from tests.unit_tests.llms.fake_llm import FakeLLM

View File

@@ -10,8 +10,8 @@ from langchain.chat_models.openai import ChatOpenAI
from langchain.llms.openai import OpenAI
from langchain.load.dump import dumps
from langchain.load.serializable import Serializable
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
class Person(Serializable):

View File

@@ -6,7 +6,7 @@ from langchain.chains.llm import LLMChain
from langchain.llms.openai import OpenAI
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
class NotSerializable:

View File

@@ -4,7 +4,7 @@ from typing import List
import pytest
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
from langchain.core.prompts.chat import (
AIMessagePromptTemplate,
BaseMessagePromptTemplate,
ChatMessagePromptTemplate,

View File

@@ -4,7 +4,7 @@ from typing import Dict, List, Tuple
import pytest
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
EXAMPLE_PROMPT = PromptTemplate(
input_variables=["question", "answer"], template="{question}: {answer}"

View File

@@ -1,7 +1,7 @@
"""Test few shot prompt template."""
from langchain.prompts.few_shot_with_templates import FewShotPromptWithTemplates
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
EXAMPLE_PROMPT = PromptTemplate(
input_variables=["question", "answer"], template="{question}: {answer}"

View File

@@ -2,7 +2,7 @@
import pytest
from langchain.prompts.example_selector.length_based import LengthBasedExampleSelector
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
EXAMPLES = [
{"question": "Question: who are you?\nAnswer: foo"},

View File

@@ -7,7 +7,7 @@ from typing import Iterator
from langchain.output_parsers import RegexParser
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.loading import load_prompt
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
EXAMPLE_DIR = Path("tests/unit_tests/examples").absolute()

View File

@@ -1,6 +1,6 @@
from langchain.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain.core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
def test_get_input_variables() -> None:

View File

@@ -1,7 +1,7 @@
"""Test functionality related to prompts."""
import pytest
from langchain.prompts.prompt import PromptTemplate
from langchain.core.prompts.prompt import PromptTemplate
def test_prompt_valid() -> None: