Harrison/official pre release (#8106)

Harrison Chase
2023-07-21 18:44:32 -07:00
committed by GitHub
parent 95bcf68802
commit aa0e69bc98
65 changed files with 210 additions and 602 deletions
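
The hunks below rename imports from langchain.experimental to langchain_experimental. A minimal sketch of the corresponding downstream import update, assuming the langchain-experimental pre-release is installed alongside langchain (the sketch itself is not part of the diff):

    # Before (removed in this commit):
    # from langchain.experimental.plan_and_execute import PlanAndExecute

    # After (module paths taken from the hunks below):
    from langchain_experimental.plan_and_execute import (
        PlanAndExecute,
        load_agent_executor,
        load_chat_planner,
    )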

View File

@@ -1,19 +0,0 @@
from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain.experimental.generative_agents.generative_agent import GenerativeAgent
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
__all__ = [
"BabyAGI",
"AutoGPT",
"GenerativeAgent",
"GenerativeAgentMemory",
"PlanAndExecute",
"load_agent_executor",
"load_chat_planner",
]

View File

@@ -1,4 +0,0 @@
from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
__all__ = ["BabyAGI", "AutoGPT"]

View File

@@ -1,6 +0,0 @@
"""Experimental LLM wrappers."""
from langchain.experimental.llms.jsonformer_decoder import JsonFormer
from langchain.experimental.llms.rellm_decoder import RELLM
__all__ = ["RELLM", "JsonFormer"]

View File

@@ -1,9 +0,0 @@
from langchain.experimental.plan_and_execute.agent_executor import PlanAndExecute
from langchain.experimental.plan_and_execute.executors.agent_executor import (
load_agent_executor,
)
from langchain.experimental.plan_and_execute.planners.chat_planner import (
load_chat_planner,
)
__all__ = ["PlanAndExecute", "load_agent_executor", "load_chat_planner"]

View File

@@ -1,3 +0,0 @@
from langchain.experimental.prompts.load import load_prompt
__all__ = ["load_prompt"]

View File

@@ -0,0 +1,19 @@
from langchain_experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain_experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain_experimental.generative_agents.generative_agent import GenerativeAgent
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
__all__ = [
"BabyAGI",
"AutoGPT",
"GenerativeAgent",
"GenerativeAgentMemory",
"PlanAndExecute",
"load_agent_executor",
"load_chat_planner",
]

View File

@@ -0,0 +1,4 @@
from langchain_experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain_experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
__all__ = ["BabyAGI", "AutoGPT"]

View File

@@ -2,18 +2,8 @@ from __future__ import annotations
from typing import List, Optional
from pydantic import ValidationError
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,
BaseAutoGPTOutputParser,
)
from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
FINISH_NAME,
)
from langchain.memory import ChatMessageHistory
from langchain.schema import (
BaseChatMessageHistory,
@@ -23,6 +13,16 @@ from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import ValidationError
from langchain_experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,
BaseAutoGPTOutputParser,
)
from langchain_experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import (
FINISH_NAME,
)
class AutoGPT:

View File

@@ -1,9 +1,8 @@
from typing import Any, Dict, List
from pydantic import Field
from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import Field
class AutoGPTMemory(BaseChatMemory):

View File

@@ -1,15 +1,15 @@
import time
from typing import Any, Callable, List
from pydantic import BaseModel
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
from langchain.prompts.chat import (
BaseChatPromptTemplate,
)
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import BaseModel
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):

View File

@@ -1,11 +1,11 @@
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
from langchain_experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain_experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
from langchain_experimental.autonomous_agents.baby_agi.task_execution import (
TaskExecutionChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
from langchain_experimental.autonomous_agents.baby_agi.task_prioritization import (
TaskPrioritizationChain,
)

View File

@@ -2,21 +2,21 @@
from collections import deque
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
TaskExecutionChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
TaskPrioritizationChain,
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain_experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,
)
from langchain_experimental.autonomous_agents.baby_agi.task_execution import (
TaskExecutionChain,
)
from langchain_experimental.autonomous_agents.baby_agi.task_prioritization import (
TaskPrioritizationChain,
)
class BabyAGI(Chain, BaseModel):

View File

@@ -7,33 +7,33 @@ import json
from typing import Any, ClassVar, Dict, List, Optional, Type
import pydantic
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.experimental.cpal.constants import Constant
from langchain.experimental.cpal.models import (
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts.prompt import PromptTemplate
from langchain_experimental.cpal.constants import Constant
from langchain_experimental.cpal.models import (
CausalModel,
InterventionModel,
NarrativeModel,
QueryModel,
StoryModel,
)
from langchain.experimental.cpal.templates.univariate.causal import (
from langchain_experimental.cpal.templates.univariate.causal import (
template as causal_template,
)
from langchain.experimental.cpal.templates.univariate.intervention import (
from langchain_experimental.cpal.templates.univariate.intervention import (
template as intervention_template,
)
from langchain.experimental.cpal.templates.univariate.narrative import (
from langchain_experimental.cpal.templates.univariate.narrative import (
template as narrative_template,
)
from langchain.experimental.cpal.templates.univariate.query import (
from langchain_experimental.cpal.templates.univariate.query import (
template as query_template,
)
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts.prompt import PromptTemplate
class _BaseStoryElementChain(Chain):

View File

@@ -5,10 +5,10 @@ from typing import Any, Optional, Union
import duckdb
import pandas as pd
from langchain.graphs.networkx_graph import NetworkxEntityGraph
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
from langchain.experimental.cpal.constants import Constant
from langchain.graphs.networkx_graph import NetworkxEntityGraph
from langchain_experimental.cpal.constants import Constant
class NarrativeModel(BaseModel):

View File

@@ -1,5 +1,5 @@
"""Generative Agents primitives."""
from langchain.experimental.generative_agents.generative_agent import GenerativeAgent
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain_experimental.generative_agents.generative_agent import GenerativeAgent
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory
__all__ = ["GenerativeAgent", "GenerativeAgentMemory"]

View File

@@ -2,12 +2,12 @@ import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
from pydantic import BaseModel, Field
from langchain.chains import LLMChain
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from pydantic import BaseModel, Field
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory
class GenerativeAgent(BaseModel):

View File

@@ -0,0 +1,6 @@
"""Experimental LLM wrappers."""
from langchain_experimental.llms.jsonformer_decoder import JsonFormer
from langchain_experimental.llms.rellm_decoder import RELLM
__all__ = ["RELLM", "JsonFormer"]

View File

@@ -4,10 +4,9 @@ from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, List, Optional, cast
from pydantic import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from pydantic import Field, root_validator
if TYPE_CHECKING:
import jsonformer

View File

@@ -3,11 +3,10 @@ from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, cast
from pydantic import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.llms.utils import enforce_stop_tokens
from pydantic import Field, root_validator
if TYPE_CHECKING:
import rellm

View File

@@ -5,6 +5,6 @@ As in https://arxiv.org/pdf/2211.10435.pdf.
This is vulnerable to arbitrary code execution:
https://github.com/hwchase17/langchain/issues/5872
"""
from langchain.experimental.pal_chain.base import PALChain
from langchain_experimental.pal_chain.base import PALChain
__all__ = ["PALChain"]

View File

@@ -11,16 +11,16 @@ import ast
import warnings
from typing import Any, Dict, List, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain.chains.pal.math_prompt import MATH_PROMPT
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.utilities import PythonREPL
from pydantic import Extra, Field, root_validator
from langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain_experimental.pal_chain.math_prompt import MATH_PROMPT
COMMAND_EXECUTION_FUNCTIONS = ["system", "exec", "execfile", "eval"]

View File

@@ -0,0 +1,9 @@
from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute
from langchain_experimental.plan_and_execute.executors.agent_executor import (
load_agent_executor,
)
from langchain_experimental.plan_and_execute.planners.chat_planner import (
load_chat_planner,
)
__all__ = ["PlanAndExecute", "load_agent_executor", "load_chat_planner"]

View File

@@ -1,12 +1,12 @@
from typing import Any, Dict, List, Optional
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.plan_and_execute.executors.base import BaseExecutor
from langchain.experimental.plan_and_execute.planners.base import BasePlanner
from langchain.experimental.plan_and_execute.schema import (
from pydantic import Field
from langchain_experimental.plan_and_execute.executors.base import BaseExecutor
from langchain_experimental.plan_and_execute.planners.base import BasePlanner
from langchain_experimental.plan_and_execute.schema import (
BaseStepContainer,
ListStepContainer,
)

View File

@@ -2,10 +2,11 @@ from typing import List
from langchain.agents.agent import AgentExecutor
from langchain.agents.structured_chat.base import StructuredChatAgent
from langchain.experimental.plan_and_execute.executors.base import ChainExecutor
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import BaseTool
from langchain_experimental.plan_and_execute.executors.base import ChainExecutor
HUMAN_MESSAGE_TEMPLATE = """Previous steps: {previous_steps}
Current objective: {current_step}

View File

@@ -1,11 +1,11 @@
from abc import abstractmethod
from typing import Any
from pydantic import BaseModel
from langchain.callbacks.manager import Callbacks
from langchain.chains.base import Chain
from langchain.experimental.plan_and_execute.schema import StepResponse
from pydantic import BaseModel
from langchain_experimental.plan_and_execute.schema import StepResponse
class BaseExecutor(BaseModel):

View File

@@ -1,11 +1,11 @@
from abc import abstractmethod
from typing import Any, List, Optional
from pydantic import BaseModel
from langchain.callbacks.manager import Callbacks
from langchain.chains.llm import LLMChain
from langchain.experimental.plan_and_execute.schema import Plan, PlanOutputParser
from pydantic import BaseModel
from langchain_experimental.plan_and_execute.schema import Plan, PlanOutputParser
class BasePlanner(BaseModel):

View File

@@ -1,15 +1,16 @@
import re
from langchain.chains import LLMChain
from langchain.experimental.plan_and_execute.planners.base import LLMPlanner
from langchain.experimental.plan_and_execute.schema import (
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import SystemMessage
from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
from langchain_experimental.plan_and_execute.schema import (
Plan,
PlanOutputParser,
Step,
)
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import SystemMessage
SYSTEM_PROMPT = (
"Let's first understand the problem and devise a plan to solve the problem."

View File

@@ -1,9 +1,8 @@
from abc import abstractmethod
from typing import List, Tuple
from pydantic import BaseModel, Field
from langchain.schema import BaseOutputParser
from pydantic import BaseModel, Field
class Step(BaseModel):

View File

@@ -0,0 +1,3 @@
from langchain_experimental.prompts.load import load_prompt
__all__ = ["load_prompt"]

View File

@@ -5,7 +5,6 @@ from pathlib import Path
from typing import Union
import yaml
from langchain.prompts.loading import load_prompt_from_config, try_load_from_hub
from langchain.schema.prompts import BasePromptTemplate

View File

@@ -1,4 +1,4 @@
"""Chain for interacting with SQL Database."""
from langchain.experimental.sql.base import SQLDatabaseChain
from langchain_experimental.sql.base import SQLDatabaseChain
__all__ = ["SQLDatabaseChain"]

View File

@@ -4,17 +4,17 @@ from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain.utilities.sql_database import SQLDatabase
from pydantic import Extra, Field, root_validator
from langchain_experimental.sql.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
@@ -25,7 +25,7 @@ class SQLDatabaseChain(Chain):
Example:
.. code-block:: python
from langchain.experimental.sql import SQLDatabaseChain
from langchain_experimental.sql import SQLDatabaseChain
from langchain import OpenAI, SQLDatabase
db = SQLDatabase(...)
db_chain = SQLDatabaseChain.from_llm(OpenAI(), db)

View File

@@ -1,14 +1,11 @@
[tool.poetry]
name = "langchain-experimental"
version = "0.0.1rc0"
version = "0.0.1rc4"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
readme = "README.md"
repository = "https://www.github.com/hwchase17/langchain"
packages = [
{include = "langchain"}
]
[tool.poetry.dependencies]
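
The pyproject.toml hunk above bumps the pre-release version and drops the explicit packages stanza. A hedged sketch of verifying the new import paths once that pre-release is installed; the install command and the smoke-test prints are assumptions, not part of this commit:

    # Assumed install of the pre-release declared above:
    #   pip install "langchain-experimental==0.0.1rc4"

    # Smoke test: modules touched in this diff resolve from the new package.
    from langchain_experimental.pal_chain import PALChain
    from langchain_experimental.sql import SQLDatabaseChain

    print(PALChain.__module__)          # langchain_experimental.pal_chain.base
    print(SQLDatabaseChain.__module__)  # langchain_experimental.sql.base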