Harrison/official pre release (#8106)

Harrison Chase authored on 2023-07-21 18:44:32 -07:00, committed by GitHub
parent 95bcf68802
commit aa0e69bc98
65 changed files with 210 additions and 602 deletions
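
This pre-release moves the experimental modules out of the core langchain package into the standalone langchain-experimental distribution: every langchain.experimental.* import becomes langchain_experimental.*, with no other API changes. A minimal migration sketch, assuming the pre-release is installed (pip install langchain-experimental==0.0.1rc4):

# Old import path, deleted by this commit:
#   from langchain.experimental.sql import SQLDatabaseChain
# New import path, added by this commit:
from langchain_experimental.sql import SQLDatabaseChain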

View File

@@ -1,19 +0,0 @@
from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain.experimental.generative_agents.generative_agent import GenerativeAgent
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
__all__ = [
"BabyAGI",
"AutoGPT",
"GenerativeAgent",
"GenerativeAgentMemory",
"PlanAndExecute",
"load_agent_executor",
"load_chat_planner",
]

View File

@@ -1,4 +0,0 @@
from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
__all__ = ["BabyAGI", "AutoGPT"]

View File

@@ -1,6 +0,0 @@
"""Experimental LLM wrappers."""
from langchain.experimental.llms.jsonformer_decoder import JsonFormer
from langchain.experimental.llms.rellm_decoder import RELLM
__all__ = ["RELLM", "JsonFormer"]

View File

@@ -1,9 +0,0 @@
from langchain.experimental.plan_and_execute.agent_executor import PlanAndExecute
from langchain.experimental.plan_and_execute.executors.agent_executor import (
load_agent_executor,
)
from langchain.experimental.plan_and_execute.planners.chat_planner import (
load_chat_planner,
)
__all__ = ["PlanAndExecute", "load_agent_executor", "load_chat_planner"]

View File

@@ -1,3 +0,0 @@
from langchain.experimental.prompts.load import load_prompt
__all__ = ["load_prompt"]

View File

@@ -0,0 +1,19 @@
from langchain_experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain_experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain_experimental.generative_agents.generative_agent import GenerativeAgent
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
__all__ = [
"BabyAGI",
"AutoGPT",
"GenerativeAgent",
"GenerativeAgentMemory",
"PlanAndExecute",
"load_agent_executor",
"load_chat_planner",
]

View File

@@ -0,0 +1,4 @@
from langchain_experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain_experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
__all__ = ["BabyAGI", "AutoGPT"]

View File

@@ -2,18 +2,8 @@ from __future__ import annotations
from typing import List, Optional
from pydantic import ValidationError
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,
BaseAutoGPTOutputParser,
)
from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
FINISH_NAME,
)
from langchain.memory import ChatMessageHistory
from langchain.schema import (
BaseChatMessageHistory,
@@ -23,6 +13,16 @@ from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import ValidationError
from langchain_experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,
BaseAutoGPTOutputParser,
)
from langchain_experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import (
FINISH_NAME,
)
class AutoGPT:

View File

@@ -1,9 +1,8 @@
from typing import Any, Dict, List
from pydantic import Field
from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import Field
class AutoGPTMemory(BaseChatMemory):

View File

@@ -1,15 +1,15 @@
import time
from typing import Any, Callable, List
from pydantic import BaseModel
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
from langchain.prompts.chat import (
BaseChatPromptTemplate,
)
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import BaseModel
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):

View File

@@ -1,11 +1,11 @@
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
from langchain_experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain_experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
from langchain_experimental.autonomous_agents.baby_agi.task_execution import (
TaskExecutionChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
from langchain_experimental.autonomous_agents.baby_agi.task_prioritization import (
TaskPrioritizationChain,
)

View File

@@ -2,21 +2,21 @@
from collections import deque
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
TaskExecutionChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
TaskPrioritizationChain,
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain_experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,
)
from langchain_experimental.autonomous_agents.baby_agi.task_execution import (
TaskExecutionChain,
)
from langchain_experimental.autonomous_agents.baby_agi.task_prioritization import (
TaskPrioritizationChain,
)
class BabyAGI(Chain, BaseModel):

View File

@@ -7,33 +7,33 @@ import json
from typing import Any, ClassVar, Dict, List, Optional, Type
import pydantic
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.experimental.cpal.constants import Constant
from langchain.experimental.cpal.models import (
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts.prompt import PromptTemplate
from langchain_experimental.cpal.constants import Constant
from langchain_experimental.cpal.models import (
CausalModel,
InterventionModel,
NarrativeModel,
QueryModel,
StoryModel,
)
from langchain.experimental.cpal.templates.univariate.causal import (
from langchain_experimental.cpal.templates.univariate.causal import (
template as causal_template,
)
from langchain.experimental.cpal.templates.univariate.intervention import (
from langchain_experimental.cpal.templates.univariate.intervention import (
template as intervention_template,
)
from langchain.experimental.cpal.templates.univariate.narrative import (
from langchain_experimental.cpal.templates.univariate.narrative import (
template as narrative_template,
)
from langchain.experimental.cpal.templates.univariate.query import (
from langchain_experimental.cpal.templates.univariate.query import (
template as query_template,
)
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts.prompt import PromptTemplate
class _BaseStoryElementChain(Chain):

View File

@@ -5,10 +5,10 @@ from typing import Any, Optional, Union
import duckdb
import pandas as pd
from langchain.graphs.networkx_graph import NetworkxEntityGraph
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
from langchain.experimental.cpal.constants import Constant
from langchain.graphs.networkx_graph import NetworkxEntityGraph
from langchain_experimental.cpal.constants import Constant
class NarrativeModel(BaseModel):

View File

@@ -1,5 +1,5 @@
"""Generative Agents primitives."""
from langchain.experimental.generative_agents.generative_agent import GenerativeAgent
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain_experimental.generative_agents.generative_agent import GenerativeAgent
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory
__all__ = ["GenerativeAgent", "GenerativeAgentMemory"]

View File

@@ -2,12 +2,12 @@ import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
from pydantic import BaseModel, Field
from langchain.chains import LLMChain
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from pydantic import BaseModel, Field
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory
class GenerativeAgent(BaseModel):

View File

@@ -0,0 +1,6 @@
"""Experimental LLM wrappers."""
from langchain_experimental.llms.jsonformer_decoder import JsonFormer
from langchain_experimental.llms.rellm_decoder import RELLM
__all__ = ["RELLM", "JsonFormer"]

View File

@@ -4,10 +4,9 @@ from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, List, Optional, cast
from pydantic import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from pydantic import Field, root_validator
if TYPE_CHECKING:
import jsonformer

View File

@@ -3,11 +3,10 @@ from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, cast
from pydantic import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.llms.utils import enforce_stop_tokens
from pydantic import Field, root_validator
if TYPE_CHECKING:
import rellm

View File

@@ -5,6 +5,6 @@ As in https://arxiv.org/pdf/2211.10435.pdf.
This is vulnerable to arbitrary code execution:
https://github.com/hwchase17/langchain/issues/5872
"""
from langchain.experimental.pal_chain.base import PALChain
from langchain_experimental.pal_chain.base import PALChain
__all__ = ["PALChain"]

View File

@@ -11,16 +11,16 @@ import ast
import warnings
from typing import Any, Dict, List, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain.chains.pal.math_prompt import MATH_PROMPT
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.utilities import PythonREPL
from pydantic import Extra, Field, root_validator
from langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain_experimental.pal_chain.math_prompt import MATH_PROMPT
COMMAND_EXECUTION_FUNCTIONS = ["system", "exec", "execfile", "eval"]

View File

@@ -0,0 +1,9 @@
from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute
from langchain_experimental.plan_and_execute.executors.agent_executor import (
load_agent_executor,
)
from langchain_experimental.plan_and_execute.planners.chat_planner import (
load_chat_planner,
)
__all__ = ["PlanAndExecute", "load_agent_executor", "load_chat_planner"]

View File

@@ -1,12 +1,12 @@
from typing import Any, Dict, List, Optional
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.plan_and_execute.executors.base import BaseExecutor
from langchain.experimental.plan_and_execute.planners.base import BasePlanner
from langchain.experimental.plan_and_execute.schema import (
from pydantic import Field
from langchain_experimental.plan_and_execute.executors.base import BaseExecutor
from langchain_experimental.plan_and_execute.planners.base import BasePlanner
from langchain_experimental.plan_and_execute.schema import (
BaseStepContainer,
ListStepContainer,
)

View File

@@ -2,10 +2,11 @@ from typing import List
from langchain.agents.agent import AgentExecutor
from langchain.agents.structured_chat.base import StructuredChatAgent
from langchain.experimental.plan_and_execute.executors.base import ChainExecutor
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import BaseTool
from langchain_experimental.plan_and_execute.executors.base import ChainExecutor
HUMAN_MESSAGE_TEMPLATE = """Previous steps: {previous_steps}
Current objective: {current_step}

View File

@@ -1,11 +1,11 @@
from abc import abstractmethod
from typing import Any
from pydantic import BaseModel
from langchain.callbacks.manager import Callbacks
from langchain.chains.base import Chain
from langchain.experimental.plan_and_execute.schema import StepResponse
from pydantic import BaseModel
from langchain_experimental.plan_and_execute.schema import StepResponse
class BaseExecutor(BaseModel):

View File

@@ -1,11 +1,11 @@
from abc import abstractmethod
from typing import Any, List, Optional
from pydantic import BaseModel
from langchain.callbacks.manager import Callbacks
from langchain.chains.llm import LLMChain
from langchain.experimental.plan_and_execute.schema import Plan, PlanOutputParser
from pydantic import BaseModel
from langchain_experimental.plan_and_execute.schema import Plan, PlanOutputParser
class BasePlanner(BaseModel):

View File

@@ -1,15 +1,16 @@
import re
from langchain.chains import LLMChain
from langchain.experimental.plan_and_execute.planners.base import LLMPlanner
from langchain.experimental.plan_and_execute.schema import (
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import SystemMessage
from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
from langchain_experimental.plan_and_execute.schema import (
Plan,
PlanOutputParser,
Step,
)
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import SystemMessage
SYSTEM_PROMPT = (
"Let's first understand the problem and devise a plan to solve the problem."

View File

@@ -1,9 +1,8 @@
from abc import abstractmethod
from typing import List, Tuple
from pydantic import BaseModel, Field
from langchain.schema import BaseOutputParser
from pydantic import BaseModel, Field
class Step(BaseModel):

View File

@@ -0,0 +1,3 @@
from langchain_experimental.prompts.load import load_prompt
__all__ = ["load_prompt"]

View File

@@ -5,7 +5,6 @@ from pathlib import Path
from typing import Union
import yaml
from langchain.prompts.loading import load_prompt_from_config, try_load_from_hub
from langchain.schema.prompts import BasePromptTemplate

View File

@@ -1,4 +1,4 @@
"""Chain for interacting with SQL Database."""
from langchain.experimental.sql.base import SQLDatabaseChain
from langchain_experimental.sql.base import SQLDatabaseChain
__all__ = ["SQLDatabaseChain"]

View File

@@ -4,17 +4,17 @@ from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain.utilities.sql_database import SQLDatabase
from pydantic import Extra, Field, root_validator
from langchain_experimental.sql.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
@@ -25,7 +25,7 @@ class SQLDatabaseChain(Chain):
Example:
.. code-block:: python
from langchain.experimental.sql import SQLDatabaseChain
from langchain_experimental.sql import SQLDatabaseChain
from langchain import OpenAI, SQLDatabase
db = SQLDatabase(...)
db_chain = SQLDatabaseChain.from_llm(OpenAI(), db)

View File

@@ -1,14 +1,11 @@
[tool.poetry]
name = "langchain-experimental"
version = "0.0.1rc0"
version = "0.0.1rc4"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
readme = "README.md"
repository = "https://www.github.com/hwchase17/langchain"
packages = [
{include = "langchain"}
]
[tool.poetry.dependencies]

View File

@@ -1,8 +1,17 @@
"""GitHub Toolkit."""
from typing import List
from typing import Dict, List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.github.prompt import (
COMMENT_ON_ISSUE_PROMPT,
CREATE_FILE_PROMPT,
DELETE_FILE_PROMPT,
GET_ISSUE_PROMPT,
GET_ISSUES_PROMPT,
READ_FILE_PROMPT,
UPDATE_FILE_PROMPT,
)
from langchain.tools.github.tool import GitHubAction
from langchain.utilities.github import GitHubAPIWrapper
@@ -16,7 +25,43 @@ class GitHubToolkit(BaseToolkit):
def from_github_api_wrapper(
cls, github_api_wrapper: GitHubAPIWrapper
) -> "GitHubToolkit":
actions = github_api_wrapper.list()
operations: List[Dict] = [
{
"mode": "get_issues",
"name": "Get Issues",
"description": GET_ISSUES_PROMPT,
},
{
"mode": "get_issue",
"name": "Get Issue",
"description": GET_ISSUE_PROMPT,
},
{
"mode": "comment_on_issue",
"name": "Comment on Issue",
"description": COMMENT_ON_ISSUE_PROMPT,
},
{
"mode": "create_file",
"name": "Create File",
"description": CREATE_FILE_PROMPT,
},
{
"mode": "read_file",
"name": "Read File",
"description": READ_FILE_PROMPT,
},
{
"mode": "update_file",
"name": "Update File",
"description": UPDATE_FILE_PROMPT,
},
{
"mode": "delete_file",
"name": "Delete File",
"description": DELETE_FILE_PROMPT,
},
]
tools = [
GitHubAction(
name=action["name"],
@@ -24,7 +69,7 @@ class GitHubToolkit(BaseToolkit):
mode=action["mode"],
api_wrapper=github_api_wrapper,
)
for action in actions
for action in operations
]
return cls(tools=tools)
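
With this change the operations table lives in the toolkit rather than in GitHubAPIWrapper, whose list() method is removed further down, so the wrapper is reduced to pure API access. Construction is unchanged for callers; a minimal sketch, assuming GITHUB_APP_ID, GITHUB_APP_PRIVATE_KEY and GITHUB_REPOSITORY are set in the environment:

from langchain.agents.agent_toolkits.github.toolkit import GitHubToolkit
from langchain.utilities.github import GitHubAPIWrapper

github = GitHubAPIWrapper()  # reads credentials from the environment
toolkit = GitHubToolkit.from_github_api_wrapper(github)
for tool in toolkit.get_tools():
    print(tool.name)  # Get Issues, Get Issue, Comment on Issue, ...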

View File

@@ -1,7 +1,14 @@
from typing import List
from typing import Dict, List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.jira.prompt import (
JIRA_CATCH_ALL_PROMPT,
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
JIRA_GET_ALL_PROJECTS_PROMPT,
JIRA_ISSUE_CREATE_PROMPT,
JIRA_JQL_PROMPT,
)
from langchain.tools.jira.tool import JiraAction
from langchain.utilities.jira import JiraAPIWrapper
@@ -13,7 +20,33 @@ class JiraToolkit(BaseToolkit):
@classmethod
def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
actions = jira_api_wrapper.list()
operations: List[Dict] = [
{
"mode": "jql",
"name": "JQL Query",
"description": JIRA_JQL_PROMPT,
},
{
"mode": "get_projects",
"name": "Get Projects",
"description": JIRA_GET_ALL_PROJECTS_PROMPT,
},
{
"mode": "create_issue",
"name": "Create Issue",
"description": JIRA_ISSUE_CREATE_PROMPT,
},
{
"mode": "other",
"name": "Catch all Jira API call",
"description": JIRA_CATCH_ALL_PROMPT,
},
{
"mode": "create_page",
"name": "Create confluence page",
"description": JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
},
]
tools = [
JiraAction(
name=action["name"],
@@ -21,7 +54,7 @@ class JiraToolkit(BaseToolkit):
mode=action["mode"],
api_wrapper=jira_api_wrapper,
)
for action in actions
for action in operations
]
return cls(tools=tools)
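
The Jira toolkit gets the identical treatment: its operations table moves out of JiraAPIWrapper and into the toolkit. A minimal sketch, assuming JIRA_API_TOKEN, JIRA_USERNAME and JIRA_INSTANCE_URL are set in the environment:

from langchain.agents.agent_toolkits.jira.toolkit import JiraToolkit
from langchain.utilities.jira import JiraAPIWrapper

jira = JiraAPIWrapper()  # reads credentials from the environment
toolkit = JiraToolkit.from_jira_api_wrapper(jira)
print([tool.name for tool in toolkit.get_tools()])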

View File

@@ -1,6 +1,5 @@
"""General utilities."""
from langchain.requests import TextRequestsWrapper
from langchain.utilities.apify import ApifyWrapper
from langchain.utilities.arxiv import ArxivAPIWrapper
from langchain.utilities.awslambda import LambdaWrapper
from langchain.utilities.bash import BashProcess
@@ -32,7 +31,6 @@ from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langchain.utilities.zapier import ZapierNLAWrapper
__all__ = [
"ApifyWrapper",
"ArxivAPIWrapper",
"GoldenQueryAPIWrapper",
"BashProcess",

View File

@@ -1,205 +0,0 @@
from typing import Any, Callable, Dict, Optional
from pydantic import BaseModel, root_validator
from langchain.document_loaders import ApifyDatasetLoader
from langchain.document_loaders.base import Document
from langchain.utils import get_from_dict_or_env
class ApifyWrapper(BaseModel):
"""Wrapper around Apify.
To use, you should have the ``apify-client`` python package installed,
and the environment variable ``APIFY_API_TOKEN`` set with your API key, or pass
`apify_api_token` as a named parameter to the constructor.
"""
apify_client: Any
apify_client_async: Any
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate environment.
Validate that an Apify API token is set and the apify-client
Python package exists in the current environment.
"""
apify_api_token = get_from_dict_or_env(
values, "apify_api_token", "APIFY_API_TOKEN"
)
try:
from apify_client import ApifyClient, ApifyClientAsync
values["apify_client"] = ApifyClient(apify_api_token)
values["apify_client_async"] = ApifyClientAsync(apify_api_token)
except ImportError:
raise ValueError(
"Could not import apify-client Python package. "
"Please install it with `pip install apify-client`."
)
return values
def call_actor(
self,
actor_id: str,
run_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> ApifyDatasetLoader:
"""Run an Actor on the Apify platform and wait for results to be ready.
Args:
actor_id (str): The ID or name of the Actor on the Apify platform.
run_input (Dict): The input object of the Actor that you're trying to run.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
Actor run's default dataset.
"""
actor_call = self.apify_client.actor(actor_id).call(
run_input=run_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=actor_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
async def acall_actor(
self,
actor_id: str,
run_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> ApifyDatasetLoader:
"""Run an Actor on the Apify platform and wait for results to be ready.
Args:
actor_id (str): The ID or name of the Actor on the Apify platform.
run_input (Dict): The input object of the Actor that you're trying to run.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to
an instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
Actor run's default dataset.
"""
actor_call = await self.apify_client_async.actor(actor_id).call(
run_input=run_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=actor_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
def call_actor_task(
self,
task_id: str,
task_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> ApifyDatasetLoader:
"""Run a saved Actor task on Apify and wait for results to be ready.
Args:
task_id (str): The ID or name of the task on the Apify platform.
task_input (Dict): The input object of the task that you're trying to run.
Overrides the task's saved input.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
task run's default dataset.
"""
task_call = self.apify_client.task(task_id).call(
task_input=task_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=task_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
async def acall_actor_task(
self,
task_id: str,
task_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> ApifyDatasetLoader:
"""Run a saved Actor task on Apify and wait for results to be ready.
Args:
task_id (str): The ID or name of the task on the Apify platform.
task_input (Dict): The input object of the task that you're trying to run.
Overrides the task's saved input.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
task run's default dataset.
"""
task_call = await self.apify_client_async.task(task_id).call(
task_input=task_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=task_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)

View File

@@ -4,15 +4,6 @@ from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.tools.github.prompt import (
COMMENT_ON_ISSUE_PROMPT,
CREATE_FILE_PROMPT,
DELETE_FILE_PROMPT,
GET_ISSUE_PROMPT,
GET_ISSUES_PROMPT,
READ_FILE_PROMPT,
UPDATE_FILE_PROMPT,
)
from langchain.utils import get_from_dict_or_env
@@ -26,52 +17,11 @@ class GitHubAPIWrapper(BaseModel):
github_app_private_key: Optional[str] = None
github_branch: Optional[str] = None
operations: List[Dict] = [
{
"mode": "get_issues",
"name": "Get Issues",
"description": GET_ISSUES_PROMPT,
},
{
"mode": "get_issue",
"name": "Get Issue",
"description": GET_ISSUE_PROMPT,
},
{
"mode": "comment_on_issue",
"name": "Comment on Issue",
"description": COMMENT_ON_ISSUE_PROMPT,
},
{
"mode": "create_file",
"name": "Create File",
"description": CREATE_FILE_PROMPT,
},
{
"mode": "read_file",
"name": "Read File",
"description": READ_FILE_PROMPT,
},
{
"mode": "update_file",
"name": "Update File",
"description": UPDATE_FILE_PROMPT,
},
{
"mode": "delete_file",
"name": "Delete File",
"description": DELETE_FILE_PROMPT,
},
]
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def list(self) -> List[Dict]:
return self.operations
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""

View File

@@ -3,13 +3,6 @@ from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.tools.jira.prompt import (
JIRA_CATCH_ALL_PROMPT,
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
JIRA_GET_ALL_PROJECTS_PROMPT,
JIRA_ISSUE_CREATE_PROMPT,
JIRA_JQL_PROMPT,
)
from langchain.utils import get_from_dict_or_env
@@ -23,42 +16,11 @@ class JiraAPIWrapper(BaseModel):
jira_api_token: Optional[str] = None
jira_instance_url: Optional[str] = None
operations: List[Dict] = [
{
"mode": "jql",
"name": "JQL Query",
"description": JIRA_JQL_PROMPT,
},
{
"mode": "get_projects",
"name": "Get Projects",
"description": JIRA_GET_ALL_PROJECTS_PROMPT,
},
{
"mode": "create_issue",
"name": "Create Issue",
"description": JIRA_ISSUE_CREATE_PROMPT,
},
{
"mode": "other",
"name": "Catch all Jira API call",
"description": JIRA_CATCH_ALL_PROMPT,
},
{
"mode": "create_page",
"name": "Create confluence page",
"description": JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
},
]
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def list(self) -> List[Dict]:
return self.operations
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""

View File

@@ -1,9 +1,8 @@
"""Util that calls OpenWeatherMap using PyOWM."""
from typing import Any, Dict, Optional
from pydantic import Extra, root_validator
from pydantic import BaseModel, Extra, root_validator
from langchain.tools.base import BaseModel
from langchain.utils import get_from_dict_or_env

View File

@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain"
version = "0.0.240rc0"
version = "0.0.240rc4"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"