Harrison/move experimental (#8084)

Harrison Chase, 2023-07-21 10:36:28 -07:00 (committed by GitHub)
parent f35db9f43e
commit da04760de1
50 changed files with 3519 additions and 28 deletions

.github/workflows/_lint.yml

@@ -37,6 +37,10 @@ jobs:
      - name: Install dependencies
        run: |
          poetry install
      - name: Install langchain editable
        if: ${{ inputs.working-directory != 'langchain' }}
        run: |
          pip install -e ../langchain
      - name: Analysing the code with our lint
        run: |
          make lint
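The added step only runs when the reusable lint workflow is invoked from a directory other than `langchain`, installing the sibling `langchain` package in editable mode so that `libs/experimental` is checked against the local `langchain` source tree rather than only a published release. A minimal local sketch of the same idea — the helper name is hypothetical, and it assumes a repo checkout where `libs/experimental` and `libs/langchain` sit side by side; the commands themselves come from the step above:

```python
# Hypothetical local mirror of the CI step above: when working outside the
# `langchain` package itself, install the sibling library in editable mode so
# imports resolve against the checked-out source.
import pathlib
import subprocess


def install_editable_langchain(working_directory: str) -> None:
    # Mirrors the `if: inputs.working-directory != 'langchain'` guard.
    if pathlib.Path(working_directory).name != "langchain":
        subprocess.run(
            ["pip", "install", "-e", "../langchain"],
            cwd=working_directory,
            check=True,
        )


install_editable_langchain("libs/experimental")
```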

.github/workflows/_test.yml

@@ -7,6 +7,10 @@ on:
        required: true
        type: string
        description: "From which folder this pipeline executes"
      test_type:
        type: string
        description: "Test types to run"
        default: '["core", "extended"]'

env:
  POETRY_VERSION: "1.4.2"
@@ -24,9 +28,7 @@ jobs:
          - "3.9"
          - "3.10"
          - "3.11"
        test_type:
          - "core"
          - "extended"
        test_type: ${{ fromJSON(inputs.test_type) }}
    name: Python ${{ matrix.python-version }} ${{ matrix.test_type }}
    steps:
      - uses: actions/checkout@v3
@@ -45,6 +47,10 @@ jobs:
            echo "Running extended tests, installing dependencies with poetry..."
            poetry install -E extended_testing
          fi
      - name: Install langchain editable
        if: ${{ inputs.working-directory != 'langchain' }}
        run: |
          pip install -e ../langchain
      - name: Run ${{matrix.test_type}} tests
        run: |
          if [ "${{ matrix.test_type }}" == "core" ]; then
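The hard-coded core/extended matrix is replaced by `${{ fromJSON(inputs.test_type) }}`, so callers of the reusable test workflow choose which test types to run; the experimental CI below passes only '["core"]'. Conceptually, `fromJSON` parses the JSON-encoded input into a list that is crossed with the Python versions — roughly as in this illustrative sketch, where the version list is abbreviated to the versions visible in the hunk and `json.loads` stands in for GitHub's `fromJSON`:

```python
# Rough illustration of how the test_type input feeds the job matrix:
# fromJSON() turns the JSON string into a list, analogous to json.loads.
import itertools
import json

python_versions = ["3.9", "3.10", "3.11"]  # abbreviated
test_type_input = '["core", "extended"]'   # workflow default; langchain_experimental_ci.yml passes '["core"]'

for version, test_type in itertools.product(python_versions, json.loads(test_type_input)):
    print(f"Python {version} {test_type}")  # one job per combination
```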

.github/workflows/langchain_experimental_ci.yml (new file)

@@ -0,0 +1,29 @@
---
name: libs/langchain-experimental CI

on:
  push:
    branches: [ master ]
  pull_request:
    paths:
      - '.github/workflows/_lint.yml'
      - '.github/workflows/_test.yml'
      - '.github/workflows/langchain_experimental_ci.yml'
      - 'libs/langchain/**'
      - 'libs/experimental/**'
  workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI

jobs:
  lint:
    uses:
      ./.github/workflows/_lint.yml
    with:
      working-directory: libs/experimental
    secrets: inherit
  test:
    uses:
      ./.github/workflows/_test.yml
    with:
      working-directory: libs/experimental
      test_type: '["core"]'
    secrets: inherit

libs/experimental/Makefile (new file)

@@ -0,0 +1,54 @@
.PHONY: all format lint test tests test_watch integration_tests docker_tests help extended_tests

# Default target executed when no arguments are given to make.
all: help

# Define a variable for the test file path.
TEST_FILE ?= tests/unit_tests/

test:
	poetry run pytest $(TEST_FILE)

tests:
	poetry run pytest $(TEST_FILE)

test_watch:
	poetry run ptw --now . -- tests/unit_tests

######################
# LINTING AND FORMATTING
######################

# Define a variable for Python and notebook files.
PYTHON_FILES=.
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')

lint lint_diff:
	poetry run mypy $(PYTHON_FILES)
	poetry run black $(PYTHON_FILES) --check
	poetry run ruff .

format format_diff:
	poetry run black $(PYTHON_FILES)
	poetry run ruff --select I --fix $(PYTHON_FILES)

spell_check:
	poetry run codespell --toml pyproject.toml

spell_fix:
	poetry run codespell --toml pyproject.toml -w

######################
# HELP
######################

help:
	@echo '----'
	@echo 'format - run code formatters'
	@echo 'lint - run linters'
	@echo 'test - run unit tests'
	@echo 'tests - run unit tests'
	@echo 'test TEST_FILE=<test_file> - run all tests in file'
	@echo 'test_watch - run unit tests in watch mode'
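For reference, a sketch of what the main targets wrap when invoked (`make test`, `make lint`, `make format`); the Python driver and its `run` helper are hypothetical and purely illustrative — `make` remains the intended entry point:

```python
# Illustrative only: the commands wrapped by the Makefile targets above.
import subprocess

TARGETS = {
    "test": [["poetry", "run", "pytest", "tests/unit_tests/"]],
    "lint": [
        ["poetry", "run", "mypy", "."],
        ["poetry", "run", "black", ".", "--check"],
        ["poetry", "run", "ruff", "."],
    ],
    "format": [
        ["poetry", "run", "black", "."],
        ["poetry", "run", "ruff", "--select", "I", "--fix", "."],
    ],
}


def run(target: str) -> None:
    # Run each command for the requested target, stopping on the first failure.
    for command in TARGETS[target]:
        subprocess.run(command, check=True)
```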

libs/experimental/README.md (new file)

@@ -0,0 +1,3 @@
# 🦜️🧪 LangChain Experimental
This repository holds more experimental LangChain code.
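The package scaffolding added in this commit (Makefile, poetry.toml, pyproject.toml, tests) wraps the existing experimental modules, and the diffs below show those modules still using `langchain.experimental.*` import paths. A hedged sketch of the import surface — the module paths are taken verbatim from the diffs below, and the block assumes `langchain` is installed as in the CI workflows (`poetry install` plus `pip install -e ../langchain`):

```python
# Illustrative imports only: these paths appear verbatim in the diffs below and
# assume the `langchain` library is installed alongside this package.
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.experimental.plan_and_execute.schema import Plan, PlanOutputParser
```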

View File

@@ -2,8 +2,6 @@ from __future__ import annotations
from typing import List, Optional
from pydantic import ValidationError
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
@@ -23,6 +21,7 @@ from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import ValidationError
class AutoGPT:
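This and the following Python hunks apply the same change: the `pydantic` import moves from above the `langchain` imports to below them, consistent with the isort ("I") rules enabled in the new ruff configuration, since `langchain` sorts before `pydantic` within the third-party group. Without diff markers the hunks can look like duplicated imports; the resulting block is simply ordered as in this abridged sketch, using import lines taken from the hunk above:

```python
# Resulting import order after the move (abridged): third-party imports sorted
# alphabetically, so `langchain` modules come before `pydantic`.
from langchain.chains.llm import LLMChain
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import ValidationError
```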

View File

@@ -1,9 +1,8 @@
from typing import Any, Dict, List
from pydantic import Field
from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import Field
class AutoGPTMemory(BaseChatMemory):

View File

@@ -1,8 +1,6 @@
import time
from typing import Any, Callable, List
from pydantic import BaseModel
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
from langchain.prompts.chat import (
    BaseChatPromptTemplate,
@@ -10,6 +8,7 @@ from langchain.prompts.chat import (
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import BaseModel
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):

View File

@@ -2,8 +2,6 @@
from collections import deque
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
@@ -17,6 +15,7 @@ from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
class BabyAGI(Chain, BaseModel):

View File

@@ -7,7 +7,6 @@ import json
from typing import Any, ClassVar, Dict, List, Optional, Type
import pydantic
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain

View File

@@ -5,10 +5,9 @@ from typing import Any, Optional, Union
import duckdb
import pandas as pd
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
from langchain.experimental.cpal.constants import Constant
from langchain.graphs.networkx_graph import NetworkxEntityGraph
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
class NarrativeModel(BaseModel):

View File

@@ -2,12 +2,11 @@ import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
from pydantic import BaseModel, Field
from langchain import LLMChain
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from pydantic import BaseModel, Field
class GenerativeAgent(BaseModel):

View File

@@ -4,10 +4,9 @@ from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, List, Optional, cast
from pydantic import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from pydantic import Field, root_validator
if TYPE_CHECKING:
import jsonformer

View File

@@ -3,11 +3,10 @@ from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, cast
from pydantic import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.llms.utils import enforce_stop_tokens
from pydantic import Field, root_validator
if TYPE_CHECKING:
import rellm

View File

@@ -1,7 +1,5 @@
from typing import Any, Dict, List, Optional
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.plan_and_execute.executors.base import BaseExecutor
@@ -10,6 +8,7 @@ from langchain.experimental.plan_and_execute.schema import (
    BaseStepContainer,
    ListStepContainer,
)
from pydantic import Field
class PlanAndExecute(Chain):

View File

@@ -1,11 +1,10 @@
from abc import abstractmethod
from typing import Any
from pydantic import BaseModel
from langchain.callbacks.manager import Callbacks
from langchain.chains.base import Chain
from langchain.experimental.plan_and_execute.schema import StepResponse
from pydantic import BaseModel
class BaseExecutor(BaseModel):

View File

@@ -1,11 +1,10 @@
from abc import abstractmethod
from typing import Any, List, Optional
from pydantic import BaseModel
from langchain.callbacks.manager import Callbacks
from langchain.chains.llm import LLMChain
from langchain.experimental.plan_and_execute.schema import Plan, PlanOutputParser
from pydantic import BaseModel
class BasePlanner(BaseModel):

View File

@@ -1,9 +1,8 @@
from abc import abstractmethod
from typing import List, Tuple
from pydantic import BaseModel, Field
from langchain.schema import BaseOutputParser
from pydantic import BaseModel, Field
class Step(BaseModel):

libs/experimental/poetry.lock (generated, 3332 additions)

File diff suppressed because it is too large.

libs/experimental/poetry.toml (new file)

@@ -0,0 +1,5 @@
[virtualenvs]
in-project = true

[installer]
modern-installation = false

libs/experimental/pyproject.toml (new file)

@@ -0,0 +1,69 @@
[tool.poetry]
name = "langchain.experimental"
version = "0.0.1"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
readme = "README.md"
repository = "https://www.github.com/hwchase17/langchain"
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain = "^0.0.235"
[tool.poetry.group.lint.dependencies]
ruff = "^0.0.249"
black = "^23.1.0"
[tool.poetry.group.typing.dependencies]
mypy = "^0.991"
[tool.poetry.group.dev.dependencies]
jupyter = "^1.0.0"
setuptools = "^67.6.1"
[tool.poetry.group.test.dependencies]
# The only dependencies that should be added are
# dependencies used for running tests (e.g., pytest, freezegun, response).
# Any dependencies that do not meet that criteria will be removed.
pytest = "^7.3.0"
[tool.ruff]
select = [
"E", # pycodestyle
"F", # pyflakes
"I", # isort
]
[tool.mypy]
ignore_missing_imports = "True"
disallow_untyped_defs = "True"
exclude = ["notebooks", "examples", "example_data"]
[tool.coverage.run]
omit = [
"tests/*",
]
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.pytest.ini_options]
# --strict-markers will raise errors on unknown marks.
# https://docs.pytest.org/en/7.1.x/how-to/mark.html#raising-errors-on-unknown-marks
#
# https://docs.pytest.org/en/7.1.x/reference/reference.html
# --strict-config any warnings encountered while parsing the `pytest`
# section of the configuration file raise errors.
#
# https://github.com/tophat/syrupy
# --snapshot-warn-unused Prints a warning on unused snapshots rather than fail the test suite.
addopts = "--strict-markers --strict-config --durations=5"
# Registering custom markers.
# https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers
markers = [
"requires: mark tests as requiring a specific library"
]
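The `requires` marker registered above follows the same convention as the main `langchain` package: unit tests that need an optional dependency are marked so tooling can select or skip them explicitly under `--strict-markers`. A hedged sketch of a test using it — the test name is hypothetical, and `duckdb` is chosen only because it appears as an import in the cpal diff above:

```python
# Illustrative test using the custom `requires` marker registered in
# pyproject.toml; it can be selected with `pytest -m requires`, while plain
# tests like test_mock below run unconditionally.
import pytest


@pytest.mark.requires("duckdb")
def test_requires_optional_dependency() -> None:
    # Skip gracefully if the optional dependency is not installed.
    duckdb = pytest.importorskip("duckdb")
    assert duckdb is not None
```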

View File

View File

@@ -0,0 +1,2 @@
def test_mock() -> None:
    assert True