mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-09 10:41:52 +00:00)

Compare commits: v0.0.339rc...wfh/tqdm_f (18 commits)
Commits in this compare:

- 85eca4c055
- b27f9da12b
- 4bf249f7a3
- 50413a9648
- b3b17d76f3
- 16af282429
- 78da34153e
- e327bb4ba4
- d47ee1ae79
- a21e84faf7
- ace9e64d62
- 5064890fcf
- 143049c90f
- c5ae9f832d
- 131db4ba68
- 04bddbaba4
- aec8715073
- bb18b0266e
@@ -648,7 +648,7 @@
     {
      "data": {
       "text/plain": [
-       "OpenAIEmbeddings(client=<class 'openai.api_resources.embedding.Embedding'>, model='text-embedding-ada-002', deployment='text-embedding-ada-002', openai_api_version='', openai_api_base='', openai_api_type='', openai_proxy='', embedding_ctx_length=8191, openai_api_key='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', openai_organization='', allowed_special=set(), disallowed_special='all', chunk_size=1000, max_retries=6, request_timeout=None, headers=None, tiktoken_model_name=None, show_progress_bar=False, model_kwargs={})"
+       "OpenAIEmbeddings(client=<class 'openai.api_resources.embedding.Embedding'>, model='text-embedding-ada-002', deployment='text-embedding-ada-002', openai_api_version='', openai_api_base='', openai_api_type='', openai_proxy='', embedding_ctx_length=8191, openai_api_key='', openai_organization='', allowed_special=set(), disallowed_special='all', chunk_size=1000, max_retries=6, request_timeout=None, headers=None, tiktoken_model_name=None, show_progress_bar=False, model_kwargs={})"
       ]
      },
      "execution_count": 13,
@@ -52,6 +52,7 @@
     },
     {
      "cell_type": "markdown",
      "id": "c6fb4903-f845-4907-ae14-df305891b0ff",
      "metadata": {},
      "source": [
       "## Data Loading\n",
@@ -76,17 +77,18 @@
     {
      "cell_type": "code",
      "execution_count": 45,
      "id": "fc0767d4-9155-4591-855c-ef2e14e0e10f",
      "metadata": {},
      "outputs": [],
      "source": [
       "import os\n",
+      "import tempfile\n",
       "from pathlib import Path\n",
       "from pprint import pprint\n",
-      "import requests\n",
-      "import tempfile\n",
       "from time import sleep\n",
       "from typing import Dict, List\n",
       "\n",
+      "import requests\n",
       "from docugami import Docugami\n",
       "from docugami.types import Document as DocugamiDocument\n",
       "\n",
@@ -166,6 +168,7 @@
     {
      "cell_type": "code",
      "execution_count": 46,
      "id": "ce0b2b21-7623-46e7-ae2c-3a9f67e8b9b9",
      "metadata": {},
      "outputs": [
       {
@@ -207,6 +210,7 @@
     },
     {
      "cell_type": "markdown",
      "id": "01f035e5-c3f8-4d23-9d1b-8d2babdea8e9",
      "metadata": {},
      "source": [
       "If you are on the free Docugami tier, your files should be done in ~15 minutes or less, depending on the number of pages uploaded and available resources (contact Docugami about paid plans for faster processing). If your notebook is not continuously running, you can re-run the code above to continue waiting; it does not re-upload or reprocess your files."
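A rough sketch of what "continue waiting" can look like in client code; the Docugami SDK calls and the `status` value here are assumptions for illustration, not the notebook's exact API:

from time import sleep

from docugami import Docugami

client = Docugami()  # reads DOCUGAMI_API_KEY from the environment

# Poll until processing finishes; re-running this later is safe because
# nothing is re-uploaded. `documents.list()` and `status` are assumed names.
while True:
    docs = client.documents.list()
    if all(d.status == "Ready" for d in docs.documents):
        break
    sleep(30)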
@@ -225,6 +229,7 @@
     {
      "cell_type": "code",
      "execution_count": 47,
      "id": "05fcdd57-090f-44bf-a1fb-2c3609c80e34",
      "metadata": {},
      "outputs": [
       {
@@ -268,6 +273,7 @@
     },
     {
      "cell_type": "markdown",
      "id": "bfc1f2c9-e6d4-4d98-a799-6bc30bc61661",
      "metadata": {},
      "source": [
       "The file processed by Docugami in the example above was [this one](https://data.ntsb.gov/carol-repgen/api/Aviation/ReportMain/GenerateNewestReport/192541/pdf) from the NTSB; you can view the PDF side by side to compare it with the XML chunks above. \n",
@@ -278,6 +284,7 @@
     {
      "cell_type": "code",
      "execution_count": 48,
      "id": "8a4b49e0-de78-4790-a930-ad7cf324697a",
      "metadata": {},
      "outputs": [
       {
@@ -326,6 +333,7 @@
     },
     {
      "cell_type": "markdown",
      "id": "1cfc06bc-67d2-46dd-b04d-95efa3619d0a",
      "metadata": {},
      "source": [
       "## Docugami XML Deep Dive: Jane Doe NDA Example\n",
@@ -336,6 +344,7 @@
     {
      "cell_type": "code",
      "execution_count": 109,
      "id": "7b697d30-1e94-47f0-87e8-f81d4b180da2",
      "metadata": {},
      "outputs": [
       {
@@ -361,6 +370,7 @@
     {
      "cell_type": "code",
      "execution_count": 98,
      "id": "14714576-6e1d-499b-bcc8-39140bb2fd78",
      "metadata": {},
      "outputs": [
       {
@@ -415,6 +425,7 @@
     },
     {
      "cell_type": "markdown",
      "id": "dc09ba64-4973-4471-9501-54294c1143fc",
      "metadata": {},
      "source": [
       "The Docugami XML contains extremely detailed semantics and visual bounding boxes for all elements. The `dgml-utils` library parses text and non-text elements into formats appropriate to pass to LLMs (chunked text with XML semantic labels)."
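As a sketch of that parsing step, assuming dgml-utils exposes a `get_chunks_str` helper with an `include_xml_tags` flag (check the library for the exact names):

from pathlib import Path

from dgml_utils.segmentation import get_chunks_str  # assumed module/function

# Split a Docugami DGML file into LLM-ready chunks that keep their
# XML semantic labels attached.
dgml = Path("report.xml").read_text()
chunks = get_chunks_str(dgml, include_xml_tags=True)

for chunk in chunks[:5]:
    print(chunk.text)  # chunked text with semantic XML tags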
@@ -423,6 +434,7 @@
     {
      "cell_type": "code",
      "execution_count": 100,
      "id": "2b4ece00-2e43-4254-adc9-66dbb79139a6",
      "metadata": {},
      "outputs": [
       {
@@ -460,6 +472,7 @@
     {
      "cell_type": "code",
      "execution_count": 101,
      "id": "08350119-aa22-4ec1-8f65-b1316a0d4123",
      "metadata": {},
      "outputs": [
       {
@@ -476,6 +489,7 @@
     },
     {
      "cell_type": "markdown",
      "id": "dca87b46-c0c2-4973-94ec-689c18075653",
      "metadata": {},
      "source": [
       "The XML markup contains structural as well as semantic tags, which provide additional semantics to the LLM for improved retrieval and generation.\n",
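A toy illustration of why those tags help, using made-up element names rather than Docugami's actual schema (assumes lxml is installed):

from lxml import etree

# Structural tags group the text; semantic tags label what it means,
# which gives a retriever and an LLM more to key on than raw text.
dgml_fragment = """\
<chunk>
  <ReceivingParty>Jane Doe</ReceivingParty>
  <EffectiveDate>2023-11-15</EffectiveDate>
</chunk>
"""

root = etree.fromstring(dgml_fragment.encode())
for element in root.iter():
    if element.text and element.text.strip():
        print(element.tag, "->", element.text.strip())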
@@ -486,6 +500,7 @@
     {
      "cell_type": "code",
      "execution_count": 112,
      "id": "bcac8294-c54a-4b6e-af9d-3911a69620b2",
      "metadata": {},
      "outputs": [
       {
@@ -539,8 +554,8 @@
      "from langchain.chat_models import ChatOpenAI\n",
      "from langchain.prompts import (\n",
      "    ChatPromptTemplate,\n",
-     "    SystemMessagePromptTemplate,\n",
      "    HumanMessagePromptTemplate,\n",
+     "    SystemMessagePromptTemplate,\n",
      ")\n",
      "from langchain.schema.output_parser import StrOutputParser"
     ]
@@ -610,11 +625,12 @@
      "outputs": [],
      "source": [
      "import uuid\n",
-     "from langchain.vectorstores.chroma import Chroma\n",
-     "from langchain.storage import InMemoryStore\n",
-     "from langchain.schema.document import Document\n",
      "\n",
      "from langchain.embeddings import OpenAIEmbeddings\n",
      "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
+     "from langchain.schema.document import Document\n",
+     "from langchain.storage import InMemoryStore\n",
+     "from langchain.vectorstores.chroma import Chroma\n",
      "\n",
+     "\n",
      "def build_retriever(text_elements, tables, table_summaries):\n",
@@ -710,6 +726,7 @@
     {
      "cell_type": "code",
      "execution_count": 120,
      "id": "636e992f-823b-496b-a082-8b4fcd479de5",
      "metadata": {},
      "outputs": [
       {
@@ -743,6 +760,7 @@
     },
     {
      "cell_type": "markdown",
      "id": "86cad5db-81fe-4ae6-a20e-550b85fcbe96",
      "metadata": {},
      "source": [
       "# RAG on Llama2 paper\n",
@@ -753,6 +771,7 @@
     {
      "cell_type": "code",
      "execution_count": 121,
      "id": "0e4a2f43-dd48-4ae3-8e27-7e87d169965f",
      "metadata": {},
      "outputs": [
       {
@@ -777,6 +796,7 @@
     {
      "cell_type": "code",
      "execution_count": 124,
      "id": "56b78fb3-603d-4343-ae72-be54a3c5dd72",
      "metadata": {},
      "outputs": [
       {
@@ -801,6 +821,7 @@
     {
      "cell_type": "code",
      "execution_count": 125,
      "id": "d3cc5ba9-8553-4eda-a5d1-b799751186af",
      "metadata": {},
      "outputs": [],
      "source": [
@@ -812,6 +833,7 @@
     {
      "cell_type": "code",
      "execution_count": 126,
      "id": "d7c73faf-74cb-400d-8059-b69e2493de38",
      "metadata": {},
      "outputs": [],
      "source": [
@@ -823,6 +845,7 @@
     {
      "cell_type": "code",
      "execution_count": 127,
      "id": "4c553722-be42-42ce-83b8-76a17f323f1c",
      "metadata": {},
      "outputs": [],
      "source": [
@@ -832,6 +855,7 @@
     {
      "cell_type": "code",
      "execution_count": 128,
      "id": "65dce40b-f1c3-494a-949e-69a9c9544ddb",
      "metadata": {},
      "outputs": [
       {
@@ -851,6 +875,7 @@
     },
     {
      "cell_type": "markdown",
      "id": "59877edf-9a02-45db-95cb-b7f4234abfa3",
      "metadata": {},
      "source": [
       "We can check the [trace](https://smith.langchain.com/public/5de100c3-bb40-4234-bf02-64bc708686a1/r) to see what chunks were retrieved.\n",
@@ -939,6 +964,7 @@
     },
     {
      "cell_type": "markdown",
      "id": "0879349e-7298-4f2c-b246-f1142e97a8e5",
      "metadata": {},
      "source": []
     }
@@ -69,8 +69,8 @@
     "metadata": {},
     "outputs": [],
     "source": [
-    "from langchain.chains.llm_bash.prompt import BashOutputParser\n",
     "from langchain.prompts.prompt import PromptTemplate\n",
+    "from langchain_experimental.llm_bash.prompt import BashOutputParser\n",
     "\n",
     "_PROMPT_TEMPLATE = \"\"\"If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put \"#!/bin/bash\" in your answer. Make sure to reason step by step, using this format:\n",
     "Question: \"copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'\"\n",
@@ -26,6 +26,8 @@ lint format: PYTHON_FILES=.
 lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/experimental --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
 
 lint lint_diff:
+	./scripts/check_pydantic.sh .
+	./scripts/check_imports.sh
 	poetry run ruff .
 	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff
 	[ "$(PYTHON_FILES)" = "" ] || poetry run mypy $(PYTHON_FILES)
@@ -1,15 +1,16 @@
 """Base callback handler that can be used to handle callbacks in langchain."""
 from __future__ import annotations
 
-from typing import Any, Dict, List, Optional, Sequence, TypeVar, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, TypeVar, Union
 from uuid import UUID
 
 from tenacity import RetryCallState
 
-from langchain_core.agents import AgentAction, AgentFinish
-from langchain_core.documents import Document
-from langchain_core.messages import BaseMessage
-from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult
+if TYPE_CHECKING:
+    from langchain_core.agents import AgentAction, AgentFinish
+    from langchain_core.documents import Document
+    from langchain_core.messages import BaseMessage
+    from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult
 
 
 class RetrieverManagerMixin:
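The same `TYPE_CHECKING` refactor repeats through the rest of this diff: imports needed only for type annotations are deferred so they never execute at runtime, which trims import cost and avoids circular imports. A minimal sketch of the pattern (module and attribute names here are illustrative):

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen by type checkers such as mypy, never executed at runtime.
    from expensive_module import Document  # illustrative import


def count_words(doc: Document) -> int:
    # With `from __future__ import annotations`, annotations are stored
    # as strings, so Document does not need to exist at runtime.
    return len(doc.page_content.split())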
@@ -7,6 +7,7 @@ import uuid
 from concurrent.futures import ThreadPoolExecutor
 from contextlib import asynccontextmanager, contextmanager
 from typing import (
+    TYPE_CHECKING,
     Any,
     AsyncGenerator,
     Coroutine,
@@ -25,7 +26,6 @@ from uuid import UUID
 from langsmith.run_helpers import get_run_tree_context
 from tenacity import RetryCallState
 
-from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.callbacks.base import (
     BaseCallbackHandler,
     BaseCallbackManager,
@@ -37,11 +37,14 @@ from langchain_core.callbacks.base import (
     ToolManagerMixin,
 )
 from langchain_core.callbacks.stdout import StdOutCallbackHandler
-from langchain_core.documents import Document
 from langchain_core.messages import BaseMessage, get_buffer_string
-from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult
 from langchain_core.utils.env import env_var_is_set
 
+if TYPE_CHECKING:
+    from langchain_core.agents import AgentAction, AgentFinish
+    from langchain_core.documents import Document
+    from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult
+
 logger = logging.getLogger(__name__)
@@ -1,11 +1,15 @@
 """Callback Handler that prints to std out."""
-from typing import Any, Dict, List, Optional
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
 
-from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.callbacks.base import BaseCallbackHandler
-from langchain_core.outputs import LLMResult
 from langchain_core.utils import print_text
 
+if TYPE_CHECKING:
+    from langchain_core.agents import AgentAction, AgentFinish
+    from langchain_core.outputs import LLMResult
+
 
 class StdOutCallbackHandler(BaseCallbackHandler):
     """Callback Handler that prints to std out."""
@@ -1,11 +1,15 @@
 """Callback Handler streams to stdout on new llm token."""
-import sys
-from typing import Any, Dict, List
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING, Any, Dict, List
 
-from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.callbacks.base import BaseCallbackHandler
-from langchain_core.messages import BaseMessage
-from langchain_core.outputs import LLMResult
+
+if TYPE_CHECKING:
+    from langchain_core.agents import AgentAction, AgentFinish
+    from langchain_core.messages import BaseMessage
+    from langchain_core.outputs import LLMResult
 
 
 class StreamingStdOutCallbackHandler(BaseCallbackHandler):
@@ -3,9 +3,10 @@ from __future__ import annotations
 import asyncio
 from abc import ABC, abstractmethod
 from functools import partial
-from typing import Any, Sequence
+from typing import TYPE_CHECKING, Any, Sequence
 
-from langchain_core.documents import Document
+if TYPE_CHECKING:
+    from langchain_core.documents import Document
 
 
 class BaseDocumentTransformer(ABC):
@@ -1,13 +1,15 @@
 """Example selector that selects examples based on SemanticSimilarity."""
 from __future__ import annotations
 
-from typing import Any, Dict, List, Optional, Type
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type
 
-from langchain_core.embeddings import Embeddings
 from langchain_core.example_selectors.base import BaseExampleSelector
 from langchain_core.pydantic_v1 import BaseModel, Extra
 from langchain_core.vectorstores import VectorStore
 
+if TYPE_CHECKING:
+    from langchain_core.embeddings import Embeddings
+
 
 def sorted_values(values: Dict[str, str]) -> List[Any]:
     """Return a list of values in dict sorted by key."""
@@ -16,13 +16,13 @@ from typing import (
 from typing_extensions import TypeAlias
 
 from langchain_core.messages import AnyMessage, BaseMessage, get_buffer_string
-from langchain_core.outputs import LLMResult
 from langchain_core.prompt_values import PromptValue
 from langchain_core.runnables import RunnableSerializable
 from langchain_core.utils import get_pydantic_field_names
 
 if TYPE_CHECKING:
     from langchain_core.callbacks import Callbacks
+    from langchain_core.outputs import LLMResult
 
 
 @lru_cache(maxsize=None)  # Cache the tokenizer
@@ -1,9 +1,12 @@
 from __future__ import annotations
 
 import asyncio
 import inspect
 import warnings
 from abc import ABC, abstractmethod
 from functools import partial
 from typing import (
+    TYPE_CHECKING,
     Any,
     AsyncIterator,
     Dict,
@@ -41,7 +44,9 @@ from langchain_core.outputs import (
 )
 from langchain_core.prompt_values import ChatPromptValue, PromptValue, StringPromptValue
 from langchain_core.pydantic_v1 import Field, root_validator
-from langchain_core.runnables import RunnableConfig
+
+if TYPE_CHECKING:
+    from langchain_core.runnables import RunnableConfig
 
 
 def _get_verbosity() -> bool:
@@ -37,10 +37,10 @@ from tenacity import (
     wait_exponential,
 )
 
-from langchain_core.callbacks.base import BaseCallbackManager
-from langchain_core.callbacks.manager import (
+from langchain_core.callbacks import (
     AsyncCallbackManager,
     AsyncCallbackManagerForLLMRun,
+    BaseCallbackManager,
     CallbackManager,
     CallbackManagerForLLMRun,
     Callbacks,
@@ -54,6 +54,8 @@ def get_buffer_string(
         role = "System"
     elif isinstance(m, FunctionMessage):
         role = "Function"
+    elif isinstance(m, ToolMessage):
+        role = "Tool"
     elif isinstance(m, ChatMessage):
         role = m.role
     else:
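The new branch gives ToolMessage its own `Tool:` prefix; the unit tests added later in this diff assert exactly this rendering:

from langchain_core.messages import ToolMessage, get_buffer_string

msg = ToolMessage(tool_call_id="tool_id", content="tool")
print(get_buffer_string([msg]))  # -> Tool: tool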
@@ -4,6 +4,7 @@ import asyncio
 import functools
 from abc import ABC, abstractmethod
 from typing import (
+    TYPE_CHECKING,
     Any,
     Dict,
     Generic,
@@ -18,9 +19,11 @@ from typing_extensions import get_args
 
 from langchain_core.messages import AnyMessage, BaseMessage
 from langchain_core.outputs import ChatGeneration, Generation
-from langchain_core.prompt_values import PromptValue
 from langchain_core.runnables import RunnableConfig, RunnableSerializable
 
+if TYPE_CHECKING:
+    from langchain_core.prompt_values import PromptValue
+
 T = TypeVar("T")
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 from typing import (
+    TYPE_CHECKING,
     Any,
     AsyncIterator,
     Iterator,
@@ -16,7 +17,9 @@ from langchain_core.outputs import (
     Generation,
     GenerationChunk,
 )
-from langchain_core.runnables import RunnableConfig
+
+if TYPE_CHECKING:
+    from langchain_core.runnables import RunnableConfig
 
 
 class BaseTransformOutputParser(BaseOutputParser[T]):
@@ -41,7 +41,7 @@ from langchain_core.prompts.few_shot import (
 from langchain_core.prompts.few_shot_with_templates import FewShotPromptWithTemplates
 from langchain_core.prompts.loading import load_prompt
 from langchain_core.prompts.pipeline import PipelinePromptTemplate
-from langchain_core.prompts.prompt import Prompt, PromptTemplate
+from langchain_core.prompts.prompt import PromptTemplate
 from langchain_core.prompts.string import (
     StringPromptTemplate,
     check_valid_template,
@@ -62,7 +62,6 @@ __all__ = [
     "HumanMessagePromptTemplate",
     "MessagesPlaceholder",
     "PipelinePromptTemplate",
-    "Prompt",
     "PromptTemplate",
     "StringPromptTemplate",
     "SystemMessagePromptTemplate",
@@ -3,16 +3,32 @@ from __future__ import annotations
 import json
 from abc import ABC, abstractmethod
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Mapping, Optional, Type, Union
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    List,
+    Mapping,
+    Optional,
+    Type,
+    Union,
+)
 
 import yaml
 
-from langchain_core.documents import Document
 from langchain_core.output_parsers.base import BaseOutputParser
-from langchain_core.prompt_values import PromptValue
+from langchain_core.prompt_values import (
+    ChatPromptValueConcrete,
+    PromptValue,
+    StringPromptValue,
+)
 from langchain_core.pydantic_v1 import BaseModel, Field, create_model, root_validator
 from langchain_core.runnables import RunnableConfig, RunnableSerializable
 
+if TYPE_CHECKING:
+    from langchain_core.documents import Document
+
 
 class BasePromptTemplate(RunnableSerializable[Dict, PromptValue], ABC):
     """Base class for all prompt templates, returning a prompt."""
@@ -40,11 +56,6 @@ class BasePromptTemplate(RunnableSerializable[Dict, PromptValue], ABC):
 
     @property
     def OutputType(self) -> Any:
-        from langchain_core.prompt_values import (
-            ChatPromptValueConcrete,
-            StringPromptValue,
-        )
-
         return Union[StringPromptValue, ChatPromptValueConcrete]
 
     def get_input_schema(
@@ -8,6 +8,7 @@ from typing import (
     Callable,
     Dict,
     List,
     Optional,
     Sequence,
     Set,
     Tuple,
@@ -136,6 +137,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
         cls: Type[MessagePromptTemplateT],
         template: str,
         template_format: str = "f-string",
+        partial_variables: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> MessagePromptTemplateT:
         """Create a class from a string template.
@@ -143,12 +145,21 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
         Args:
             template: a template.
             template_format: format of the template.
+            partial_variables: A dictionary of variables that can be used to partially
+                fill in the template. For example, if the template is
+                `"{variable1} {variable2}"`, and `partial_variables` is
+                `{"variable1": "foo"}`, then the final prompt will be
+                `"foo {variable2}"`.
             **kwargs: keyword arguments to pass to the constructor.
 
         Returns:
             A new instance of this class.
         """
-        prompt = PromptTemplate.from_template(template, template_format=template_format)
+        prompt = PromptTemplate.from_template(
+            template,
+            template_format=template_format,
+            partial_variables=partial_variables,
+        )
         return cls(prompt=prompt, **kwargs)
 
     @classmethod
@@ -244,7 +244,3 @@ class PromptTemplate(StringPromptTemplate):
             partial_variables=_partial_variables,
             **kwargs,
         )
-
-
-# For backwards compatibility.
-Prompt = PromptTemplate
@@ -31,17 +31,6 @@ from typing import (
 
 from typing_extensions import Literal, get_args
 
-if TYPE_CHECKING:
-    from langchain_core.callbacks.manager import (
-        AsyncCallbackManagerForChainRun,
-        CallbackManagerForChainRun,
-    )
-    from langchain_core.runnables.fallbacks import (
-        RunnableWithFallbacks as RunnableWithFallbacksT,
-    )
-    from langchain_core.tracers.log_stream import RunLog, RunLogPatch
-    from langchain_core.tracers.root_listeners import Listener
-
 from langchain_core.load.dump import dumpd
 from langchain_core.load.serializable import Serializable
 from langchain_core.pydantic_v1 import BaseModel, Field, create_model
@@ -75,6 +64,18 @@ from langchain_core.runnables.utils import (
 from langchain_core.utils.aiter import atee, py_anext
 from langchain_core.utils.iter import safetee
 
+if TYPE_CHECKING:
+    from langchain_core.callbacks.manager import (
+        AsyncCallbackManagerForChainRun,
+        CallbackManagerForChainRun,
+    )
+    from langchain_core.runnables.fallbacks import (
+        RunnableWithFallbacks as RunnableWithFallbacksT,
+    )
+    from langchain_core.tracers.log_stream import RunLog, RunLogPatch
+    from langchain_core.tracers.root_listeners import Listener
+
 
 Other = TypeVar("Other")
@@ -9,10 +9,10 @@ from functools import partial
 from inspect import signature
 from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
 
-from langchain_core.callbacks.base import BaseCallbackManager
-from langchain_core.callbacks.manager import (
+from langchain_core.callbacks import (
     AsyncCallbackManager,
     AsyncCallbackManagerForToolRun,
+    BaseCallbackManager,
     CallbackManager,
     CallbackManagerForToolRun,
     Callbacks,
@@ -4,13 +4,12 @@ from __future__ import annotations
 import logging
 from abc import ABC, abstractmethod
 from datetime import datetime
-from typing import Any, Dict, List, Optional, Sequence, Union, cast
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union, cast
 from uuid import UUID
 
 from tenacity import RetryCallState
 
 from langchain_core.callbacks.base import BaseCallbackHandler
-from langchain_core.documents import Document
 from langchain_core.exceptions import TracerException
 from langchain_core.load import dumpd
 from langchain_core.outputs import (
@@ -21,6 +20,9 @@ from langchain_core.outputs import (
 )
 from langchain_core.tracers.schemas import Run
 
+if TYPE_CHECKING:
+    from langchain_core.documents import Document
+
 logger = logging.getLogger(__name__)
@@ -4,7 +4,7 @@ from __future__ import annotations
 import logging
 import threading
 import weakref
-from concurrent.futures import Future, ThreadPoolExecutor, wait
+from concurrent.futures import Future, ThreadPoolExecutor, as_completed, wait
 from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
 from uuid import UUID
 
@@ -22,12 +22,33 @@ logger = logging.getLogger(__name__)
 _TRACERS: weakref.WeakSet[EvaluatorCallbackHandler] = weakref.WeakSet()
 
 
-def wait_for_all_evaluators() -> None:
-    """Wait for all tracers to finish."""
+def wait_for_all_evaluators(verbose: bool = True) -> None:
+    """Wait for all tracers to finish using concurrent futures."""
     global _TRACERS
-    for tracer in list(_TRACERS):
-        if tracer is not None:
-            tracer.wait_for_futures()
+    tracers = list(_TRACERS)
+    futures = []
+
+    with ThreadPoolExecutor() as executor:
+        for tracer in tracers:
+            if tracer is not None:
+                # Submit each wait_for_futures call to the executor
+                futures.append(executor.submit(tracer.wait_for_futures))
+
+        if verbose:
+            try:
+                from tqdm.auto import tqdm  # noqa: F401
+
+                # Iterate over futures as they complete
+                for future in tqdm(
+                    as_completed(futures),
+                    total=len(futures),
+                    desc="Waiting for evaluators to finish",
+                ):
+                    future.result()  # You can handle results or exceptions here
+            except ImportError:
+                # If tqdm is not installed, just wait without progress bar
+                for future in as_completed(futures):
+                    future.result()
+
+
 class EvaluatorCallbackHandler(BaseTracer):
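A short usage sketch of the reworked helper (module path assumed from this diff's layout; tqdm is optional):

from langchain_core.tracers.evaluation import wait_for_all_evaluators

# Blocks until every registered evaluator tracer has drained its futures,
# showing a "Waiting for evaluators to finish" bar when tqdm is installed.
wait_for_all_evaluators(verbose=True)

# verbose=False still waits: leaving the ThreadPoolExecutor context joins
# all submitted wait_for_futures calls.
wait_for_all_evaluators(verbose=False)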
@@ -5,7 +5,7 @@ import logging
 import weakref
 from concurrent.futures import Future, ThreadPoolExecutor, wait
 from datetime import datetime
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
 from uuid import UUID
 
 from langsmith import Client
@@ -19,10 +19,12 @@ from tenacity import (
 
 from langchain_core.env import get_runtime_environment
 from langchain_core.load import dumpd
-from langchain_core.messages import BaseMessage
 from langchain_core.tracers.base import BaseTracer
 from langchain_core.tracers.schemas import Run
 
+if TYPE_CHECKING:
+    from langchain_core.messages import BaseMessage
+
 logger = logging.getLogger(__name__)
 _LOGGED = set()
 _TRACERS: weakref.WeakSet[LangChainTracer] = weakref.WeakSet()
@@ -21,7 +21,6 @@ from typing import (
     TypeVar,
 )
 
-from langchain_core.documents import Document
 from langchain_core.embeddings import Embeddings
 from langchain_core.pydantic_v1 import Field, root_validator
 from langchain_core.retrievers import BaseRetriever
@@ -31,6 +30,7 @@ if TYPE_CHECKING:
         AsyncCallbackManagerForRetrieverRun,
         CallbackManagerForRetrieverRun,
     )
+    from langchain_core.documents import Document
 
 logger = logging.getLogger(__name__)
libs/core/poetry.lock (generated; 14 changed lines)
@@ -793,7 +793,6 @@ optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
 files = [
     {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"},
     {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"},
 ]
 
 [[package]]
@@ -2524,6 +2523,17 @@
 [package.dependencies]
 urllib3 = ">=2"
 
+[[package]]
+name = "types-tqdm"
+version = "4.66.0.4"
+description = "Typing stubs for tqdm"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "types-tqdm-4.66.0.4.tar.gz", hash = "sha256:a2f0ebd4cfd48f4914395819a176d7947387e1b98f9228fca38f8cac1b59891c"},
+    {file = "types_tqdm-4.66.0.4-py3-none-any.whl", hash = "sha256:8eda4c5123dd66985a4cb44268705cfa18beb32d66772271ae185e92b8b10c40"},
+]
+
 [[package]]
 name = "typing-extensions"
 version = "4.8.0"
@@ -2686,4 +2696,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "b08d47f726dd194af0f801d300402b174c8db96a4184cc1136cb8e5a0e287190"
+content-hash = "6c0cc519dbf48494aa7f95416fa777406ae0a3c0b68d1056b38fd2e1446ea718"
@@ -22,6 +22,7 @@ ruff = "^0.1.5"
|
||||
mypy = "^0.991"
|
||||
types-pyyaml = "^6.0.12.2"
|
||||
types-requests = "^2.28.11.5"
|
||||
types-tqdm = "^4.66.0.4"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
jupyter = "^1.0.0"
|
||||
|
||||
libs/core/scripts/check_imports.sh (new executable file, 17 lines)
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -eu
+
+# Initialize a variable to keep track of errors
+errors=0
+
+# make sure not importing from langchain or langchain_experimental
+git --no-pager grep '^from langchain\.' . && errors=$((errors+1))
+git --no-pager grep '^from langchain_experimental\.' . && errors=$((errors+1))
+
+# Decide on an exit status based on the errors
+if [ "$errors" -gt 0 ]; then
+    exit 1
+else
+    exit 0
+fi
libs/core/scripts/check_pydantic.sh (new executable file, 27 lines)
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# This script searches for lines starting with "import pydantic" or "from pydantic"
+# in tracked files within a Git repository.
+#
+# Usage: ./scripts/check_pydantic.sh /path/to/repository
+
+# Check if a path argument is provided
+if [ $# -ne 1 ]; then
+  echo "Usage: $0 /path/to/repository"
+  exit 1
+fi
+
+repository_path="$1"
+
+# Search for lines matching the pattern within the specified repository
+result=$(git -C "$repository_path" grep -E '^import pydantic|^from pydantic')
+
+# Check if any matching lines were found
+if [ -n "$result" ]; then
+  echo "ERROR: The following lines need to be updated:"
+  echo "$result"
+  echo "Please replace the code with an import from langchain_core.pydantic_v1."
+  echo "For example, replace 'from pydantic import BaseModel'"
+  echo "with 'from langchain_core.pydantic_v1 import BaseModel'"
+  exit 1
+fi
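The fix the script asks for is a one-line import swap, per its own message:

# Flagged by check_pydantic.sh:
#   from pydantic import BaseModel
# Compliant replacement:
from langchain_core.pydantic_v1 import BaseModel


class RetrySettings(BaseModel):  # illustrative model
    max_retries: int = 6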
libs/core/tests/unit_tests/language_models/llms/test_base.py (new file, 19 lines)
@@ -0,0 +1,19 @@
+from tests.unit_tests.fake.llm import FakeListLLM
+
+
+def test_batch() -> None:
+    llm = FakeListLLM(responses=["foo"] * 3)
+    output = llm.batch(["foo", "bar", "foo"])
+    assert output == ["foo"] * 3
+
+    output = llm.batch(["foo", "bar", "foo"], config={"max_concurrency": 2})
+    assert output == ["foo"] * 3
+
+
+async def test_abatch() -> None:
+    llm = FakeListLLM(responses=["foo"] * 3)
+    output = await llm.abatch(["foo", "bar", "foo"])
+    assert output == ["foo"] * 3
+
+    output = await llm.abatch(["foo", "bar", "foo"], config={"max_concurrency": 2})
+    assert output == ["foo"] * 3
@@ -13,7 +13,6 @@ EXPECTED_ALL = [
     "HumanMessagePromptTemplate",
     "MessagesPlaceholder",
     "PipelinePromptTemplate",
-    "Prompt",
     "PromptTemplate",
     "StringPromptTemplate",
     "SystemMessagePromptTemplate",
@@ -1,10 +1,21 @@
+import unittest
+
 import pytest
 
 from langchain_core.messages import (
     AIMessage,
     AIMessageChunk,
     ChatMessage,
     ChatMessageChunk,
     FunctionMessage,
     FunctionMessageChunk,
     HumanMessage,
     HumanMessageChunk,
+    SystemMessage,
+    ToolMessage,
+    get_buffer_string,
     messages_from_dict,
     messages_to_dict,
 )
@@ -100,3 +111,76 @@ def test_ani_message_chunks() -> None:
     AIMessageChunk(example=True, content="I am") + AIMessageChunk(
         example=False, content=" indeed."
     )
+
+
+class TestGetBufferString(unittest.TestCase):
+    def setUp(self) -> None:
+        self.human_msg = HumanMessage(content="human")
+        self.ai_msg = AIMessage(content="ai")
+        self.sys_msg = SystemMessage(content="system")
+        self.func_msg = FunctionMessage(name="func", content="function")
+        self.tool_msg = ToolMessage(tool_call_id="tool_id", content="tool")
+        self.chat_msg = ChatMessage(role="Chat", content="chat")
+
+    def test_empty_input(self) -> None:
+        self.assertEqual(get_buffer_string([]), "")
+
+    def test_valid_single_message(self) -> None:
+        expected_output = f"Human: {self.human_msg.content}"
+        self.assertEqual(
+            get_buffer_string([self.human_msg]),
+            expected_output,
+        )
+
+    def test_custom_human_prefix(self) -> None:
+        prefix = "H"
+        expected_output = f"{prefix}: {self.human_msg.content}"
+        self.assertEqual(
+            get_buffer_string([self.human_msg], human_prefix="H"),
+            expected_output,
+        )
+
+    def test_custom_ai_prefix(self) -> None:
+        prefix = "A"
+        expected_output = f"{prefix}: {self.ai_msg.content}"
+        self.assertEqual(
+            get_buffer_string([self.ai_msg], ai_prefix="A"),
+            expected_output,
+        )
+
+    def test_multiple_msg(self) -> None:
+        msgs = [
+            self.human_msg,
+            self.ai_msg,
+            self.sys_msg,
+            self.func_msg,
+            self.tool_msg,
+            self.chat_msg,
+        ]
+        expected_output = "\n".join(
+            [
+                "Human: human",
+                "AI: ai",
+                "System: system",
+                "Function: function",
+                "Tool: tool",
+                "Chat: chat",
+            ]
+        )
+        self.assertEqual(
+            get_buffer_string(msgs),
+            expected_output,
+        )
 
 
 def test_multiple_msg() -> None:
     human_msg = HumanMessage(content="human", additional_kwargs={"key": "value"})
     ai_msg = AIMessage(content="ai")
     sys_msg = SystemMessage(content="sys")
 
     msgs = [
         human_msg,
         ai_msg,
         sys_msg,
     ]
     assert messages_from_dict(messages_to_dict(msgs)) == msgs
@@ -3,6 +3,7 @@ from langchain_core.language_models.chat_models import (
     SimpleChatModel,
     _agenerate_from_stream,
     _generate_from_stream,
+    _get_verbosity,
 )
 
 __all__ = [
@@ -10,4 +11,5 @@ __all__ = [
     "SimpleChatModel",
     "_generate_from_stream",
     "_agenerate_from_stream",
+    "_get_verbosity",
 ]
@@ -28,7 +28,7 @@ class ChatPromptAdapter:
     ) -> str:
         if provider == "anthropic":
             prompt = convert_messages_to_prompt_anthropic(messages=messages)
-        if provider == "meta":
+        elif provider == "meta":
             prompt = convert_messages_to_prompt_llama(messages=messages)
         else:
             raise NotImplementedError(
@@ -115,10 +115,16 @@ class VoyageEmbeddings(BaseModel, Embeddings):
         return params
 
     def _get_embeddings(
-        self, texts: List[str], batch_size: int, input_type: Optional[str] = None
+        self,
+        texts: List[str],
+        batch_size: Optional[int] = None,
+        input_type: Optional[str] = None,
     ) -> List[List[float]]:
         embeddings: List[List[float]] = []
 
+        if batch_size is None:
+            batch_size = self.batch_size
+
         if self.show_progress_bar:
             try:
                 from tqdm.auto import tqdm
@@ -168,9 +174,24 @@ class VoyageEmbeddings(BaseModel, Embeddings):
         Args:
             text: The text to embed.
 
         Returns:
             Embedding for the text.
         """
-        return self._get_embeddings(
-            [text], batch_size=self.batch_size, input_type="query"
-        )[0]
+        return self._get_embeddings([text], input_type="query")[0]
+
+    def embed_general_texts(
+        self, texts: List[str], *, input_type: Optional[str] = None
+    ) -> List[List[float]]:
+        """Call out to Voyage Embedding endpoint for embedding general text.
+
+        Args:
+            texts: The list of texts to embed.
+            input_type: Type of the input text. Default to None, meaning the type is
+                unspecified. Other options: query, document.
+
+        Returns:
+            Embedding for the text.
+        """
+        return self._get_embeddings(
+            texts, batch_size=self.batch_size, input_type=input_type
+        )
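A usage sketch of the new method (assumes a VOYAGE_API_KEY is configured; the model name and import path are taken as given from this diff's era of langchain):

from langchain.embeddings import VoyageEmbeddings

embeddings = VoyageEmbeddings(model="voyage-01")  # illustrative model name

# input_type stays None for unspecified text, or "query"/"document"
# to tag the texts explicitly.
vectors = embeddings.embed_general_texts(
    ["first passage", "second passage"], input_type="document"
)
print(len(vectors), len(vectors[0]))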
@@ -1,6 +1,9 @@
 # Backwards compatibility.
+from langchain_core.language_models import BaseLanguageModel
 from langchain_core.language_models.llms import (
     LLM,
     BaseLLM,
+    _get_verbosity,
     create_base_retry_decorator,
     get_prompts,
     update_cache,
@@ -10,6 +13,8 @@ __all__ = [
     "create_base_retry_decorator",
     "get_prompts",
     "update_cache",
+    "BaseLanguageModel",
+    "_get_verbosity",
     "BaseLLM",
     "LLM",
 ]
@@ -44,7 +44,6 @@ from langchain_core.prompts import (
     HumanMessagePromptTemplate,
     MessagesPlaceholder,
     PipelinePromptTemplate,
-    Prompt,
     PromptTemplate,
     StringPromptTemplate,
     SystemMessagePromptTemplate,
@@ -52,6 +51,7 @@ from langchain_core.prompts import (
 )
 
 from langchain.prompts.example_selector import NGramOverlapExampleSelector
+from langchain.prompts.prompt import Prompt
 
 __all__ = [
     "AIMessagePromptTemplate",
@@ -67,11 +67,11 @@ __all__ = [
     "MessagesPlaceholder",
     "NGramOverlapExampleSelector",
     "PipelinePromptTemplate",
-    "Prompt",
     "PromptTemplate",
     "SemanticSimilarityExampleSelector",
     "StringPromptTemplate",
     "SystemMessagePromptTemplate",
     "load_prompt",
     "FewShotChatMessagePromptTemplate",
+    "Prompt",
 ]
@@ -1,3 +1,4 @@
+from langchain_core.prompt_values import StringPromptValue
 from langchain_core.prompts import (
     BasePromptTemplate,
     StringPromptTemplate,
@@ -6,6 +7,7 @@ from langchain_core.prompts import (
     jinja2_formatter,
     validate_jinja2,
 )
+from langchain_core.prompts.string import _get_jinja2_variables_from_template
 
 __all__ = [
     "jinja2_formatter",
@@ -14,4 +16,6 @@ __all__ = [
     "get_template_variables",
     "StringPromptTemplate",
     "BasePromptTemplate",
+    "StringPromptValue",
+    "_get_jinja2_variables_from_template",
 ]
@@ -1,3 +1,4 @@
+from langchain_core.prompt_values import ChatPromptValue, ChatPromptValueConcrete
 from langchain_core.prompts.chat import (
     AIMessagePromptTemplate,
     BaseChatPromptTemplate,
@@ -8,6 +9,8 @@ from langchain_core.prompts.chat import (
     HumanMessagePromptTemplate,
     MessagesPlaceholder,
     SystemMessagePromptTemplate,
+    _convert_to_message,
+    _create_template_from_message_type,
 )
 
 __all__ = [
@@ -20,4 +23,8 @@ __all__ = [
     "SystemMessagePromptTemplate",
     "BaseChatPromptTemplate",
     "ChatPromptTemplate",
+    "ChatPromptValue",
+    "ChatPromptValueConcrete",
+    "_convert_to_message",
+    "_create_template_from_message_type",
 ]
@@ -1,6 +1,11 @@
 from langchain_core.prompts.few_shot import (
     FewShotChatMessagePromptTemplate,
     FewShotPromptTemplate,
+    _FewShotPromptTemplateMixin,
 )
 
-__all__ = ["FewShotPromptTemplate", "FewShotChatMessagePromptTemplate"]
+__all__ = [
+    "FewShotPromptTemplate",
+    "FewShotChatMessagePromptTemplate",
+    "_FewShotPromptTemplateMixin",
+]
@@ -1,4 +1,23 @@
-from langchain_core.prompts.loading import load_prompt, load_prompt_from_config
+from langchain_core.prompts.loading import (
+    _load_examples,
+    _load_few_shot_prompt,
+    _load_output_parser,
+    _load_prompt,
+    _load_prompt_from_file,
+    _load_template,
+    load_prompt,
+    load_prompt_from_config,
+)
 from langchain_core.utils.loading import try_load_from_hub
 
-__all__ = ["load_prompt_from_config", "load_prompt", "try_load_from_hub"]
+__all__ = [
+    "load_prompt_from_config",
+    "load_prompt",
+    "try_load_from_hub",
+    "_load_examples",
+    "_load_few_shot_prompt",
+    "_load_output_parser",
+    "_load_prompt",
+    "_load_prompt_from_file",
+    "_load_template",
+]
@@ -1,3 +1,3 @@
-from langchain_core.prompts.pipeline import PipelinePromptTemplate
+from langchain_core.prompts.pipeline import PipelinePromptTemplate, _get_inputs
 
-__all__ = ["PipelinePromptTemplate"]
+__all__ = ["PipelinePromptTemplate", "_get_inputs"]
@@ -1,3 +1,6 @@
 from langchain_core.prompts.prompt import PromptTemplate
 
-__all__ = ["PromptTemplate"]
+# For backwards compatibility.
+Prompt = PromptTemplate
+
+__all__ = ["PromptTemplate", "Prompt"]
@@ -996,7 +996,7 @@ def _collect_test_results(
     configs: List[RunnableConfig],
     project_name: str,
 ) -> TestResult:
-    wait_for_all_evaluators()
+    wait_for_all_evaluators(verbose=True)
     all_eval_results = {}
     all_execution_time = {}
     for c in configs:
@@ -4,6 +4,9 @@ from langchain_core.tools import (
     StructuredTool,
     Tool,
     ToolException,
+    _create_subset_model,
+    _get_filtered_args,
+    _SchemaConfig,
     create_schema_from_function,
     tool,
 )
@@ -16,4 +19,7 @@ __all__ = [
     "Tool",
     "StructuredTool",
     "tool",
+    "_SchemaConfig",
+    "_create_subset_model",
+    "_get_filtered_args",
 ]
@@ -1381,7 +1381,7 @@ def _prepare_metadata(metadata: Dict[str, Any]) -> Dict[str, Any]:
 
     clean_meta: Dict[str, Union[str, float, int]] = {}
     for key, value in metadata.items():
-        if not value:
+        if value is None:
             clean_meta[key] = ""
             continue
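The one-token change matters because `not value` is also true for legitimate falsy metadata values; only `None` should be blanked:

# Old check `if not value:` blanked all of these; the new check keeps
# everything except None.
for value in (None, 0, 0.0, False, ""):
    print(repr(value), "->", repr("" if value is None else value))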
libs/langchain/poetry.lock (generated; 39 changed lines)
@@ -3778,7 +3778,6 @@ optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
|
||||
files = [
|
||||
{file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"},
|
||||
{file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4603,16 +4602,6 @@ files = [
|
||||
{file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
|
||||
{file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
|
||||
{file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
|
||||
{file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
|
||||
{file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
|
||||
{file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
|
||||
@@ -6515,8 +6504,6 @@ files = [
|
||||
{file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"},
|
||||
{file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"},
|
||||
{file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"},
|
||||
{file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"},
|
||||
{file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"},
|
||||
{file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"},
|
||||
{file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"},
|
||||
{file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"},
|
||||
@@ -6559,7 +6546,6 @@ files = [
|
||||
{file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"},
|
||||
{file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"},
|
||||
{file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"},
|
||||
{file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"},
|
||||
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"},
|
||||
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"},
|
||||
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"},
|
||||
@@ -6568,8 +6554,6 @@ files = [
|
||||
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"},
|
||||
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"},
|
||||
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"},
|
||||
{file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"},
|
||||
{file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"},
|
||||
{file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"},
|
||||
{file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"},
|
||||
{file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"},
|
||||
@@ -7716,7 +7700,6 @@ files = [
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
|
||||
@@ -7724,15 +7707,8 @@ files = [
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
|
||||
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
|
||||
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
|
||||
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
|
||||
@@ -7749,7 +7725,6 @@ files = [
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
|
||||
@@ -7757,7 +7732,6 @@ files = [
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
|
||||
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
|
||||
@@ -10214,6 +10188,17 @@ files = [
|
||||
{file = "types_toml-0.10.8.7-py3-none-any.whl", hash = "sha256:61951da6ad410794c97bec035d59376ce1cbf4453dc9b6f90477e81e4442d631"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "types-tqdm"
|
||||
version = "4.66.0.4"
|
||||
description = "Typing stubs for tqdm"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "types-tqdm-4.66.0.4.tar.gz", hash = "sha256:a2f0ebd4cfd48f4914395819a176d7947387e1b98f9228fca38f8cac1b59891c"},
|
||||
{file = "types_tqdm-4.66.0.4-py3-none-any.whl", hash = "sha256:8eda4c5123dd66985a4cb44268705cfa18beb32d66772271ae185e92b8b10c40"},
|
||||
]
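Aside, not part of the changeset: types-tqdm ships PEP 561 type stubs, which is what lets mypy check tqdm call sites once the package lands in the typing dependency group below. A minimal sketch of the kind of code that becomes checkable (the function here is illustrative, not taken from the repo):

from typing import Iterable

from tqdm import tqdm


def count_chars(lines: Iterable[str]) -> int:
    # With the stubs installed, mypy knows tqdm(lines) yields str,
    # so len(line) type-checks; without them, tqdm is an untyped import.
    return sum(len(line) for line in tqdm(lines, desc="counting"))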

[[package]]
name = "types-urllib3"
version = "1.26.25.14"

@@ -11085,4 +11070,4 @@ text-helpers = ["chardet"]

[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "54acd6074885fa2e4069cdba7dfe5b497ca4820b506983a5792fb1e78b53f4a3"
content-hash = "84ead96758fe7104796f18249521e2ff0a3408f916bd15f7df493c027a28da0c"

@@ -201,6 +201,7 @@ types-toml = "^0.10.8.1"
types-redis = "^4.3.21.6"
types-pytz = "^2023.3.0.0"
types-chardet = "^5.0.4.6"
types-tqdm = "^4.66.0.4"
mypy-protobuf = "^3.0.0"

[tool.poetry.group.typing.dependencies]

@@ -24,6 +24,9 @@ git grep '^from langchain\.' langchain/embeddings | grep -vE 'from langchain.(py
git grep '^from langchain\.' langchain/docstore | grep -vE 'from langchain.(pydantic_v1|utils|schema|docstore)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/vectorstores | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|callbacks|env|_api|storage|llms|docstore|vectorstores|utilities)' && errors=$((errors+1))

# make sure not importing from langchain_experimental
git --no-pager grep '^from langchain_experimental\.' . && errors=$((errors+1))

# Decide on an exit status based on the errors
if [ "$errors" -gt 0 ]; then
    exit 1
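Aside: this shell snippet is the import-boundary lint. Each git grep prints any import that escapes the module's allowed list, and because a successful grep runs errors=$((errors+1)), every offending module bumps the counter; the final check turns a nonzero count into exit 1 so CI fails.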

13 libs/langchain/tests/unit_tests/chat_models/test_base.py Normal file
@@ -0,0 +1,13 @@
from langchain.chat_models.base import __all__

EXPECTED_ALL = [
    "BaseChatModel",
    "SimpleChatModel",
    "_agenerate_from_stream",
    "_generate_from_stream",
    "_get_verbosity",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
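Aside: this test, like the near-identical ones added below, pins the module's public surface. If a later refactor adds or removes a name from __all__, the set comparison fails, so the change has to be made deliberately rather than slipping through.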

@@ -1,8 +1,10 @@
"""Test Anthropic Chat API wrapper."""
from typing import List
from unittest.mock import MagicMock

import pytest

from langchain.chat_models import BedrockChat
from langchain.chat_models.meta import convert_messages_to_prompt_llama
from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage

@@ -28,3 +30,19 @@ from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
def test_formatting(messages: List[BaseMessage], expected: str) -> None:
    result = convert_messages_to_prompt_llama(messages)
    assert result == expected


def test_anthropic_bedrock() -> None:
    client = MagicMock()
    respbody = MagicMock(
        read=MagicMock(
            return_value=MagicMock(
                decode=MagicMock(return_value=b'{"completion":"Hi back"}')
            )
        )
    )
    client.invoke_model.return_value = {"body": respbody}
    model = BedrockChat(model_id="anthropic.claude-v2", client=client)

    # should not throw an error
    model.invoke("hello there")

@@ -10,8 +10,23 @@ from langchain_core.outputs import Generation, LLMResult

from langchain.cache import InMemoryCache, SQLAlchemyCache
from langchain.globals import get_llm_cache, set_llm_cache
from langchain.llms.base import __all__
from tests.unit_tests.llms.fake_llm import FakeLLM

EXPECTED_ALL = [
    "BaseLLM",
    "LLM",
    "_get_verbosity",
    "create_base_retry_decorator",
    "get_prompts",
    "update_cache",
    "BaseLanguageModel",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)


def test_caching() -> None:
    """Test caching behavior."""

@@ -74,21 +89,3 @@ def test_custom_caching() -> None:
        llm_output=None,
    )
    assert output == expected_output


def test_batch() -> None:
    llm = FakeLLM()
    output = llm.batch(["foo", "bar", "foo"])
    assert output == ["foo"] * 3

    output = llm.batch(["foo", "bar", "foo"], config={"max_concurrency": 2})
    assert output == ["foo"] * 3


async def test_abatch() -> None:
    llm = FakeLLM()
    output = await llm.abatch(["foo", "bar", "foo"])
    assert output == ["foo"] * 3

    output = await llm.abatch(["foo", "bar", "foo"], config={"max_concurrency": 2})
    assert output == ["foo"] * 3

1 libs/langchain/tests/unit_tests/prompts/__init__.py Normal file
@@ -0,0 +1 @@
"""Test prompt functionality."""

16 libs/langchain/tests/unit_tests/prompts/test_base.py Normal file
@@ -0,0 +1,16 @@
from langchain.prompts.base import __all__

EXPECTED_ALL = [
    "BasePromptTemplate",
    "StringPromptTemplate",
    "StringPromptValue",
    "_get_jinja2_variables_from_template",
    "check_valid_template",
    "get_template_variables",
    "jinja2_formatter",
    "validate_jinja2",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)

21 libs/langchain/tests/unit_tests/prompts/test_chat.py Normal file
@@ -0,0 +1,21 @@
from langchain.prompts.chat import __all__

EXPECTED_ALL = [
    "AIMessagePromptTemplate",
    "BaseChatPromptTemplate",
    "BaseMessagePromptTemplate",
    "BaseStringMessagePromptTemplate",
    "ChatMessagePromptTemplate",
    "ChatPromptTemplate",
    "ChatPromptValue",
    "ChatPromptValueConcrete",
    "HumanMessagePromptTemplate",
    "MessagesPlaceholder",
    "SystemMessagePromptTemplate",
    "_convert_to_message",
    "_create_template_from_message_type",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)

11 libs/langchain/tests/unit_tests/prompts/test_few_shot.py Normal file
@@ -0,0 +1,11 @@
from langchain.prompts.few_shot import __all__

EXPECTED_ALL = [
    "FewShotChatMessagePromptTemplate",
    "FewShotPromptTemplate",
    "_FewShotPromptTemplateMixin",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)

@@ -0,0 +1,7 @@
from langchain.prompts.few_shot_with_templates import __all__

EXPECTED_ALL = ["FewShotPromptWithTemplates"]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)

28 libs/langchain/tests/unit_tests/prompts/test_imports.py Normal file
@@ -0,0 +1,28 @@
from langchain.prompts import __all__

EXPECTED_ALL = [
    "AIMessagePromptTemplate",
    "BaseChatPromptTemplate",
    "BasePromptTemplate",
    "ChatMessagePromptTemplate",
    "ChatPromptTemplate",
    "FewShotPromptTemplate",
    "FewShotPromptWithTemplates",
    "HumanMessagePromptTemplate",
    "LengthBasedExampleSelector",
    "MaxMarginalRelevanceExampleSelector",
    "MessagesPlaceholder",
    "NGramOverlapExampleSelector",
    "PipelinePromptTemplate",
    "Prompt",
    "PromptTemplate",
    "SemanticSimilarityExampleSelector",
    "StringPromptTemplate",
    "SystemMessagePromptTemplate",
    "load_prompt",
    "FewShotChatMessagePromptTemplate",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)

17 libs/langchain/tests/unit_tests/prompts/test_loading.py Normal file
@@ -0,0 +1,17 @@
from langchain.prompts.loading import __all__

EXPECTED_ALL = [
    "_load_examples",
    "_load_few_shot_prompt",
    "_load_output_parser",
    "_load_prompt",
    "_load_prompt_from_file",
    "_load_template",
    "load_prompt",
    "load_prompt_from_config",
    "try_load_from_hub",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)

7 libs/langchain/tests/unit_tests/prompts/test_pipeline.py Normal file
@@ -0,0 +1,7 @@
from langchain.prompts.pipeline import __all__

EXPECTED_ALL = ["PipelinePromptTemplate", "_get_inputs"]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)

7 libs/langchain/tests/unit_tests/prompts/test_prompt.py Normal file
@@ -0,0 +1,7 @@
from langchain.prompts.prompt import __all__

EXPECTED_ALL = ["Prompt", "PromptTemplate"]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)

@@ -1,5 +1,4 @@
"""Test formatting functionality."""
import unittest
from typing import Union

import pytest

@@ -16,75 +15,12 @@ from langchain_core.messages import (
    HumanMessageChunk,
    SystemMessage,
    SystemMessageChunk,
    get_buffer_string,
    messages_from_dict,
    messages_to_dict,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, Generation
from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue
from langchain_core.pydantic_v1 import BaseModel, ValidationError


class TestGetBufferString(unittest.TestCase):
    def setUp(self) -> None:
        self.human_msg = HumanMessage(content="human")
        self.ai_msg = AIMessage(content="ai")
        self.sys_msg = SystemMessage(content="sys")

    def test_empty_input(self) -> None:
        self.assertEqual(get_buffer_string([]), "")

    def test_valid_single_message(self) -> None:
        expected_output = f"Human: {self.human_msg.content}"
        self.assertEqual(
            get_buffer_string([self.human_msg]),
            expected_output,
        )

    def test_custom_human_prefix(self) -> None:
        prefix = "H"
        expected_output = f"{prefix}: {self.human_msg.content}"
        self.assertEqual(
            get_buffer_string([self.human_msg], human_prefix="H"),
            expected_output,
        )

    def test_custom_ai_prefix(self) -> None:
        prefix = "A"
        expected_output = f"{prefix}: {self.ai_msg.content}"
        self.assertEqual(
            get_buffer_string([self.ai_msg], ai_prefix="A"),
            expected_output,
        )

    def test_multiple_msg(self) -> None:
        msgs = [self.human_msg, self.ai_msg, self.sys_msg]
        expected_output = "\n".join(
            [
                f"Human: {self.human_msg.content}",
                f"AI: {self.ai_msg.content}",
                f"System: {self.sys_msg.content}",
            ]
        )
        self.assertEqual(
            get_buffer_string(msgs),
            expected_output,
        )


def test_multiple_msg() -> None:
    human_msg = HumanMessage(content="human", additional_kwargs={"key": "value"})
    ai_msg = AIMessage(content="ai")
    sys_msg = SystemMessage(content="sys")

    msgs = [
        human_msg,
        ai_msg,
        sys_msg,
    ]
    assert messages_from_dict(messages_to_dict(msgs)) == msgs


def test_serialization_of_wellknown_objects() -> None:
    """Test that pydantic is able to serialize and deserialize well known objects."""

18 libs/langchain/tests/unit_tests/tools/test_base.py Normal file
@@ -0,0 +1,18 @@
from langchain.tools.base import __all__

EXPECTED_ALL = [
    "BaseTool",
    "SchemaAnnotationError",
    "StructuredTool",
    "Tool",
    "ToolException",
    "_SchemaConfig",
    "_create_subset_model",
    "_get_filtered_args",
    "create_schema_from_function",
    "tool",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
@@ -1,7 +1,7 @@
[tool.poetry]
name = "anthropic-iterative-search"
version = "0.0.1"
description = ""
description = "A virtual research assistant with the ability to search Wikipedia and answer questions"
authors = []
readme = "README.md"

@@ -18,6 +18,12 @@ langchain-cli = {extras = ["serve"], version = "^0.0.13"}
export_module = "anthropic_iterative_search"
export_attr = "chain"

[tool.templates-hub]
use-case = "research"
author = "LangChain"
integrations = ["Anthropic", "Wikipedia"]
tags = ["research", "agents"]

[build-system]
requires = [
    "poetry-core",

@@ -1,10 +1,10 @@
# basic-critique-revise

TODO: What does this package do
Iteratively generate schema candidates and revise them based on errors.

## Environment Setup

TODO: What environment variables need to be set (if any)
This template uses OpenAI function calling, so you will need to set the `OPENAI_API_KEY` environment variable in order to use this template.

## Usage
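Aside, not part of the changeset: a minimal sketch of the setup the revised README asks for, assuming the template is driven from Python; the key value is a placeholder.

import os

# Make the key visible to the template before it is imported or served;
# replace the placeholder with a real OpenAI API key.
os.environ.setdefault("OPENAI_API_KEY", "sk-...")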

@@ -1,7 +1,7 @@
[tool.poetry]
name = "basic_critique_revise"
version = "0.0.1"
description = ""
description = "Iteratively generate schema candidates and revise based on errors"
authors = []
readme = "README.md"

@@ -19,6 +19,12 @@ sse-starlette = "^1.6.5"
export_module = "basic_critique_revise"
export_attr = "chain"

[tool.templates-hub]
use-case = "research"
author = "LangChain"
integrations = ["OpenAI", "Function Calling", "Pydantic"]
tags = ["research", "function-calling"]

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

@@ -1,7 +1,7 @@
[tool.poetry]
name = "cassandra-entomology-rag"
version = "0.0.1"
description = ""
description = "RAG using Apache Cassandra® or Astra DB"
authors = [
    "Stefano Lottini <stefano.lottini@datastax.com>",
]

@@ -21,6 +21,12 @@ langchain-cli = ">=0.0.15"
export_module = "cassandra_entomology_rag"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "DataStax"
integrations = ["OpenAI", "Cassandra"]
tags = ["vectordbs"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "cassandra-synonym-caching"
version = "0.0.1"
description = ""
description = "LLM caching backed by Apache Cassandra® or Astra DB"
authors = [
    "Stefano Lottini <stefano.lottini@datastax.com>",
]

@@ -21,6 +21,12 @@ langchain-cli = ">=0.0.15"
export_module = "cassandra_synonym_caching"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "DataStax"
integrations = ["OpenAI", "Cassandra"]
tags = ["vectordbs"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "chain-of-note-wiki"
version = "0.0.1"
description = ""
description = "Implementation of Chain of Note prompting for Wikipedia."
authors = []
readme = "README.md"

@@ -21,6 +21,12 @@ sse-starlette = "^1.6.5"
export_module = "chain_of_note_wiki"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "LangChain"
integrations = ["Anthropic", "Wikipedia", "LangChain Hub"]
tags = ["paper", "prompt-hub"]

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

@@ -1,7 +1,7 @@
[tool.poetry]
name = "chat-bot-feedback"
version = "0.0.1"
description = ""
description = "Evaluate your chatbot without human feedback"
authors = []
readme = "README.md"

@@ -21,6 +21,12 @@ sse-starlette = "^1.6.5"
export_module = "chat_bot_feedback.chain"
export_attr = "chain"

[tool.templates-hub]
use-case = "evaluation"
author = "LangChain"
integrations = ["OpenAI", "LangSmith"]
tags = ["langsmith"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "csv-agent"
version = "0.0.1"
description = ""
description = "Analyze csv data with Pandas and OpenAI"
authors = []
readme = "README.md"

@@ -24,6 +24,12 @@ langchain-cli = ">=0.0.15"
export_module = "csv_agent"
export_attr = "agent_executor"

[tool.templates-hub]
use-case = "question-answering"
author = "LangChain"
integrations = ["OpenAI", "Pandas"]
tags = ["data", "agents"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "elastic-query-generator"
version = "0.0.1"
description = ""
description = "Interact with Elasticsearch analytics databases using natural language"
authors = []
readme = "README.md"

@@ -18,6 +18,12 @@ langchain-cli = ">=0.0.15"
export_module = "elastic_query_generator"
export_attr = "chain"

[tool.templates-hub]
use-case = "query"
author = "LangChain"
integrations = ["Elasticsearch", "OpenAI"]
tags = ["query-generation"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "extraction-anthropic-functions"
version = "0.1.0"
description = ""
description = "Use Anthropic function calling for tasks like extraction or tagging"
authors = [
    "Lance Martin <lance@langchain.dev>",
]

@@ -21,6 +21,12 @@ langchain-cli = ">=0.0.15"
export_module = "extraction_anthropic_functions"
export_attr = "chain"

[tool.templates-hub]
use-case = "extraction"
author = "LangChain"
integrations = ["Anthropic", "Function Calling"]
tags = ["function-calling", "tagging", "extraction"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "extraction-openai-functions"
version = "0.1.0"
description = ""
description = "Use OpenAI function calling for tasks like extraction or tagging"
authors = [
    "Lance Martin <lance@langchain.dev>",
]

@@ -19,6 +19,12 @@ langchain-cli = ">=0.0.15"
export_module = "extraction_openai_functions"
export_attr = "chain"

[tool.templates-hub]
use-case = "extraction"
author = "LangChain"
integrations = ["OpenAI", "Function Calling"]
tags = ["function-calling", "tagging", "extraction"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "guardrails-output-parser"
version = "0.0.1"
description = ""
description = "Use guardrails-ai to validate LLM output"
authors = []
readme = "README.md"

@@ -21,6 +21,12 @@ sse-starlette = "^1.6.5"
export_module = "guardrails_output_parser.chain"
export_attr = "chain"

[tool.templates-hub]
use-case = "validation"
author = "LangChain"
integrations = ["Guardrails", "OpenAI"]
tags = ["moderation"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "hybrid-search-weaviate"
version = "0.1.0"
description = ""
description = "Improve accuracy and relevance with Weaviate hybrid search"
authors = ["Erika Cardenas <erika@weaviate.io>"]
readme = "README.md"

@@ -24,6 +24,12 @@ version = "^1.0.0"
export_module = "hybrid_search_weaviate"
export_attr = "chain"

[tool.templates-hub]
use-case = "search"
author = "Weaviate"
integrations = ["Weaviate", "OpenAI"]
tags = ["hybrid-search", "vectordb"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "hyde"
version = "0.0.1"
description = ""
description = "Retrieval with Hypothetical Document Embeddings (HyDE)"
authors = []
readme = "README.md"

@@ -22,6 +22,12 @@ sse-starlette = "^1.6.5"
export_module = "hyde.chain"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "LangChain"
integrations = ["OpenAI", "ChromaDB"]
tags = ["paper"]

[tool.poe.tasks.start]
cmd = "uvicorn langchain_cli.dev_scripts:create_demo_server --reload --port $port --host $host"
args = [

@@ -1,7 +1,7 @@
[tool.poetry]
name = "llama2-functions"
version = "0.1.0"
description = ""
description = "Extraction with a JSON-output LLaMA2 model"
authors = [
    "Lance Martin <lance@langchain.dev>",
]

@@ -19,6 +19,12 @@ langchain-cli = ">=0.0.15"
export_module = "llama2_functions"
export_attr = "chain"

[tool.templates-hub]
use-case = "extraction"
author = "LangChain"
integrations = ["Llama.cpp", "Replicate"]
tags = ["local-llm", "function-calling"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "mongo-parent-document-retrieval"
version = "0.0.1"
description = ""
description = "RAG using MongoDB and OpenAI"
authors = []
readme = "README.md"

@@ -22,6 +22,12 @@ sse-starlette = "^1.6.5"
export_module = "mongo_parent_document_retrieval"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "LangChain"
integrations = ["MongoDB", "OpenAI"]
tags = ["vectordb"]

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

@@ -1,7 +1,7 @@
[tool.poetry]
name = "neo4j-advanced-rag"
version = "0.1.0"
description = ""
description = "Balance precise embeddings and context retention with advanced strategies"
authors = [
    "Tomaz Bratanic <tomaz.bratanic@neo4j.com>",
]

@@ -18,6 +18,12 @@ neo4j = "^5.14.0"
export_module = "neo4j_advanced_rag"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "Neo4j"
integrations = ["Neo4j", "OpenAI"]
tags = ["vectordb", "parent", "summarization"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "neo4j-cypher-ft"
version = "0.1.0"
description = ""
description = "Interact with a Neo4j graph database using natural language"
authors = [
    "Tomaz Bratanic <tomaz.bratanic@neo4j.com>",
]

@@ -20,6 +20,12 @@ langchain-cli = ">=0.0.15"
export_module = "neo4j_cypher_ft"
export_attr = "chain"

[tool.templates-hub]
use-case = "query"
author = "Neo4j"
integrations = ["Neo4j", "OpenAI"]
tags = ["search", "graph-database", "query"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "neo4j-cypher-memory"
version = "0.1.0"
description = ""
description = "Conversational interface for a Neo4j graph database"
authors = [
    "Tomaz Bratanic <tomaz.bratanic@neo4j.com>",
]

@@ -20,6 +20,12 @@ langchain-cli = ">=0.0.15"
export_module = "neo4j_cypher_memory"
export_attr = "chain"

[tool.templates-hub]
use-case = "query"
author = "Neo4j"
integrations = ["Neo4j", "OpenAI"]
tags = ["conversation", "graph-database"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "neo4j-cypher"
version = "0.1.0"
description = ""
description = "Natural language interface for a Neo4j graph database"
authors = [
    "Tomaz Bratanic <tomaz.bratanic@neo4j.com>",
]

@@ -20,6 +20,12 @@ langchain-cli = ">=0.0.15"
export_module = "neo4j_cypher"
export_attr = "chain"

[tool.templates-hub]
use-case = "query"
author = "Neo4j"
integrations = ["Neo4j", "OpenAI"]
tags = ["search", "graph-database"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "neo4j-generation"
version = "0.0.1"
description = ""
description = "Knowledge graph extraction with Neo4j AuraDB"
authors = [
    "Tomaz Bratanic <tomaz.bratanic@neo4j.com>",
]

@@ -20,6 +20,12 @@ langchain-cli = ">=0.0.15"
export_module = "neo4j_generation.chain"
export_attr = "chain"

[tool.templates-hub]
use-case = "extraction"
author = "Neo4j"
integrations = ["Neo4j", "OpenAI"]
tags = ["graph-database", "search"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "neo4j-parent"
version = "0.1.0"
description = ""
description = "Balance precise embeddings and context retention with Neo4j hybrid search"
authors = [
    "Tomaz Bratanic <tomaz.bratanic@neo4j.com>",
]

@@ -21,6 +21,12 @@ langchain-cli = ">=0.0.15"
export_module = "neo4j_parent"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "Neo4j"
integrations = ["Neo4j", "OpenAI"]
tags = ["hybrid-search", "graph-database"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "neo4j-vector-memory"
version = "0.1.0"
description = ""
description = "Store conversational flows in a Neo4j graph database"
authors = [
    "Tomaz Bratanic <tomaz.bratanic@neo4j.com>",
]

@@ -21,6 +21,12 @@ langchain-cli = ">=0.0.15"
export_module = "neo4j_vector_memory"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "Neo4j"
integrations = ["Neo4j", "OpenAI"]
tags = ["graph-database", "conversation"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "openai-functions-agent"
version = "0.1.0"
description = ""
description = "Agent using OpenAI function calling to execute functions, including search"
authors = [
    "Lance Martin <lance@langchain.dev>",
]

@@ -20,6 +20,12 @@ langchain-cli = ">=0.0.15"
export_module = "openai_functions_agent"
export_attr = "agent_executor"

[tool.templates-hub]
use-case = "research"
author = "LangChain"
integrations = ["OpenAI", "Tavily"]
tags = ["search", "agents", "function-calling"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "openai-functions-tool-retrieval-agent"
version = "0.0.1"
description = ""
description = "Use large numbers of tools with tool retrieval strategies"
authors = []
readme = "README.md"

@@ -21,6 +21,12 @@ sse-starlette = "^1.6.5"
export_module = "openai_functions_tool_retrieval_agent"
export_attr = "agent_executor"

[tool.templates-hub]
use-case = "agents"
author = "LangChain"
integrations = ["OpenAI", "FAISS"]
tags = ["agents", "function-calling"]

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

@@ -1,7 +1,7 @@
[tool.poetry]
name = "pii_protected_chatbot"
version = "0.0.1"
description = ""
description = "Flag PII before passing it to the LLM"
authors = []
readme = "README.md"

@@ -20,6 +20,12 @@ sse-starlette = "^1.6.5"
export_module = "pii_protected_chatbot.chain"
export_attr = "chain"

[tool.templates-hub]
use-case = "chatbot"
author = "LangChain"
integrations = ["OpenAI", "Microsoft Presidio"]
tags = ["data", "redaction"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "pirate-speak-configurable"
version = "0.0.1"
description = ""
description = "Use Configurable Alternatives to allow clients to choose their Runnables"
authors = []
readme = "README.md"

@@ -21,6 +21,12 @@ sse-starlette = "^1.6.5"
export_module = "pirate_speak_configurable"
export_attr = "chain"

[tool.templates-hub]
use-case = "chatbot"
author = "LangChain"
integrations = ["Cohere", "Anthropic", "OpenAI"]
tags = ["configurable"]

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

@@ -1,7 +1,7 @@
[tool.poetry]
name = "pirate-speak"
version = "0.0.1"
description = ""
description = "Get started with a simple template that speaks like a pirate"
authors = []
readme = "README.md"

@@ -19,6 +19,12 @@ sse-starlette = "^1.6.5"
export_module = "pirate_speak.chain"
export_attr = "chain"

[tool.templates-hub]
use-case = "chatbot"
author = "LangChain"
integrations = ["OpenAI"]
tags = ["getting-started"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "plate-chain"
version = "0.0.1"
description = ""
description = "Parse data from laboratory plates into standardized formats"
authors = []
readme = "README.md"

@@ -20,6 +20,12 @@ sse-starlette = "^1.6.5"
export_module = "plate_chain"
export_attr = "chain"

[tool.templates-hub]
use-case = "extraction"
author = "Sphinx Bio"
integrations = ["OpenAI"]
tags = ["bio", "data"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "astradb_entomology_rag"
version = "0.0.1"
description = ""
description = "RAG using AstraDB"
authors = [
    "Stefano Lottini <stefano.lottini@datastax.com>",
]

@@ -21,6 +21,12 @@ langchain-cli = ">=0.0.15"
export_module = "astradb_entomology_rag"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "Datastax"
integrations = ["AstraDB"]
tags = ["vectordbs"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "rag-aws-bedrock"
version = "0.1.0"
description = ""
description = "RAG using AWS Bedrock"
authors = [
    "Lance Martin <lance@langchain.dev>",
]

@@ -22,6 +22,12 @@ langchain-cli = ">=0.0.15"
export_module = "rag_aws_bedrock"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "LangChain"
integrations = ["OpenAI", "FAISS"]
tags = ["vectordbs"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "rag-aws-kendra"
version = "0.0.1"
description = ""
description = "RAG using AWS-Kendra and anthropic"
authors = []
readme = "README.md"

@@ -21,6 +21,12 @@ sse-starlette = "^1.6.5"
export_module = "rag_aws_kendra.chain"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "LangChain"
integrations = ["AWS"]
tags = ["vectordbs"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "rag-chroma-private"
version = "0.1.0"
description = ""
description = "Private RAG using local LLM, embeddings, vectorstore"
authors = [
    "Lance Martin <lance@langchain.dev>",
]

@@ -22,6 +22,12 @@ langchain-cli = ">=0.0.15"
export_module = "rag_chroma_private"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "LangChain"
integrations = ["OpenAI", "Chroma", "Gpt4all", "Ollama"]
tags = ["vectordbs"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "rag-chroma"
version = "0.1.0"
description = ""
description = "RAG using Chroma"
authors = [
    "Erick Friis <erick@langchain.dev>",
]

@@ -21,6 +21,12 @@ langchain-cli = ">=0.0.15"
export_module = "rag_chroma"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "LangChain"
integrations = ["OpenAI", "Chroma"]
tags = ["vectordbs"]

[build-system]
requires = [
    "poetry-core",

@@ -1,7 +1,7 @@
[tool.poetry]
name = "rag-codellama-fireworks"
version = "0.1.0"
description = ""
description = "RAG using OSS LLMs via Fireworks"
authors = [
    "Lance Martin <lance@langchain.dev>",
]

@@ -22,6 +22,12 @@ langchain-cli = ">=0.0.15"
export_module = "rag_codellama_fireworks"
export_attr = "chain"

[tool.templates-hub]
use-case = "rag"
author = "Elastic"
integrations = ["OpenAI", "Fireworks"]
tags = ["vectordbs"]

[build-system]
requires = [
    "poetry-core",
Some files were not shown because too many files have changed in this diff.