Mirror of https://github.com/hwchase17/langchain.git (synced 2025-07-04 12:18:24 +00:00)
Session to project (#6249)
Sessions are being renamed to projects in the tracer
parent 9c09861946
commit b4fe7f3a09
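In user-facing terms, every call site that previously took a `session_name` keyword argument or read the `LANGCHAIN_SESSION` environment variable now takes `project_name` and reads `LANGCHAIN_PROJECT` (the old variable is still honored as a fallback). A minimal migration sketch, using only names that appear in the diff below:

```python
import os

from langchain.callbacks.manager import tracing_v2_enabled

# New env var; LANGCHAIN_SESSION is still read as a fallback per this diff.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "Tracing Walkthrough"

# Before: with tracing_v2_enabled(session_name="My Session Name"):
with tracing_v2_enabled(project_name="My Project Name"):
    ...  # anything run here is traced to the named project
```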
@@ -106,7 +106,7 @@ def wandb_tracing_enabled(
 
 @contextmanager
 def tracing_v2_enabled(
-    session_name: Optional[str] = None,
+    project_name: Optional[str] = None,
     *,
     example_id: Optional[Union[str, UUID]] = None,
 ) -> Generator[None, None, None]:
@@ -120,7 +120,7 @@ def tracing_v2_enabled(
         example_id = UUID(example_id)
     cb = LangChainTracer(
         example_id=example_id,
-        session_name=session_name,
+        project_name=project_name,
     )
     tracing_v2_callback_var.set(cb)
     yield
@@ -131,12 +131,12 @@ def tracing_v2_enabled(
 def trace_as_chain_group(
     group_name: str,
     *,
-    session_name: Optional[str] = None,
+    project_name: Optional[str] = None,
     example_id: Optional[Union[str, UUID]] = None,
 ) -> Generator[CallbackManager, None, None]:
     """Get a callback manager for a chain group in a context manager."""
     cb = LangChainTracer(
-        session_name=session_name,
+        project_name=project_name,
         example_id=example_id,
     )
     cm = CallbackManager.configure(
@@ -152,12 +152,12 @@ def trace_as_chain_group(
 async def atrace_as_chain_group(
     group_name: str,
     *,
-    session_name: Optional[str] = None,
+    project_name: Optional[str] = None,
     example_id: Optional[Union[str, UUID]] = None,
 ) -> AsyncGenerator[AsyncCallbackManager, None]:
     """Get a callback manager for a chain group in a context manager."""
     cb = LangChainTracer(
-        session_name=session_name,
+        project_name=project_name,
         example_id=example_id,
     )
     cm = AsyncCallbackManager.configure(
@@ -1039,10 +1039,10 @@ def _configure(
     tracing_v2_enabled_ = (
         env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None
     )
-    tracer_session = os.environ.get("LANGCHAIN_SESSION")
+    tracer_project = os.environ.get(
+        "LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default")
+    )
     debug = _get_debug()
-    if tracer_session is None:
-        tracer_session = "default"
     if (
         verbose
         or debug
@@ -1072,7 +1072,7 @@ def _configure(
                 callback_manager.add_handler(tracer, True)
             else:
                 handler = LangChainTracerV1()
-                handler.load_session(tracer_session)
+                handler.load_session(tracer_project)
                 callback_manager.add_handler(handler, True)
         if wandb_tracing_enabled_ and not any(
             isinstance(handler, WandbTracer) for handler in callback_manager.handlers
@@ -1090,7 +1090,7 @@ def _configure(
                 callback_manager.add_handler(tracer_v2, True)
             else:
                 try:
-                    handler = LangChainTracer(session_name=tracer_session)
+                    handler = LangChainTracer(project_name=tracer_project)
                     callback_manager.add_handler(handler, True)
                 except Exception as e:
                     logger.warning(
@@ -45,7 +45,7 @@ class LangChainTracer(BaseTracer):
     def __init__(
         self,
         example_id: Optional[Union[UUID, str]] = None,
-        session_name: Optional[str] = None,
+        project_name: Optional[str] = None,
         client: Optional[LangChainPlusClient] = None,
         **kwargs: Any,
     ) -> None:
@@ -55,7 +55,9 @@ class LangChainTracer(BaseTracer):
         self.example_id = (
             UUID(example_id) if isinstance(example_id, str) else example_id
         )
-        self.session_name = session_name or os.getenv("LANGCHAIN_SESSION", "default")
+        self.project_name = project_name or os.getenv(
+            "LANGCHAIN_PROJECT", os.getenv("LANGCHAIN_SESSION", "default")
+        )
         # set max_workers to 1 to process tasks in order
         self.executor = ThreadPoolExecutor(max_workers=1)
         self.client = client or LangChainPlusClient()
@@ -103,7 +105,7 @@ class LangChainTracer(BaseTracer):
         extra["runtime"] = get_runtime_environment()
         run_dict["extra"] = extra
         try:
-            self.client.create_run(**run_dict, session_name=self.session_name)
+            self.client.create_run(**run_dict, project_name=self.project_name)
         except Exception as e:
             # Errors are swallowed by the thread executor so we need to log them here
             log_error_once("post", e)
@@ -237,18 +237,18 @@ async def _gather_with_concurrency(
     return results
 
 
-async def _tracer_initializer(session_name: Optional[str]) -> Optional[LangChainTracer]:
+async def _tracer_initializer(project_name: Optional[str]) -> Optional[LangChainTracer]:
     """
     Initialize a tracer to share across tasks.
 
     Args:
-        session_name: The session name for the tracer.
+        project_name: The project name for the tracer.
 
     Returns:
-        A LangChainTracer instance with an active session.
+        A LangChainTracer instance with an active project.
     """
-    if session_name:
-        tracer = LangChainTracer(session_name=session_name)
+    if project_name:
+        tracer = LangChainTracer(project_name=project_name)
         return tracer
     else:
         return None
@@ -260,12 +260,12 @@ async def arun_on_examples(
     *,
     concurrency_level: int = 5,
     num_repetitions: int = 1,
-    session_name: Optional[str] = None,
+    project_name: Optional[str] = None,
     verbose: bool = False,
     tags: Optional[List[str]] = None,
 ) -> Dict[str, Any]:
     """
-    Run the chain on examples and store traces to the specified session name.
+    Run the chain on examples and store traces to the specified project name.
 
     Args:
         examples: Examples to run the model or chain over
@@ -276,7 +276,7 @@ async def arun_on_examples(
         num_repetitions: Number of times to run the model on each example.
             This is useful when testing success rates or generating confidence
            intervals.
-        session_name: Session name to use when tracing runs.
+        project_name: Project name to use when tracing runs.
         verbose: Whether to print progress.
         tags: Tags to add to the traces.
 
@@ -307,7 +307,7 @@ async def arun_on_examples(
 
     await _gather_with_concurrency(
         concurrency_level,
-        functools.partial(_tracer_initializer, session_name),
+        functools.partial(_tracer_initializer, project_name),
         *(functools.partial(process_example, e) for e in examples),
     )
     return results
@@ -386,11 +386,11 @@ def run_on_examples(
     llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
     *,
     num_repetitions: int = 1,
-    session_name: Optional[str] = None,
+    project_name: Optional[str] = None,
     verbose: bool = False,
     tags: Optional[List[str]] = None,
 ) -> Dict[str, Any]:
-    """Run the chain on examples and store traces to the specified session name.
+    """Run the chain on examples and store traces to the specified project name.
 
     Args:
         examples: Examples to run model or chain over.
@@ -401,14 +401,14 @@ def run_on_examples(
         num_repetitions: Number of times to run the model on each example.
             This is useful when testing success rates or generating confidence
             intervals.
-        session_name: Session name to use when tracing runs.
+        project_name: Project name to use when tracing runs.
         verbose: Whether to print progress.
         tags: Tags to add to the run traces.
     Returns:
         A dictionary mapping example ids to the model outputs.
     """
     results: Dict[str, Any] = {}
-    tracer = LangChainTracer(session_name=session_name) if session_name else None
+    tracer = LangChainTracer(project_name=project_name) if project_name else None
     for i, example in enumerate(examples):
         result = run_llm_or_chain(
             example,
@@ -425,13 +425,13 @@ def run_on_examples(
     return results
 
 
-def _get_session_name(
-    session_name: Optional[str],
+def _get_project_name(
+    project_name: Optional[str],
     llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
     dataset_name: str,
 ) -> str:
-    if session_name is not None:
-        return session_name
+    if project_name is not None:
+        return project_name
     current_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
     if isinstance(llm_or_chain_factory, BaseLanguageModel):
         model_name = llm_or_chain_factory.__class__.__name__
@@ -446,13 +446,13 @@ async def arun_on_dataset(
     *,
     concurrency_level: int = 5,
     num_repetitions: int = 1,
-    session_name: Optional[str] = None,
+    project_name: Optional[str] = None,
     verbose: bool = False,
     client: Optional[LangChainPlusClient] = None,
     tags: Optional[List[str]] = None,
 ) -> Dict[str, Any]:
     """
-    Run the chain on a dataset and store traces to the specified session name.
+    Run the chain on a dataset and store traces to the specified project name.
 
     Args:
         client: Client to use to read the dataset.
@@ -464,7 +464,7 @@ async def arun_on_dataset(
         num_repetitions: Number of times to run the model on each example.
             This is useful when testing success rates or generating confidence
             intervals.
-        session_name: Name of the session to store the traces in.
+        project_name: Name of the project to store the traces in.
             Defaults to {dataset_name}-{chain class name}-{datetime}.
         verbose: Whether to print progress.
         client: Client to use to read the dataset. If not provided, a new
@@ -472,11 +472,10 @@ async def arun_on_dataset(
         tags: Tags to add to each run in the sesssion.
 
     Returns:
-        A dictionary containing the run's session name and the resulting model outputs.
+        A dictionary containing the run's project name and the resulting model outputs.
     """
     client_ = client or LangChainPlusClient()
-    session_name = _get_session_name(session_name, llm_or_chain_factory, dataset_name)
-    client_.create_session(session_name, mode="eval")
+    project_name = _get_project_name(project_name, llm_or_chain_factory, dataset_name)
     dataset = client_.read_dataset(dataset_name=dataset_name)
     examples = client_.list_examples(dataset_id=str(dataset.id))
 
@@ -485,12 +484,12 @@ async def arun_on_dataset(
         llm_or_chain_factory,
         concurrency_level=concurrency_level,
         num_repetitions=num_repetitions,
-        session_name=session_name,
+        project_name=project_name,
         verbose=verbose,
         tags=tags,
     )
     return {
-        "session_name": session_name,
+        "project_name": project_name,
         "results": results,
     }
 
@@ -500,12 +499,12 @@ def run_on_dataset(
     llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
     *,
     num_repetitions: int = 1,
-    session_name: Optional[str] = None,
+    project_name: Optional[str] = None,
     verbose: bool = False,
     client: Optional[LangChainPlusClient] = None,
     tags: Optional[List[str]] = None,
 ) -> Dict[str, Any]:
-    """Run the chain on a dataset and store traces to the specified session name.
+    """Run the chain on a dataset and store traces to the specified project name.
 
     Args:
         dataset_name: Name of the dataset to run the chain on.
@@ -516,7 +515,7 @@ def run_on_dataset(
         num_repetitions: Number of times to run the model on each example.
             This is useful when testing success rates or generating confidence
             intervals.
-        session_name: Name of the session to store the traces in.
+        project_name: Name of the project to store the traces in.
             Defaults to {dataset_name}-{chain class name}-{datetime}.
         verbose: Whether to print progress.
         client: Client to use to access the dataset. If None, a new client
@@ -524,22 +523,21 @@ def run_on_dataset(
         tags: Tags to add to each run in the sesssion.
 
     Returns:
-        A dictionary containing the run's session name and the resulting model outputs.
+        A dictionary containing the run's project name and the resulting model outputs.
     """
     client_ = client or LangChainPlusClient()
-    session_name = _get_session_name(session_name, llm_or_chain_factory, dataset_name)
-    client_.create_session(session_name, mode="eval")
+    project_name = _get_project_name(project_name, llm_or_chain_factory, dataset_name)
     dataset = client_.read_dataset(dataset_name=dataset_name)
     examples = client_.list_examples(dataset_id=str(dataset.id))
     results = run_on_examples(
         examples,
         llm_or_chain_factory,
         num_repetitions=num_repetitions,
-        session_name=session_name,
+        project_name=project_name,
         verbose=verbose,
         tags=tags,
     )
     return {
-        "session_name": session_name,
+        "project_name": project_name,
         "results": results,
     }
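The evaluation helpers in langchain/client/runner_utils.py follow the same rename: `project_name` replaces `session_name` in the keyword arguments, and the returned dictionary is keyed "project_name" instead of "session_name". A hedged usage sketch (the dataset name and model below are illustrative placeholders, not part of this commit):

```python
from langchain.chat_models import ChatOpenAI  # placeholder model choice
from langchain.client.runner_utils import run_on_dataset

chain_results = run_on_dataset(
    dataset_name="calculator-example-dataset",  # assumed to already exist
    llm_or_chain_factory=ChatOpenAI(temperature=0),
    project_name="Search + Calculator Agent Evaluation",  # was session_name=...
)
# The result dict now reports the project rather than the session.
print(chain_results["project_name"], len(chain_results["results"]))
```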
@@ -1,6 +1,7 @@
 {
  "cells": [
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "1a4596ea-a631-416d-a2a4-3577c140493d",
    "metadata": {
@@ -34,6 +35,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "2d77d064-41b4-41fb-82e6-2d16461269ec",
    "metadata": {
@@ -48,19 +50,21 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "7935e832-9ae1-4557-8d08-890c425f18e2",
    "metadata": {},
    "source": [
-    "**NOTE:** You can also use the `tracing_v2_enabled` context manager to capture sessions within a given context:\n",
+    "**NOTE:** You can also use the `tracing_v2_enabled` context manager to capture projects within a given context:\n",
     "```\n",
     "from langchain.callbacks.manager import tracing_v2_enabled\n",
-    "with tracing_v2_enabled(\"My Session Name\"):\n",
+    "with tracing_v2_enabled(\"My Project Name\"):\n",
     "    ...\n",
     "```"
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "221b638a-2ae4-46ef-bf6a-d59bf85d587f",
    "metadata": {},
@@ -102,7 +106,7 @@
     "from langchainplus_sdk import LangChainPlusClient\n",
     "\n",
     "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
-    "os.environ[\"LANGCHAIN_SESSION\"] = \"Tracing Walkthrough\"\n",
+    "os.environ[\"LANGCHAIN_PROJECT\"] = \"Tracing Walkthrough\"\n",
     "# os.environ[\"LANGCHAIN_ENDPOINT\"] = \"https://api.langchain.plus\" # Uncomment this line if you want to use the hosted version\n",
     "# os.environ[\"LANGCHAIN_API_KEY\"] = \"<YOUR-LANGCHAINPLUS-API-KEY>\" # Uncomment this line if you want to use the hosted version.\n",
     "\n",
@@ -171,13 +175,14 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "6c43c311-4e09-4d57-9ef3-13afb96ff430",
    "metadata": {},
    "source": [
     "## Creating the Dataset\n",
     "\n",
-    "Now that you've captured a session entitled 'Tracing Walkthrough', it's time to create a dataset. We will do so using the `create_dataset` method below."
+    "Now that you've captured a project entitled 'Tracing Walkthrough', it's time to create a dataset. We will do so using the `create_dataset` method below."
    ]
   },
   {
@@ -207,7 +212,7 @@
     "    dataset_name, description=\"A calculator example dataset\"\n",
     ")\n",
     "runs = client.list_runs(\n",
-    "    session_name=os.environ[\"LANGCHAIN_SESSION\"],\n",
+    "    project_name=os.environ[\"LANGCHAIN_PROJECT\"],\n",
     "    execution_order=1, # Only return the top-level runs\n",
     "    error=False, # Only runs that succeed\n",
     ")\n",
@@ -223,6 +228,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "db79dea2-fbaa-4c12-9083-f6154b51e2d3",
    "metadata": {
@@ -235,7 +241,7 @@
     "Alternatively, you could create or edit the dataset in the UI using the following steps:\n",
     "\n",
     " 1. Navigate to the UI by clicking on the link below.\n",
-    " 2. Select the 'search_and_math_chain' session from the list.\n",
+    " 2. Select the 'search_and_math_chain' project from the list.\n",
     " 3. Next to the fist example, click \"+ to Dataset\".\n",
     " 4. Click \"Create Dataset\" and create a title **\"calculator-example-dataset\"**.\n",
     " 5. Add the other examples to the dataset as well\n",
@@ -303,6 +309,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "07885b10",
    "metadata": {
@@ -339,6 +346,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "84094a4a-1d76-461c-bc37-8c537939b466",
    "metadata": {},
@@ -350,7 +358,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": null,
    "id": "112d7bdf-7e50-4c1a-9285-5bac8473f2ee",
    "metadata": {
     "tags": []
@@ -366,13 +374,13 @@
       "\u001b[0;34m\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
       "\u001b[0;34m\u001b[0m \u001b[0mconcurrency_level\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'int'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m5\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
       "\u001b[0;34m\u001b[0m \u001b[0mnum_repetitions\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'int'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
-      "\u001b[0;34m\u001b[0m \u001b[0msession_name\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'Optional[str]'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+      "\u001b[0;34m\u001b[0m \u001b[0mproject_name\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'Optional[str]'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
       "\u001b[0;34m\u001b[0m \u001b[0mverbose\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'bool'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
       "\u001b[0;34m\u001b[0m \u001b[0mclient\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'Optional[LangChainPlusClient]'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
       "\u001b[0;34m\u001b[0m \u001b[0mtags\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'Optional[List[str]]'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
       "\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;34m'Dict[str, Any]'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
       "\u001b[0;31mDocstring:\u001b[0m\n",
-      "Run the chain on a dataset and store traces to the specified session name.\n",
+      "Run the chain on a dataset and store traces to the specified project name.\n",
       "\n",
       "Args:\n",
       " client: Client to use to read the dataset.\n",
@@ -384,7 +392,7 @@
       " num_repetitions: Number of times to run the model on each example.\n",
       " This is useful when testing success rates or generating confidence\n",
       " intervals.\n",
-      " session_name: Name of the session to store the traces in.\n",
+      " project_name: Name of the project to store the traces in.\n",
       " Defaults to {dataset_name}-{chain class name}-{datetime}.\n",
       " verbose: Whether to print progress.\n",
       " client: Client to use to read the dataset. If not provided, a new\n",
@@ -392,7 +400,7 @@
       " tags: Tags to add to each run in the sesssion.\n",
       "\n",
       "Returns:\n",
-      " A dictionary containing the run's session name and the resulting model outputs.\n",
+      " A dictionary containing the run's project name and the resulting model outputs.\n",
       "\u001b[0;31mFile:\u001b[0m ~/code/lc/lckg/langchain/client/runner_utils.py\n",
       "\u001b[0;31mType:\u001b[0m function"
      ]
@@ -479,6 +487,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "cdacd159-eb4d-49e9-bb2a-c55322c40ed4",
    "metadata": {
@@ -487,7 +496,7 @@
    "source": [
     "### Reviewing the Chain Results\n",
     "\n",
-    "You can review the results of the run in the tracing UI below and navigating to the session \n",
+    "You can review the results of the run in the tracing UI below and navigating to the project \n",
     "with the title **\"Search + Calculator Agent Evaluation\"**"
    ]
   },
@@ -519,6 +528,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "63ed6561-6574-43b3-a653-fe410aa8a617",
    "metadata": {},
@@ -598,9 +608,7 @@
     "from tqdm.notebook import tqdm\n",
     "\n",
     "feedbacks = []\n",
-    "runs = client.list_runs(\n",
-    "    session_name=chain_results[\"session_name\"], execution_order=1, error=False\n",
-    ")\n",
+    "runs = client.list_runs(project_name=chain_results[\"project_name\"], execution_order=1, error=False)\n",
     "for run in tqdm(runs):\n",
     "    if run.outputs is None:\n",
     "        continue\n",
poetry.lock (generated, 10 lines changed)
@@ -4362,13 +4362,13 @@ tests = ["doctest", "pytest", "pytest-mock"]
 
 [[package]]
 name = "langchainplus-sdk"
-version = "0.0.15"
-description = "Client library to connect to the LangChainPlus LLM Tracing and Evaluation Platform."
+version = "0.0.17"
+description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
 optional = false
 python-versions = ">=3.8.1,<4.0"
 files = [
-    {file = "langchainplus_sdk-0.0.15-py3-none-any.whl", hash = "sha256:e69bdbc8af6007ef2f774248d2483bbaf2d75712b1acc9ea50eda3b9f6dc567d"},
-    {file = "langchainplus_sdk-0.0.15.tar.gz", hash = "sha256:ce40e9e3b6d42741f0a2aa89f83a12f2648f38690a9dd57e5fe3a56f2f232908"},
+    {file = "langchainplus_sdk-0.0.17-py3-none-any.whl", hash = "sha256:899675fe850bb0829691ce7643d5c3b4425de1535b6f2d6ce1e5f5457ffb05bf"},
+    {file = "langchainplus_sdk-0.0.17.tar.gz", hash = "sha256:6520c864a23dcadbe6fb7233a117347f6acc32725a97758e59354704c50de303"},
 ]
 
 [package.dependencies]
@@ -11771,4 +11771,4 @@ text-helpers = ["chardet"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "09d46ad12369c6a16513558618553623cd520c2855bff3b8fe8248e1b18cbb94"
+content-hash = "6e495e4f58127a5d2001385404b973896e275f5ca71a6ebe856cb114977189d1"
@@ -106,7 +106,7 @@ pyspark = {version = "^3.4.0", optional = true}
 clarifai = {version = "9.1.0", optional = true}
 tigrisdb = {version = "^1.0.0b6", optional = true}
 nebula3-python = {version = "^3.4.0", optional = true}
-langchainplus-sdk = ">=0.0.13"
+langchainplus-sdk = ">=0.0.17"
 awadb = {version = "^0.3.3", optional = true}
 azure-search-documents = {version = "11.4.0a20230509004", source = "azure-sdk-dev", optional = true}
 openllm = {version = ">=0.1.6", optional = true}
@@ -176,7 +176,7 @@ async def test_arun_on_dataset(monkeypatch: pytest.MonkeyPatch) -> None:
         {"result": f"Result for example {example.id}"} for _ in range(n_repetitions)
     ]
 
-    def mock_create_session(*args: Any, **kwargs: Any) -> None:
+    def mock_create_project(*args: Any, **kwargs: Any) -> None:
         pass
 
     with mock.patch.object(
@@ -186,7 +186,7 @@ async def test_arun_on_dataset(monkeypatch: pytest.MonkeyPatch) -> None:
     ), mock.patch(
         "langchain.client.runner_utils._arun_llm_or_chain", new=mock_arun_chain
     ), mock.patch.object(
-        LangChainPlusClient, "create_session", new=mock_create_session
+        LangChainPlusClient, "create_project", new=mock_create_project
     ):
         client = LangChainPlusClient(api_url="http://localhost:1984", api_key="123")
         chain = mock.MagicMock()
@@ -195,7 +195,7 @@ async def test_arun_on_dataset(monkeypatch: pytest.MonkeyPatch) -> None:
             dataset_name="test",
             llm_or_chain_factory=lambda: chain,
             concurrency_level=2,
-            session_name="test_session",
+            project_name="test_project",
             num_repetitions=num_repetitions,
             client=client,
         )