From 72ca44768c4aedafba43038429bb6daa60d6fb87 Mon Sep 17 00:00:00 2001
From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com>
Date: Fri, 28 Jul 2023 18:27:40 -0700
Subject: [PATCH] spelling

---
 libs/langchain/langchain/evaluation/criteria/prompt.py    | 2 +-
 libs/langchain/langchain/smith/evaluation/runner_utils.py | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/libs/langchain/langchain/evaluation/criteria/prompt.py b/libs/langchain/langchain/evaluation/criteria/prompt.py
index 1d2a4c3ddc3..46e5fbff1fc 100644
--- a/libs/langchain/langchain/evaluation/criteria/prompt.py
+++ b/libs/langchain/langchain/evaluation/criteria/prompt.py
@@ -81,7 +81,7 @@ def get_prompt_template(
     }
     if strategy not in strat_map:
         raise ValueError(
-            f"Unrecognized evalution strategy {strategy}"
+            f"Unrecognized evaluation strategy {strategy}"
             f"\nMust be one of {list(strat_map.keys())}"
         )
     template = _LABELED_TEMPLATE if requires_references else _TEMPLATE
diff --git a/libs/langchain/langchain/smith/evaluation/runner_utils.py b/libs/langchain/langchain/smith/evaluation/runner_utils.py
index cc32e514c63..756d91f2d3f 100644
--- a/libs/langchain/langchain/smith/evaluation/runner_utils.py
+++ b/libs/langchain/langchain/smith/evaluation/runner_utils.py
@@ -1053,12 +1053,12 @@ def _run_on_examples(
         llm_or_chain_factory, examples, evaluation, data_type
     )
     examples = _validate_example_inputs(examples, llm_or_chain_factory, input_mapper)
-    evalution_handler = EvaluatorCallbackHandler(
+    evaluation_handler = EvaluatorCallbackHandler(
         evaluators=run_evaluators or [],
         client=client,
         project_name=evaluator_project_name,
     )
-    callbacks: List[BaseCallbackHandler] = [tracer, evalution_handler]
+    callbacks: List[BaseCallbackHandler] = [tracer, evaluation_handler]
     for i, example in enumerate(examples):
         result = _run_llm_or_chain(
             example,
@@ -1072,7 +1072,7 @@ def _run_on_examples(
             print(f"{i+1} processed", flush=True, end="\r")
         results[str(example.id)] = result
     tracer.wait_for_futures()
-    evalution_handler.wait_for_futures()
+    evaluation_handler.wait_for_futures()
     return results
 
 