{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "657d2c8c-54b4-42a3-9f02-bdefa0ed6728",
   "metadata": {},
   "source": [
    "# Custom Pairwise Evaluator\n",
    "\n",
    "You can make your own pairwise string evaluators by inheriting from the `PairwiseStringEvaluator` class and overriding the `_evaluate_string_pairs` method (and the `_aevaluate_string_pairs` method if you want to use the evaluator asynchronously).\n",
    "\n",
    "In this example, you will make a simple custom evaluator that just returns whether the first prediction has more whitespace-tokenized 'words' than the second.\n",
    "\n",
    "You can check out the reference docs for the [PairwiseStringEvaluator interface](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.schema.PairwiseStringEvaluator.html#langchain.evaluation.schema.PairwiseStringEvaluator) for more info.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "93f3a653-d198-4291-973c-8d1adba338b2",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "from typing import Optional, Any\n",
    "from langchain.evaluation import PairwiseStringEvaluator\n",
    "\n",
    "\n",
    "class LengthComparisonPairwiseEvaluator(PairwiseStringEvaluator):\n",
    "    \"\"\"\n",
    "    Custom evaluator to compare two strings.\n",
    "    \"\"\"\n",
    "\n",
    "    def _evaluate_string_pairs(\n",
    "        self,\n",
    "        *,\n",
    "        prediction: str,\n",
    "        prediction_b: str,\n",
    "        reference: Optional[str] = None,\n",
    "        input: Optional[str] = None,\n",
    "        **kwargs: Any,\n",
    "    ) -> dict:\n",
    "        score = int(len(prediction.split()) > len(prediction_b.split()))\n",
    "        return {\"score\": score}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "7d4a77c3-07a7-4076-8e7f-f9bca0d6c290",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'score': 1}"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "evaluator = LengthComparisonPairwiseEvaluator()\n",
    "\n",
    "evaluator.evaluate_string_pairs(\n",
    "    prediction=\"The quick brown fox jumped over the lazy dog.\",\n",
    "    prediction_b=\"The quick brown fox jumped over the dog.\",\n",
    ")"
   ]
  },
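  {
   "cell_type": "markdown",
   "id": "async-length-comparison-md",
   "metadata": {},
   "source": [
    "## Async Variant\n",
    "\n",
    "To call the evaluator asynchronously, you can also override `_aevaluate_string_pairs` and then use the public `aevaluate_string_pairs` method. The cell below is a minimal sketch of the same length comparison with an async override; because the comparison is purely CPU-bound, it simply delegates to the synchronous implementation.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "async-length-comparison-example",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Optional, Any\n",
    "\n",
    "from langchain.evaluation import PairwiseStringEvaluator\n",
    "\n",
    "\n",
    "class AsyncLengthComparisonPairwiseEvaluator(PairwiseStringEvaluator):\n",
    "    \"\"\"\n",
    "    Sketch: the same length comparison, usable synchronously and asynchronously.\n",
    "    \"\"\"\n",
    "\n",
    "    def _evaluate_string_pairs(\n",
    "        self,\n",
    "        *,\n",
    "        prediction: str,\n",
    "        prediction_b: str,\n",
    "        reference: Optional[str] = None,\n",
    "        input: Optional[str] = None,\n",
    "        **kwargs: Any,\n",
    "    ) -> dict:\n",
    "        score = int(len(prediction.split()) > len(prediction_b.split()))\n",
    "        return {\"score\": score}\n",
    "\n",
    "    async def _aevaluate_string_pairs(\n",
    "        self,\n",
    "        *,\n",
    "        prediction: str,\n",
    "        prediction_b: str,\n",
    "        reference: Optional[str] = None,\n",
    "        input: Optional[str] = None,\n",
    "        **kwargs: Any,\n",
    "    ) -> dict:\n",
    "        # The comparison is CPU-bound, so just reuse the synchronous logic.\n",
    "        return self._evaluate_string_pairs(\n",
    "            prediction=prediction, prediction_b=prediction_b, **kwargs\n",
    "        )\n",
    "\n",
    "\n",
    "# Top-level await works in a Jupyter notebook.\n",
    "await AsyncLengthComparisonPairwiseEvaluator().aevaluate_string_pairs(\n",
    "    prediction=\"The quick brown fox jumped over the lazy dog.\",\n",
    "    prediction_b=\"The quick brown fox jumped over the dog.\",\n",
    ")"
   ]
  },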
  {
   "cell_type": "markdown",
   "id": "d90f128f-6f49-42a1-b05a-3aea568ee03b",
   "metadata": {},
   "source": [
    "## LLM-Based Example\n",
    "\n",
    "That example was simple enough to illustrate the API, but it isn't very useful in practice. Below, we use an LLM with some custom instructions to form a simple preference scorer similar to the built-in [PairwiseStringEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain.html#langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain). We will use `ChatAnthropic` for the evaluator chain."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b4b43098-4d96-417b-a8a9-b3e75779cfe8",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# %pip install anthropic\n",
    "# %env ANTHROPIC_API_KEY=YOUR_API_KEY"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "b6e978ab-48f1-47ff-9506-e13b1a50be6e",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "from typing import Optional, Any\n",
    "from langchain.evaluation import PairwiseStringEvaluator\n",
    "from langchain.chat_models import ChatAnthropic\n",
    "from langchain.chains import LLMChain\n",
    "\n",
    "\n",
    "class CustomPreferenceEvaluator(PairwiseStringEvaluator):\n",
    "    \"\"\"\n",
    "    Custom evaluator to compare two strings using a custom LLMChain.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self) -> None:\n",
    "        llm = ChatAnthropic(model=\"claude-2\", temperature=0)\n",
    "        self.eval_chain = LLMChain.from_string(\n",
    "            llm,\n",
    "            \"\"\"Which option is preferred? Do not take order into account. Evaluate based on accuracy and helpfulness. If neither is preferred, respond with C. Provide your reasoning, then finish with Preference: A/B/C\n",
    "\n",
    "Input: How do I get the path of the parent directory in python 3.8?\n",
    "Option A: You can use the following code:\n",
    "```python\n",
    "import os\n",
    "\n",
    "os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n",
    "```\n",
    "Option B: You can use the following code:\n",
    "```python\n",
    "from pathlib import Path\n",
    "Path(__file__).absolute().parent\n",
    "```\n",
    "Reasoning: Both options return the same result. However, since option B is more concise and easier to understand, it is preferred.\n",
    "Preference: B\n",
    "\n",
    "Which option is preferred? Do not take order into account. Evaluate based on accuracy and helpfulness. If neither is preferred, respond with C. Provide your reasoning, then finish with Preference: A/B/C\n",
    "Input: {input}\n",
    "Option A: {prediction}\n",
    "Option B: {prediction_b}\n",
    "Reasoning:\"\"\",\n",
    "        )\n",
    "\n",
    "    @property\n",
    "    def requires_input(self) -> bool:\n",
    "        return True\n",
    "\n",
    "    @property\n",
    "    def requires_reference(self) -> bool:\n",
    "        return False\n",
    "\n",
    "    def _evaluate_string_pairs(\n",
    "        self,\n",
    "        *,\n",
    "        prediction: str,\n",
    "        prediction_b: str,\n",
    "        reference: Optional[str] = None,\n",
    "        input: Optional[str] = None,\n",
    "        **kwargs: Any,\n",
    "    ) -> dict:\n",
    "        result = self.eval_chain(\n",
    "            {\n",
    "                \"input\": input,\n",
    "                \"prediction\": prediction,\n",
    "                \"prediction_b\": prediction_b,\n",
    "                \"stop\": [\"Which option is preferred?\"],\n",
    "            },\n",
    "            **kwargs,\n",
    "        )\n",
    "\n",
    "        response_text = result[\"text\"]\n",
    "        reasoning, preference = response_text.split(\"Preference:\", maxsplit=1)\n",
    "        preference = preference.strip()\n",
    "        score = 1.0 if preference == \"A\" else (0.0 if preference == \"B\" else None)\n",
    "        return {\"reasoning\": reasoning.strip(), \"value\": preference, \"score\": score}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "5cbd8b1d-2cb0-4f05-b435-a1a00074d94a",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "evaluator = CustomPreferenceEvaluator()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "2c0a7fb7-b976-4443-9f0e-e707a6dfbdf7",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'reasoning': 'Option B is preferred over option A for importing from a relative directory, because it is more straightforward and concise.\\n\\nOption A uses the importlib module, which allows importing a module by specifying the full name as a string. While this works, it is less clear compared to option B.\\n\\nOption B directly imports from the relative path using dot notation, which clearly shows that it is a relative import. This is the recommended way to do relative imports in Python.\\n\\nIn summary, option B is more accurate and helpful as it uses the standard Python relative import syntax.',\n",
       " 'value': 'B',\n",
       " 'score': 0.0}"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "evaluator.evaluate_string_pairs(\n",
    "    input=\"How do I import from a relative directory?\",\n",
    "    prediction=\"use importlib! importlib.import_module('.my_package', '.')\",\n",
    "    prediction_b=\"from .sibling import foo\",\n",
    ")"
   ]
  },
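  {
   "cell_type": "markdown",
   "id": "builtin-pairwise-comparison-md",
   "metadata": {},
   "source": [
    "For comparison, the built-in [PairwiseStringEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain.html#langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain) mentioned above can be loaded through `load_evaluator` and called with the same arguments. The cell below is an illustrative sketch that reuses the same `ChatAnthropic` model instead of the default LLM.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "builtin-pairwise-comparison",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: load the built-in pairwise evaluator to compare against the custom one.\n",
    "from langchain.chat_models import ChatAnthropic\n",
    "from langchain.evaluation import load_evaluator\n",
    "\n",
    "builtin_evaluator = load_evaluator(\n",
    "    \"pairwise_string\", llm=ChatAnthropic(model=\"claude-2\", temperature=0)\n",
    ")\n",
    "\n",
    "builtin_evaluator.evaluate_string_pairs(\n",
    "    input=\"How do I import from a relative directory?\",\n",
    "    prediction=\"use importlib! importlib.import_module('.my_package', '.')\",\n",
    "    prediction_b=\"from .sibling import foo\",\n",
    ")"
   ]
  },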
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "f13a1346-7dbe-451d-b3a3-99e8fc7b753b",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CustomPreferenceEvaluator requires an input string.\n"
     ]
    }
   ],
   "source": [
    "# Setting requires_input to return True adds validation so the evaluator avoids returning a grade when insufficient data is provided to the chain.\n",
    "\n",
    "try:\n",
    "    evaluator.evaluate_string_pairs(\n",
    "        prediction=\"use importlib! importlib.import_module('.my_package', '.')\",\n",
    "        prediction_b=\"from .sibling import foo\",\n",
    "    )\n",
    "except ValueError as e:\n",
    "    print(e)"
   ]
  },
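  {
   "cell_type": "markdown",
   "id": "requires-reference-example-md",
   "metadata": {},
   "source": [
    "The `requires_reference` property works the same way: when it returns True and no `reference` is supplied, the evaluator should refuse to grade. The cell below is a small sketch of that behavior with a throwaway word-overlap evaluator (assuming the base class validates references the same way it validates inputs).\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "requires-reference-example",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Optional, Any\n",
    "\n",
    "from langchain.evaluation import PairwiseStringEvaluator\n",
    "\n",
    "\n",
    "class ReferenceRequiredEvaluator(PairwiseStringEvaluator):\n",
    "    \"\"\"\n",
    "    Sketch: prefer the prediction that shares more words with the reference.\n",
    "    \"\"\"\n",
    "\n",
    "    @property\n",
    "    def requires_reference(self) -> bool:\n",
    "        return True\n",
    "\n",
    "    def _evaluate_string_pairs(\n",
    "        self,\n",
    "        *,\n",
    "        prediction: str,\n",
    "        prediction_b: str,\n",
    "        reference: Optional[str] = None,\n",
    "        input: Optional[str] = None,\n",
    "        **kwargs: Any,\n",
    "    ) -> dict:\n",
    "        ref_words = set(reference.split())\n",
    "        overlap = len(ref_words & set(prediction.split()))\n",
    "        overlap_b = len(ref_words & set(prediction_b.split()))\n",
    "        return {\"score\": int(overlap > overlap_b)}\n",
    "\n",
    "\n",
    "try:\n",
    "    # No reference supplied, so this should raise a ValueError.\n",
    "    ReferenceRequiredEvaluator().evaluate_string_pairs(\n",
    "        prediction=\"foo\", prediction_b=\"bar\"\n",
    "    )\n",
    "except ValueError as e:\n",
    "    print(e)"
   ]
  },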
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e7829cc3-ebd1-4628-ae97-15166202e9cc",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}