diff --git a/docs/docs/modules/model_io/output_parsers/types/retry.ipynb b/docs/docs/modules/model_io/output_parsers/types/retry.ipynb index 6a1a7f94a92..a0582ba7f0c 100644 --- a/docs/docs/modules/model_io/output_parsers/types/retry.ipynb +++ b/docs/docs/modules/model_io/output_parsers/types/retry.ipynb @@ -174,7 +174,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.output_parsers import RetryWithErrorOutputParser" + "from langchain.output_parsers import RetryOutputParser" ] }, { @@ -184,9 +184,7 @@ "metadata": {}, "outputs": [], "source": [ - "retry_parser = RetryWithErrorOutputParser.from_llm(\n", - " parser=parser, llm=OpenAI(temperature=0)\n", - ")" + "retry_parser = RetryOutputParser.from_llm(parser=parser, llm=OpenAI(temperature=0))" ] }, { @@ -210,6 +208,41 @@ "retry_parser.parse_with_prompt(bad_response, prompt_value)" ] }, + { + "cell_type": "markdown", + "id": "16827256-5801-4388-b6fa-608991e29961", + "metadata": {}, + "source": [ + "We can also add the RetryOutputParser easily with a custom chain which transforms the raw LLM/ChatModel output into a more workable format." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "7eaff2fb-56d3-481c-99a1-a968a49d0654", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Action(action='search', action_input='leo di caprio girlfriend')\n" + ] + } + ], + "source": [ + "from langchain_core.runnables import RunnableLambda, RunnableParallel\n", + "\n", + "completion_chain = prompt | OpenAI(temperature=0)\n", + "\n", + "main_chain = RunnableParallel(\n", + " completion=completion_chain, prompt_value=prompt\n", + ") | RunnableLambda(lambda x: retry_parser.parse_with_prompt(**x))\n", + "\n", + "\n", + "main_chain.invoke({\"query\": \"who is leo di caprios gf?\"})" + ] + }, { "cell_type": "code", "execution_count": null, @@ -235,7 +268,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.1" + "version": "3.9.13" } }, "nbformat": 4,