langchain/templates/guardrails-output-parser/guardrails_output_parser/chain.py
Bagatur 480626dc99
docs, community[patch], experimental[patch], langchain[patch], cli[patch]: import models from community (#15412)

ran
```bash
git grep -l 'from langchain\.chat_models' | xargs -L 1 sed -i '' "s/from\ langchain\.chat_models/from\ langchain_community.chat_models/g"
git grep -l 'from langchain\.llms' | xargs -L 1 sed -i '' "s/from\ langchain\.llms/from\ langchain_community.llms/g"
git grep -l 'from langchain\.embeddings' | xargs -L 1 sed -i '' "s/from\ langchain\.embeddings/from\ langchain_community.embeddings/g"
git checkout master libs/langchain/tests/unit_tests/llms
git checkout master libs/langchain/tests/unit_tests/chat_models
git checkout master libs/langchain/tests/unit_tests/embeddings/test_imports.py
make format
cd libs/langchain; make format
cd ../experimental; make format
cd ../core; make format
```
2024-01-02 15:32:16 -05:00

41 lines
1.0 KiB
Python

"""Guardrails-validated translation chain (LangChain template).

Builds and exposes a runnable ``chain`` that translates a statement into
English via OpenAI, then validates the model output with Guardrails AI
using a rail spec that requires the translation to be profanity-free.
The template's package imports ``chain`` from this module, so all
module-level names here are part of the public surface.
"""

from langchain.output_parsers import GuardrailsOutputParser
from langchain.prompts import PromptTemplate
from langchain_community.llms import OpenAI

# Guardrails "rail" spec (XML). It declares one output field,
# `translated_statement`, validated with the `is-profanity-free` format;
# on validation failure the value is auto-corrected (on-fail="fix").
# The <prompt> section carries two template variables:
#   ${statement_to_be_translated} — filled by the caller at invoke time
#   ${gr.complete_json_suffix}    — Guardrails' built-in JSON-output instructions
rail_str = """
<rail version="0.1">
<output>
<string
description="Profanity-free translation"
format="is-profanity-free"
name="translated_statement"
on-fail-is-profanity-free="fix">
</string>
</output>
<prompt>
Translate the given statement into English:
${statement_to_be_translated}
${gr.complete_json_suffix}
</prompt>
</rail>
"""
# Parse the rail spec into a LangChain output parser wrapping a Guardrails
# Guard object.
output_parser = GuardrailsOutputParser.from_rail_string(rail_str)
# Build the PromptTemplate from the Guard's own prompt: `.escape()` yields
# the prompt text with Guardrails' ${...} placeholders escaped into a form
# PromptTemplate can consume, and `variable_names` lists the user-facing
# input variables (here, statement_to_be_translated).
prompt = PromptTemplate(
template=output_parser.guard.prompt.escape(),
input_variables=output_parser.guard.prompt.variable_names,
)
# LCEL pipeline: format prompt -> call OpenAI -> validate/fix via Guardrails.
chain = prompt | OpenAI() | output_parser
# This is needed because GuardrailsOutputParser does not have an inferrable type
chain = chain.with_types(output_type=dict)