mirror of
https://github.com/hwchase17/langchain.git
synced 2025-08-13 14:50:00 +00:00
Refactored example_generator
(#8099)
Refactored `example_generator.py`. As with #7961, `example_generator.py` was in the root code folder. This created the `langchain.example_generator: Example Generator` group in the API Reference navigation ToC, at the same level as `Chains` and `Agents`, which is not correct. Refactoring: - moved the `example_generator.py` content into `chains/example_generator.py` (not into `utils/` because `example_generator` depends on other LangChain classes; moving it into `utilities/` also does not work) - added a backwards-compatibility re-export in the original `example_generator.py` @hwchase17
This commit is contained in:
parent
1cc7d4c9eb
commit
3eb4112a1f
@ -26,6 +26,7 @@ from langchain.chains.conversational_retrieval.base import (
|
||||
ChatVectorDBChain,
|
||||
ConversationalRetrievalChain,
|
||||
)
|
||||
from langchain.chains.example_generator import generate_example
|
||||
from langchain.chains.flare.base import FlareChain
|
||||
from langchain.chains.graph_qa.base import GraphQAChain
|
||||
from langchain.chains.graph_qa.cypher import GraphCypherQAChain
|
||||
@ -84,9 +85,9 @@ __all__ = [
|
||||
"GraphCypherQAChain",
|
||||
"GraphQAChain",
|
||||
"GraphSparqlQAChain",
|
||||
"HugeGraphQAChain",
|
||||
"HypotheticalDocumentEmbedder",
|
||||
"KuzuQAChain",
|
||||
"HugeGraphQAChain",
|
||||
"LLMBashChain",
|
||||
"LLMChain",
|
||||
"LLMCheckerChain",
|
||||
@ -95,6 +96,8 @@ __all__ = [
|
||||
"LLMRouterChain",
|
||||
"LLMSummarizationCheckerChain",
|
||||
"MapReduceChain",
|
||||
"MapReduceDocumentsChain",
|
||||
"MapRerankDocumentsChain",
|
||||
"MultiPromptChain",
|
||||
"MultiRetrievalQAChain",
|
||||
"MultiRouteChain",
|
||||
@ -105,6 +108,8 @@ __all__ = [
|
||||
"PALChain",
|
||||
"QAGenerationChain",
|
||||
"QAWithSourcesChain",
|
||||
"ReduceDocumentsChain",
|
||||
"RefineDocumentsChain",
|
||||
"RetrievalQA",
|
||||
"RetrievalQAWithSourcesChain",
|
||||
"RouterChain",
|
||||
@ -112,20 +117,17 @@ __all__ = [
|
||||
"SQLDatabaseSequentialChain",
|
||||
"SequentialChain",
|
||||
"SimpleSequentialChain",
|
||||
"StuffDocumentsChain",
|
||||
"TransformChain",
|
||||
"VectorDBQA",
|
||||
"VectorDBQAWithSourcesChain",
|
||||
"create_citation_fuzzy_match_chain",
|
||||
"create_extraction_chain",
|
||||
"create_extraction_chain_pydantic",
|
||||
"create_qa_with_sources_chain",
|
||||
"create_qa_with_structure_chain",
|
||||
"create_tagging_chain",
|
||||
"create_tagging_chain_pydantic",
|
||||
"generate_example",
|
||||
"load_chain",
|
||||
"create_citation_fuzzy_match_chain",
|
||||
"create_qa_with_structure_chain",
|
||||
"create_qa_with_sources_chain",
|
||||
"StuffDocumentsChain",
|
||||
"MapRerankDocumentsChain",
|
||||
"MapReduceDocumentsChain",
|
||||
"RefineDocumentsChain",
|
||||
"ReduceDocumentsChain",
|
||||
]
|
||||
|
22
libs/langchain/langchain/chains/example_generator.py
Normal file
22
libs/langchain/langchain/chains/example_generator.py
Normal file
@ -0,0 +1,22 @@
|
||||
from typing import List
|
||||
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.prompts.few_shot import FewShotPromptTemplate
|
||||
from langchain.prompts.prompt import PromptTemplate
|
||||
from langchain.schema.language_model import BaseLanguageModel
|
||||
|
||||
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
|
||||
|
||||
|
||||
def generate_example(
    examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
) -> str:
    """Return another example given a list of examples for a prompt.

    Builds a few-shot prompt in which each entry of *examples* is rendered
    with *prompt_template*, followed by the "Add another example." suffix,
    then asks *llm* to complete the pattern with one more example.
    """
    few_shot = FewShotPromptTemplate(
        example_prompt=prompt_template,
        examples=examples,
        # The suffix instructs the model to continue the pattern; the prompt
        # is fully determined by the examples, so no input variables exist.
        suffix=TEST_GEN_TEMPLATE_SUFFIX,
        input_variables=[],
    )
    return LLMChain(llm=llm, prompt=few_shot).predict()
|
@ -1,23 +1,4 @@
|
||||
"""Keep here for backwards compatibility."""
# The implementation moved to langchain.chains.example_generator; this module
# only re-exports it so existing `from langchain.example_generator import
# generate_example` call sites keep working.
from langchain.chains.example_generator import generate_example

__all__ = ["generate_example"]
|
||||
|
Loading…
Reference in New Issue
Block a user