PubMed document loader (#8893)

- added `PubMed Document Loader` artifacts; unit tests; examples
- fixed `PubMed utility`; unit tests

@hwchase17
This commit is contained in:
parent
a7824f16f2
commit
2d078c7767
139  docs/extras/integrations/document_loaders/pubmed.ipynb  (new file)
@@ -0,0 +1,139 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "3df0dcf8",
"metadata": {},
"source": [
"# PubMed\n",
"\n",
">[PubMed®](https://pubmed.ncbi.nlm.nih.gov/) by `The National Center for Biotechnology Information, National Library of Medicine` comprises more than 35 million citations for biomedical literature from `MEDLINE`, life science journals, and online books. Citations may include links to full text content from `PubMed Central` and publisher web sites."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "aecaff63",
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import PubMedLoader"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "f2f7e8d3",
"metadata": {},
"outputs": [],
"source": [
"loader = PubMedLoader(\"chatgpt\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "ed115aa1",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "b68d3264-b893-45e4-8ab0-077b25a586dc",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(docs)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "9f4626d2-068d-4aed-9ffe-ad754ad4b4cd",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'uid': '37548997',\n",
" 'Title': 'Performance of ChatGPT on the Situational Judgement Test-A Professional Dilemmas-Based Examination for Doctors in the United Kingdom.',\n",
" 'Published': '2023-08-07',\n",
" 'Copyright Information': '©Robin J Borchert, Charlotte R Hickman, Jack Pepys, Timothy J Sadler. Originally published in JMIR Medical Education (https://mededu.jmir.org), 07.08.2023.'}"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs[1].metadata"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "8000f687-b500-4cce-841b-70d6151304da",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"BACKGROUND: ChatGPT is a large language model that has performed well on professional examinations in the fields of medicine, law, and business. However, it is unclear how ChatGPT would perform on an examination assessing professionalism and situational judgement for doctors.\\nOBJECTIVE: We evaluated the performance of ChatGPT on the Situational Judgement Test (SJT): a national examination taken by all final-year medical students in the United Kingdom. This examination is designed to assess attributes such as communication, teamwork, patient safety, prioritization skills, professionalism, and ethics.\\nMETHODS: All questions from the UK Foundation Programme Office's (UKFPO's) 2023 SJT practice examination were inputted into ChatGPT. For each question, ChatGPT's answers and rationales were recorded and assessed on the basis of the official UK Foundation Programme Office scoring template. Questions were categorized into domains of Good Medical Practice on the basis of the domains referenced in the rationales provided in the scoring sheet. Questions without clear domain links were screened by reviewers and assigned one or multiple domains. ChatGPT's overall performance, as well as its performance across the domains of Good Medical Practice, was evaluated.\\nRESULTS: Overall, ChatGPT performed well, scoring 76% on the SJT but scoring full marks on only a few questions (9%), which may reflect possible flaws in ChatGPT's situational judgement or inconsistencies in the reasoning across questions (or both) in the examination itself. ChatGPT demonstrated consistent performance across the 4 outlined domains in Good Medical Practice for doctors.\\nCONCLUSIONS: Further research is needed to understand the potential applications of large language models, such as ChatGPT, in medical education for standardizing questions and providing consistent rationales for examinations assessing professionalism and ethics.\""
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs[1].page_content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1070e571-697d-4c33-9a4f-0b2dd6909629",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
30  docs/extras/integrations/providers/pubmed.md  (new file)
@@ -0,0 +1,30 @@
# PubMed

>[PubMed®](https://pubmed.ncbi.nlm.nih.gov/) by `The National Center for Biotechnology Information, National Library of Medicine`
> comprises more than 35 million citations for biomedical literature from `MEDLINE`, life science journals, and online books.
> Citations may include links to full text content from `PubMed Central` and publisher web sites.

## Setup
You need to install the `xmltodict` python package.

```bash
pip install xmltodict
```

### Retriever

See a [usage example](/docs/integrations/retrievers/pubmed).

```python
from langchain.retrievers import PubMedRetriever
```

### Document Loader

See a [usage example](/docs/integrations/document_loaders/pubmed).

```python
from langchain.document_loaders import PubMedLoader
```
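For orientation, here is a minimal end-to-end sketch of the integration this commit adds; the `"chatgpt"` query and the result counts are illustrative only, and network access to the PubMed E-utilities plus the `xmltodict` package are assumed.

```python
from langchain.document_loaders import PubMedLoader
from langchain.retrievers import PubMedRetriever

# Load up to three PubMed records for a query as Documents
# (abstract as page_content, citation fields as metadata).
loader = PubMedLoader("chatgpt", load_max_docs=3)
docs = loader.load()
print(len(docs), docs[0].metadata["Title"])

# The same records can be fetched through the retriever interface.
retriever = PubMedRetriever(top_k_results=3)
docs = retriever.get_relevant_documents("chatgpt")
```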
@@ -7,14 +7,15 @@
 "source": [
 "# PubMed\n",
 "\n",
-"This notebook goes over how to use `PubMed` as a retriever\n",
 "\n",
-"`PubMed®` comprises more than 35 million citations for biomedical literature from `MEDLINE`, life science journals, and online books. Citations may include links to full text content from `PubMed Central` and publisher web sites."
+">[PubMed®](https://pubmed.ncbi.nlm.nih.gov/) by `The National Center for Biotechnology Information, National Library of Medicine` comprises more than 35 million citations for biomedical literature from `MEDLINE`, life science journals, and online books. Citations may include links to full text content from `PubMed Central` and publisher web sites.\n",
+"\n",
+"This notebook goes over how to use `PubMed` as a retriever"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": 12,
 "id": "aecaff63",
 "metadata": {},
 "outputs": [],
@@ -24,7 +25,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": 34,
 "id": "f2f7e8d3",
 "metadata": {},
 "outputs": [],
@@ -34,19 +35,19 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 35,
 "id": "ed115aa1",
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"[Document(page_content='', metadata={'uid': '37268021', 'title': 'Dermatology in the wake of an AI revolution: who gets a say?', 'pub_date': '<Year>2023</Year><Month>May</Month><Day>31</Day>'}),\n",
-" Document(page_content='', metadata={'uid': '37267643', 'title': 'What is ChatGPT and what do we do with it? Implications of the age of AI for nursing and midwifery practice and education: An editorial.', 'pub_date': '<Year>2023</Year><Month>May</Month><Day>30</Day>'}),\n",
-" Document(page_content='The nursing field has undergone notable changes over time and is projected to undergo further modifications in the future, owing to the advent of sophisticated technologies and growing healthcare needs. The advent of ChatGPT, an AI-powered language model, is expected to exert a significant influence on the nursing profession, specifically in the domains of patient care and instruction. The present article delves into the ramifications of ChatGPT within the nursing domain and accentuates its capacity and constraints to transform the discipline.', metadata={'uid': '37266721', 'title': 'The Impact of ChatGPT on the Nursing Profession: Revolutionizing Patient Care and Education.', 'pub_date': '<Year>2023</Year><Month>Jun</Month><Day>02</Day>'})]"
+"[Document(page_content='', metadata={'uid': '37549050', 'Title': 'ChatGPT: \"To Be or Not to Be\" in Bikini Bottom.', 'Published': '--', 'Copyright Information': ''}),\n",
+" Document(page_content=\"BACKGROUND: ChatGPT is a large language model that has performed well on professional examinations in the fields of medicine, law, and business. However, it is unclear how ChatGPT would perform on an examination assessing professionalism and situational judgement for doctors.\\nOBJECTIVE: We evaluated the performance of ChatGPT on the Situational Judgement Test (SJT): a national examination taken by all final-year medical students in the United Kingdom. This examination is designed to assess attributes such as communication, teamwork, patient safety, prioritization skills, professionalism, and ethics.\\nMETHODS: All questions from the UK Foundation Programme Office's (UKFPO's) 2023 SJT practice examination were inputted into ChatGPT. For each question, ChatGPT's answers and rationales were recorded and assessed on the basis of the official UK Foundation Programme Office scoring template. Questions were categorized into domains of Good Medical Practice on the basis of the domains referenced in the rationales provided in the scoring sheet. Questions without clear domain links were screened by reviewers and assigned one or multiple domains. ChatGPT's overall performance, as well as its performance across the domains of Good Medical Practice, was evaluated.\\nRESULTS: Overall, ChatGPT performed well, scoring 76% on the SJT but scoring full marks on only a few questions (9%), which may reflect possible flaws in ChatGPT's situational judgement or inconsistencies in the reasoning across questions (or both) in the examination itself. ChatGPT demonstrated consistent performance across the 4 outlined domains in Good Medical Practice for doctors.\\nCONCLUSIONS: Further research is needed to understand the potential applications of large language models, such as ChatGPT, in medical education for standardizing questions and providing consistent rationales for examinations assessing professionalism and ethics.\", metadata={'uid': '37548997', 'Title': 'Performance of ChatGPT on the Situational Judgement Test-A Professional Dilemmas-Based Examination for Doctors in the United Kingdom.', 'Published': '2023-08-07', 'Copyright Information': '©Robin J Borchert, Charlotte R Hickman, Jack Pepys, Timothy J Sadler. Originally published in JMIR Medical Education (https://mededu.jmir.org), 07.08.2023.'}),\n",
+" Document(page_content='', metadata={'uid': '37548971', 'Title': \"Large Language Models Answer Medical Questions Accurately, but Can't Match Clinicians' Knowledge.\", 'Published': '2023-08-07', 'Copyright Information': ''})]"
 ]
 },
-"execution_count": 9,
+"execution_count": 35,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -54,6 +55,14 @@
 "source": [
 "retriever.get_relevant_documents(\"chatgpt\")"
 ]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "a9ff7a25-bb4b-4cd5-896d-72f70f4af49b",
+"metadata": {},
+"outputs": [],
+"source": []
 }
 ],
 "metadata": {
@@ -72,7 +81,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.6"
+"version": "3.10.12"
 }
 },
 "nbformat": 4,
@@ -196,7 +196,7 @@ def _get_golden_query(**kwargs: Any) -> BaseTool:
     return GoldenQueryRun(api_wrapper=GoldenQueryAPIWrapper(**kwargs))


-def _get_pupmed(**kwargs: Any) -> BaseTool:
+def _get_pubmed(**kwargs: Any) -> BaseTool:
     return PubmedQueryRun(api_wrapper=PubMedAPIWrapper(**kwargs))


@@ -313,10 +313,7 @@ _EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[st
         ["top_k_results", "load_max_docs", "load_all_available_meta"],
     ),
     "golden-query": (_get_golden_query, ["golden_api_key"]),
-    "pupmed": (
-        _get_pupmed,
-        ["top_k_results", "load_max_docs", "load_all_available_meta"],
-    ),
+    "pubmed": (_get_pubmed, ["top_k_results"]),
     "human": (_get_human_tool, ["prompt_func", "input_func"]),
     "awslambda": (
         _get_lambda_api,
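As a quick check of the renamed registry entry, a hedged sketch of loading the tool by name; the `top_k_results=1` value is illustrative only.

```python
from langchain.agents import load_tools

# "pubmed" (formerly misspelled "pupmed") now maps to PubmedQueryRun,
# and top_k_results is the only optional keyword the entry still accepts.
tools = load_tools(["pubmed"], top_k_results=1)
print(type(tools[0]).__name__)  # -> "PubmedQueryRun"
```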
@@ -122,6 +122,7 @@ from langchain.document_loaders.pdf import (
 )
 from langchain.document_loaders.powerpoint import UnstructuredPowerPointLoader
 from langchain.document_loaders.psychic import PsychicLoader
+from langchain.document_loaders.pubmed import PubMedLoader
 from langchain.document_loaders.pyspark_dataframe import PySparkDataFrameLoader
 from langchain.document_loaders.python import PythonLoader
 from langchain.document_loaders.readthedocs import ReadTheDocsLoader
@@ -184,13 +185,14 @@ PagedPDFSplitter = PyPDFLoader
 TelegramChatLoader = TelegramChatFileLoader

 __all__ = [
-    "AcreomLoader",
-    "AsyncHtmlLoader",
     "AZLyricsLoader",
+    "AcreomLoader",
     "AirbyteJSONLoader",
     "AirtableLoader",
+    "AmazonTextractPDFLoader",
     "ApifyDatasetLoader",
     "ArxivLoader",
+    "AsyncHtmlLoader",
     "AzureBlobStorageContainerLoader",
     "AzureBlobStorageFileLoader",
     "BSHTMLLoader",
@@ -207,10 +209,11 @@ __all__ = [
     "ChatGPTLoader",
     "CoNLLULoader",
     "CollegeConfidentialLoader",
+    "ConcurrentLoader",
     "ConfluenceLoader",
     "CubeSemanticLoader",
-    "DatadogLogsLoader",
     "DataFrameLoader",
+    "DatadogLogsLoader",
     "DiffbotLoader",
     "DirectoryLoader",
     "DiscordChatLoader",
@@ -246,12 +249,12 @@ __all__ = [
     "JSONLoader",
     "JoplinLoader",
     "LarkSuiteDocLoader",
+    "MHTMLLoader",
     "MWDumpLoader",
     "MastodonTootsLoader",
     "MathpixPDFLoader",
     "MaxComputeLoader",
     "MergedDataLoader",
-    "MHTMLLoader",
     "ModernTreasuryLoader",
     "NewsURLLoader",
     "NotebookLoader",
@@ -263,26 +266,27 @@ __all__ = [
     "OneDriveFileLoader",
     "OneDriveLoader",
     "OnlinePDFLoader",
-    "OutlookMessageLoader",
     "OpenCityDataLoader",
+    "OutlookMessageLoader",
     "PDFMinerLoader",
     "PDFMinerPDFasHTMLLoader",
     "PDFPlumberLoader",
     "PagedPDFSplitter",
     "PlaywrightURLLoader",
     "PsychicLoader",
+    "PubMedLoader",
     "PyMuPDFLoader",
     "PyPDFDirectoryLoader",
     "PyPDFLoader",
     "PyPDFium2Loader",
     "PySparkDataFrameLoader",
     "PythonLoader",
+    "RSSFeedLoader",
     "ReadTheDocsLoader",
     "RecursiveUrlLoader",
     "RedditPostsLoader",
     "RoamLoader",
     "RocksetLoader",
-    "RSSFeedLoader",
     "S3DirectoryLoader",
     "S3FileLoader",
     "SRTLoader",
@@ -292,11 +296,11 @@ __all__ = [
     "SnowflakeLoader",
     "SpreedlyLoader",
     "StripeLoader",
-    "TencentCOSDirectoryLoader",
-    "TencentCOSFileLoader",
     "TelegramChatApiLoader",
     "TelegramChatFileLoader",
     "TelegramChatLoader",
+    "TencentCOSDirectoryLoader",
+    "TencentCOSFileLoader",
     "TextLoader",
     "ToMarkdownLoader",
     "TomlLoader",
@@ -330,6 +334,4 @@ __all__ = [
     "XorbitsLoader",
     "YoutubeAudioLoader",
     "YoutubeLoader",
-    "ConcurrentLoader",
-    "AmazonTextractPDFLoader",
 ]
39  libs/langchain/langchain/document_loaders/pubmed.py  (new file)
@@ -0,0 +1,39 @@
from typing import Iterator, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.pubmed import PubMedAPIWrapper


class PubMedLoader(BaseLoader):
    """Loads a query result from PubMed biomedical library into a list of Documents.

    Attributes:
        query: The query to be passed to the PubMed API.
        load_max_docs: The maximum number of documents to load.
    """

    def __init__(
        self,
        query: str,
        load_max_docs: Optional[int] = 3,
    ):
        """Initialize the PubMedLoader.

        Args:
            query: The query to be passed to the PubMed API.
            load_max_docs: The maximum number of documents to load.
                Defaults to 3.
        """
        self.query = query
        self.load_max_docs = load_max_docs
        self._client = PubMedAPIWrapper(
            top_k_results=load_max_docs,
        )

    def load(self) -> List[Document]:
        return list(self._client.lazy_load_docs(self.query))

    def lazy_load(self) -> Iterator[Document]:
        for doc in self._client.lazy_load_docs(self.query):
            yield doc
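A brief usage sketch of the loader defined above (the query string and limit are illustrative):

```python
from langchain.document_loaders import PubMedLoader

loader = PubMedLoader(query="chatgpt", load_max_docs=2)

# load() materializes the list; lazy_load() yields Documents one at a time,
# which avoids holding every abstract in memory at once.
for doc in loader.lazy_load():
    print(doc.metadata["uid"], doc.metadata["Title"])
```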
@@ -2,7 +2,7 @@ from typing import List

 from langchain.callbacks.manager import CallbackManagerForRetrieverRun
 from langchain.schema import BaseRetriever, Document
-from langchain.utilities.pupmed import PubMedAPIWrapper
+from langchain.utilities.pubmed import PubMedAPIWrapper


 class PubMedRetriever(BaseRetriever, PubMedAPIWrapper):
@@ -1,12 +1,10 @@
-"""Tool for the Pubmed API."""
-
 from typing import Optional

 from pydantic import Field

 from langchain.callbacks.manager import CallbackManagerForToolRun
 from langchain.tools.base import BaseTool
-from langchain.utilities.pupmed import PubMedAPIWrapper
+from langchain.utilities.pubmed import PubMedAPIWrapper


 class PubmedQueryRun(BaseTool):
@@ -14,11 +12,10 @@ class PubmedQueryRun(BaseTool):

     name = "PubMed"
     description = (
-        "A wrapper around PubMed.org "
-        "Useful for when you need to answer questions about Physics, Mathematics, "
-        "Computer Science, Quantitative Biology, Quantitative Finance, Statistics, "
-        "Electrical Engineering, and Economics "
-        "from scientific articles on PubMed.org. "
+        "A wrapper around PubMed. "
+        "Useful for when you need to answer questions about medicine, health, "
+        "and biomedical topics "
+        "from biomedical literature, MEDLINE, life science journals, and online books. "
         "Input should be a search query."
     )
     api_wrapper: PubMedAPIWrapper = Field(default_factory=PubMedAPIWrapper)
@@ -28,5 +25,5 @@ class PubmedQueryRun(BaseTool):
         query: str,
         run_manager: Optional[CallbackManagerForToolRun] = None,
     ) -> str:
-        """Use the Arxiv tool."""
+        """Use the PubMed tool."""
         return self.api_wrapper.run(query)
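For completeness, a small sketch of invoking the corrected tool directly (the query string is illustrative):

```python
from langchain.tools import PubmedQueryRun

tool = PubmedQueryRun()
# The tool delegates to PubMedAPIWrapper.run(), which returns a single
# formatted string of Published / Title / Copyright Information / Summary blocks.
print(tool.run("chatgpt"))
```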
@@ -21,7 +21,7 @@ from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper
 from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
 from langchain.utilities.portkey import Portkey
 from langchain.utilities.powerbi import PowerBIDataset
-from langchain.utilities.pupmed import PubMedAPIWrapper
+from langchain.utilities.pubmed import PubMedAPIWrapper
 from langchain.utilities.python import PythonREPL
 from langchain.utilities.requests import Requests, RequestsWrapper, TextRequestsWrapper
 from langchain.utilities.scenexplain import SceneXplainAPIWrapper
@@ -3,9 +3,10 @@ import logging
 import time
 import urllib.error
 import urllib.request
-from typing import List
+from typing import Any, Dict, Iterator, List

 from pydantic import BaseModel
+from pydantic.class_validators import root_validator

 from langchain.schema import Document

@@ -22,13 +23,19 @@ class PubMedAPIWrapper(BaseModel):

     Parameters:
         top_k_results: number of the top-scored document used for the PubMed tool
-        load_max_docs: a limit to the number of loaded documents
-        load_all_available_meta:
-            if True: the `metadata` of the loaded Documents gets all available meta info
-                (see https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch)
-            if False: the `metadata` gets only the most informative fields.
+        MAX_QUERY_LENGTH: maximum length of the query.
+            Default is 300 characters.
+        doc_content_chars_max: maximum length of the document content.
+            Content will be truncated if it exceeds this length.
+            Default is 2000 characters.
+        max_retry: maximum number of retries for a request. Default is 5.
+        sleep_time: time to wait between retries.
+            Default is 0.2 seconds.
+        email: email address to be used for the PubMed API.
     """

+    parse: Any #: :meta private:
+
     base_url_esearch = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
     base_url_efetch = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
     max_retry = 5
@@ -36,12 +43,24 @@ class PubMedAPIWrapper(BaseModel):

     # Default values for the parameters
     top_k_results: int = 3
-    load_max_docs: int = 25
-    ARXIV_MAX_QUERY_LENGTH = 300
+    MAX_QUERY_LENGTH = 300
     doc_content_chars_max: int = 2000
-    load_all_available_meta: bool = False
     email: str = "your_email@example.com"

+    @root_validator()
+    def validate_environment(cls, values: Dict) -> Dict:
+        """Validate that the python package exists in environment."""
+        try:
+            import xmltodict
+
+            values["parse"] = xmltodict.parse
+        except ImportError:
+            raise ImportError(
+                "Could not import xmltodict python package. "
+                "Please install it with `pip install xmltodict`."
+            )
+        return values
+
     def run(self, query: str) -> str:
         """
         Run PubMed search and get the article meta information.
@@ -52,9 +71,11 @@ class PubMedAPIWrapper(BaseModel):
         try:
             # Retrieve the top-k results for the query
             docs = [
-                f"Published: {result['pub_date']}\nTitle: {result['title']}\n"
-                f"Summary: {result['summary']}"
-                for result in self.load(query[: self.ARXIV_MAX_QUERY_LENGTH])
+                f"Published: {result['Published']}\n"
+                f"Title: {result['Title']}\n"
+                f"Copyright Information: {result['Copyright Information']}\n"
+                f"Summary::\n{result['Summary']}"
+                for result in self.load(query[: self.MAX_QUERY_LENGTH])
             ]

             # Join the results and limit the character count
@@ -66,10 +87,10 @@ class PubMedAPIWrapper(BaseModel):
         except Exception as ex:
             return f"PubMed exception: {ex}"

-    def load(self, query: str) -> List[dict]:
+    def lazy_load(self, query: str) -> Iterator[dict]:
         """
         Search PubMed for documents matching the query.
-        Return a list of dictionaries containing the document metadata.
+        Return an iterator of dictionaries containing the document metadata.
         """

         url = (
@@ -82,22 +103,27 @@ class PubMedAPIWrapper(BaseModel):
             text = result.read().decode("utf-8")
             json_text = json.loads(text)

-        articles = []
         webenv = json_text["esearchresult"]["webenv"]
         for uid in json_text["esearchresult"]["idlist"]:
-            article = self.retrieve_article(uid, webenv)
-            articles.append(article)
+            yield self.retrieve_article(uid, webenv)

-        # Convert the list of articles to a JSON string
-        return articles
+    def load(self, query: str) -> List[dict]:
+        """
+        Search PubMed for documents matching the query.
+        Return a list of dictionaries containing the document metadata.
+        """
+        return list(self.lazy_load(query))

-    def _transform_doc(self, doc: dict) -> Document:
-        summary = doc.pop("summary")
+    def _dict2document(self, doc: dict) -> Document:
+        summary = doc.pop("Summary")
         return Document(page_content=summary, metadata=doc)

+    def lazy_load_docs(self, query: str) -> Iterator[Document]:
+        for d in self.lazy_load(query=query):
+            yield self._dict2document(d)
+
     def load_docs(self, query: str) -> List[Document]:
-        document_dicts = self.load(query=query)
-        return [self._transform_doc(d) for d in document_dicts]
+        return list(self.lazy_load_docs(query=query))

     def retrieve_article(self, uid: str, webenv: str) -> dict:
         url = (
@@ -115,7 +141,7 @@ class PubMedAPIWrapper(BaseModel):
                 break
             except urllib.error.HTTPError as e:
                 if e.code == 429 and retry < self.max_retry:
-                    # Too Many Requests error
+                    # Too Many Requests errors
                     # wait for an exponentially increasing amount of time
                     print(
                         f"Too Many Requests, "
@@ -128,39 +154,31 @@ class PubMedAPIWrapper(BaseModel):
                    raise e

        xml_text = result.read().decode("utf-8")
+        text_dict = self.parse(xml_text)
+        return self._parse_article(uid, text_dict)

-        # Get title
-        title = ""
-        if "<ArticleTitle>" in xml_text and "</ArticleTitle>" in xml_text:
-            start_tag = "<ArticleTitle>"
-            end_tag = "</ArticleTitle>"
-            title = xml_text[
-                xml_text.index(start_tag) + len(start_tag) : xml_text.index(end_tag)
+    def _parse_article(self, uid: str, text_dict: dict) -> dict:
+        ar = text_dict["PubmedArticleSet"]["PubmedArticle"]["MedlineCitation"][
+            "Article"
+        ]
+        summary = "\n".join(
+            [
+                f"{txt['@Label']}: {txt['#text']}"
+                for txt in ar.get("Abstract", {}).get("AbstractText", [])
+                if "#text" in txt and "@Label" in txt
             ]
+        )
+        a_d = ar.get("ArticleDate", {})
+        pub_date = "-".join(
+            [a_d.get("Year", ""), a_d.get("Month", ""), a_d.get("Day", "")]
+        )

-        # Get abstract
-        abstract = ""
-        if "<AbstractText>" in xml_text and "</AbstractText>" in xml_text:
-            start_tag = "<AbstractText>"
-            end_tag = "</AbstractText>"
-            abstract = xml_text[
-                xml_text.index(start_tag) + len(start_tag) : xml_text.index(end_tag)
-            ]
-
-        # Get publication date
-        pub_date = ""
-        if "<PubDate>" in xml_text and "</PubDate>" in xml_text:
-            start_tag = "<PubDate>"
-            end_tag = "</PubDate>"
-            pub_date = xml_text[
-                xml_text.index(start_tag) + len(start_tag) : xml_text.index(end_tag)
-            ]
-
-        # Return article as dictionary
-        article = {
+        return {
             "uid": uid,
-            "title": title,
-            "summary": abstract,
-            "pub_date": pub_date,
+            "Title": ar.get("ArticleTitle", ""),
+            "Published": pub_date,
+            "Copyright Information": ar.get("Abstract", {}).get(
+                "CopyrightInformation", ""
+            ),
+            "Summary": summary,
         }
-        return article
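A small sketch of exercising the reworked wrapper directly (query values are illustrative; network access and the `xmltodict` package are assumed):

```python
from langchain.utilities.pubmed import PubMedAPIWrapper

wrapper = PubMedAPIWrapper(top_k_results=2)

# run() returns one formatted string, truncated to doc_content_chars_max.
print(wrapper.run("chatgpt"))

# load_docs() returns Documents whose metadata carries
# uid, Title, Published and Copyright Information.
for doc in wrapper.load_docs("chatgpt"):
    print(doc.metadata["uid"], doc.metadata["Published"])
```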
597  libs/langchain/poetry.lock  (generated)
File diff suppressed because it is too large.
@@ -132,6 +132,7 @@ feedparser = {version = "^6.0.10", optional = true}
 newspaper3k = {version = "^0.2.8", optional = true}
 amazon-textract-caller = {version = "<2", optional = true}
 xata = {version = "^1.0.0a7", optional = true}
+xmltodict = {version = "^0.13.0", optional = true}

 [tool.poetry.group.test.dependencies]
 # The only dependencies that should be added are
@@ -371,6 +372,7 @@ extended_testing = [
     "newspaper3k",
     "feedparser",
     "xata",
+    "xmltodict",
 ]

 [tool.ruff]
@@ -0,0 +1,54 @@
"""Integration test for PubMed API Wrapper."""
from typing import List

import pytest

from langchain.document_loaders import PubMedLoader
from langchain.schema import Document

xmltodict = pytest.importorskip("xmltodict")


def test_load_success() -> None:
    """Test that returns the correct answer"""
    api_client = PubMedLoader(query="chatgpt")
    docs = api_client.load()
    print(docs)
    assert len(docs) == api_client.load_max_docs == 3
    assert_docs(docs)


def test_load_success_load_max_docs() -> None:
    """Test that returns the correct answer"""
    api_client = PubMedLoader(query="chatgpt", load_max_docs=2)
    docs = api_client.load()
    print(docs)
    assert len(docs) == api_client.load_max_docs == 2
    assert_docs(docs)


def test_load_returns_no_result() -> None:
    """Test that gives no result."""
    api_client = PubMedLoader(query="1605.08386WWW")
    docs = api_client.load()
    assert len(docs) == 0


def test_load_no_content() -> None:
    """Returns a Document without content."""
    api_client = PubMedLoader(query="37548971")
    docs = api_client.load()
    print(docs)
    assert len(docs) > 0
    assert docs[0].page_content == ""


def assert_docs(docs: List[Document]) -> None:
    for doc in docs:
        assert doc.metadata
        assert set(doc.metadata) == {
            "Copyright Information",
            "uid",
            "Title",
            "Published",
        }
@@ -0,0 +1,41 @@
"""Integration test for PubMed API Wrapper."""
from typing import List

import pytest

from langchain.retrievers import PubMedRetriever
from langchain.schema import Document


@pytest.fixture
def retriever() -> PubMedRetriever:
    return PubMedRetriever()


def assert_docs(docs: List[Document]) -> None:
    for doc in docs:
        assert doc.metadata
        assert set(doc.metadata) == {
            "Copyright Information",
            "uid",
            "Title",
            "Published",
        }


def test_load_success(retriever: PubMedRetriever) -> None:
    docs = retriever.get_relevant_documents(query="chatgpt")
    assert len(docs) == 3
    assert_docs(docs)


def test_load_success_top_k_results(retriever: PubMedRetriever) -> None:
    retriever.top_k_results = 2
    docs = retriever.get_relevant_documents(query="chatgpt")
    assert len(docs) == 2
    assert_docs(docs)


def test_load_no_result(retriever: PubMedRetriever) -> None:
    docs = retriever.get_relevant_documents("1605.08386WWW")
    assert not docs
@@ -1,50 +0,0 @@
-"""Integration test for PubMed API Wrapper."""
-from typing import List
-
-import pytest
-
-from langchain.retrievers import PubMedRetriever
-from langchain.schema import Document
-
-
-@pytest.fixture
-def retriever() -> PubMedRetriever:
-    return PubMedRetriever()
-
-
-def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
-    for doc in docs:
-        assert doc.page_content
-        assert doc.metadata
-        main_meta = {"Published", "Title", "Authors", "Summary"}
-        assert set(doc.metadata).issuperset(main_meta)
-        if all_meta:
-            assert len(set(doc.metadata)) > len(main_meta)
-        else:
-            assert len(set(doc.metadata)) == len(main_meta)
-
-
-def test_load_success(retriever: PubMedRetriever) -> None:
-    docs = retriever.get_relevant_documents(query="1605.08386")
-    assert len(docs) == 1
-    assert_docs(docs, all_meta=False)
-
-
-def test_load_success_all_meta(retriever: PubMedRetriever) -> None:
-    retriever.load_all_available_meta = True
-    retriever.load_max_docs = 2
-    docs = retriever.get_relevant_documents(query="ChatGPT")
-    assert len(docs) > 1
-    assert_docs(docs, all_meta=True)
-
-
-def test_load_success_init_args() -> None:
-    retriever = PubMedRetriever(load_max_docs=1, load_all_available_meta=True)
-    docs = retriever.get_relevant_documents(query="ChatGPT")
-    assert len(docs) == 1
-    assert_docs(docs, all_meta=True)
-
-
-def test_load_no_result(retriever: PubMedRetriever) -> None:
-    docs = retriever.get_relevant_documents("1605.08386WWW")
-    assert not docs
@@ -5,9 +5,12 @@ import pytest

 from langchain.agents.load_tools import load_tools
 from langchain.schema import Document
+from langchain.tools import PubmedQueryRun
 from langchain.tools.base import BaseTool
 from langchain.utilities import PubMedAPIWrapper

+xmltodict = pytest.importorskip("xmltodict")
+

 @pytest.fixture
 def api_client() -> PubMedAPIWrapper:
@@ -17,15 +20,9 @@ def api_client() -> PubMedAPIWrapper:
 def test_run_success(api_client: PubMedAPIWrapper) -> None:
     """Test that returns the correct answer"""

-    output = api_client.run("1605.08386")
-    assert "Heat-bath random walks with Markov bases" in output
+    output = api_client.run("chatgpt")
+    assert "Performance of ChatGPT on the Situational Judgement Test-A" in output
+    assert len(output) == api_client.doc_content_chars_max


-def test_run_returns_several_docs(api_client: PubMedAPIWrapper) -> None:
-    """Test that returns several docs"""
-
-    output = api_client.run("Caprice Stanley")
-    assert "On Mixing Behavior of a Family of Random Walks" in output
-
-
 def test_run_returns_no_result(api_client: PubMedAPIWrapper) -> None:
@@ -37,30 +34,34 @@ def test_run_returns_no_result(api_client: PubMedAPIWrapper) -> None:

 def assert_docs(docs: List[Document]) -> None:
     for doc in docs:
-        assert doc.page_content
         assert doc.metadata
-        assert set(doc.metadata) == {"Published", "Title", "Authors", "Summary"}
+        assert set(doc.metadata) == {
+            "Copyright Information",
+            "uid",
+            "Title",
+            "Published",
+        }


 def test_load_success(api_client: PubMedAPIWrapper) -> None:
     """Test that returns one document"""

-    docs = api_client.load_docs("1605.08386")
-    assert len(docs) == 1
+    docs = api_client.load_docs("chatgpt")
+    assert len(docs) == api_client.top_k_results == 3
     assert_docs(docs)


 def test_load_returns_no_result(api_client: PubMedAPIWrapper) -> None:
     """Test that returns no docs"""

-    docs = api_client.load("1605.08386WWW")
+    docs = api_client.load_docs("1605.08386WWW")
     assert len(docs) == 0


 def test_load_returns_limited_docs() -> None:
     """Test that returns several docs"""
     expected_docs = 2
-    api_client = PubMedAPIWrapper(load_max_docs=expected_docs)
+    api_client = PubMedAPIWrapper(top_k_results=expected_docs)
     docs = api_client.load_docs("ChatGPT")
     assert len(docs) == expected_docs
     assert_docs(docs)
@@ -70,42 +71,31 @@ def test_load_returns_full_set_of_metadata() -> None:
     """Test that returns several docs"""
     api_client = PubMedAPIWrapper(load_max_docs=1, load_all_available_meta=True)
     docs = api_client.load_docs("ChatGPT")
-    assert len(docs) == 1
+    assert len(docs) == 3
     for doc in docs:
-        assert doc.page_content
         assert doc.metadata
         assert set(doc.metadata).issuperset(
-            {"Published", "Title", "Authors", "Summary"}
+            {"Copyright Information", "Published", "Title", "uid"}
         )
-        print(doc.metadata)
-        assert len(set(doc.metadata)) > 4


 def _load_pubmed_from_universal_entry(**kwargs: Any) -> BaseTool:
-    tools = load_tools(["pupmed"], **kwargs)
+    tools = load_tools(["pubmed"], **kwargs)
     assert len(tools) == 1, "loaded more than 1 tool"
     return tools[0]


 def test_load_pupmed_from_universal_entry() -> None:
-    pupmed_tool = _load_pubmed_from_universal_entry()
-    output = pupmed_tool("Caprice Stanley")
-    assert (
-        "On Mixing Behavior of a Family of Random Walks" in output
-    ), "failed to fetch a valid result"
+    pubmed_tool = _load_pubmed_from_universal_entry()
+    output = pubmed_tool("chatgpt")
+    assert "Performance of ChatGPT on the Situational Judgement Test-A" in output


 def test_load_pupmed_from_universal_entry_with_params() -> None:
     params = {
         "top_k_results": 1,
-        "load_max_docs": 10,
-        "load_all_available_meta": True,
     }
-    pupmed_tool = _load_pubmed_from_universal_entry(**params)
-    assert isinstance(pupmed_tool, PubMedAPIWrapper)
-    wp = pupmed_tool.api_wrapper
+    pubmed_tool = _load_pubmed_from_universal_entry(**params)
+    assert isinstance(pubmed_tool, PubmedQueryRun)
+    wp = pubmed_tool.api_wrapper
     assert wp.top_k_results == 1, "failed to assert top_k_results"
-    assert wp.load_max_docs == 10, "failed to assert load_max_docs"
-    assert (
-        wp.load_all_available_meta is True
-    ), "failed to assert load_all_available_meta"