Mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-03 12:07:36 +00:00)
chore(docs): bump langgraph in docs & reformat all docs (#32044)

Trying to unblock the documentation build pipeline:

* Bump the langgraph dependency in docs
* Update langgraph in the lock file (resolves an issue in API reference generation)
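Most of the diff below is mechanical restyling of Python sources and notebook cells rather than functional change. A minimal, hypothetical sketch of the recurring rewrites (the variables `i`, `result`, and `checks` are illustrative stand-ins, and attributing the style to a recent `ruff format` is an assumption, not something the commit states):

    i, result, checks = 0, {"answer": "blue"}, [True]  # hypothetical values for illustration

    # Before: the styles being rewritten
    print(f"Document {i+1}")              # no spaces around + inside the f-string expression
    print(f'Answer: {result["answer"]}')  # single-quoted f-string to avoid inner double quotes
    msg = "You are a helpful " "assistant."  # implicitly concatenated string literals
    assert all(checks), "DataFrame does not have the expected columns"

    # After: what the diff converges on
    print(f"Document {i + 1}")            # spaces around binary operators
    print(f"Answer: {result['answer']}")  # double-quoted f-string, single quotes inside
    msg = "You are a helpful assistant."  # merged into one literal
    assert all(checks), (
        "DataFrame does not have the expected columns"
    )

Captured notebook outputs are touched too: ANSI escape literals are normalized from `\u001B` to lowercase `\u001b`.
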
@@ -229,9 +229,9 @@
 "    \"smoke\",\n",
 "    \"temp\",\n",
 "]\n",
-"assert all(\n",
-"    [column in df.columns for column in expected_columns]\n",
-"), \"DataFrame does not have the expected columns\""
+"assert all([column in df.columns for column in expected_columns]), (\n",
+"    \"DataFrame does not have the expected columns\"\n",
+")"
 ]
 },
 {

@@ -487,7 +487,7 @@
 "        print(\"*\" * 40)\n",
 "        print(\n",
 "            colored(\n",
-"                f\"After {i+1} observations, Tommie's summary is:\\n{tommie.get_summary(force_refresh=True)}\",\n",
+"                f\"After {i + 1} observations, Tommie's summary is:\\n{tommie.get_summary(force_refresh=True)}\",\n",
 "                \"blue\",\n",
 "            )\n",
 "        )\n",

@@ -389,7 +389,7 @@
 "        ax = axs[idx]\n",
 "        ax.imshow(img)\n",
 "        # Assuming similarity is not available in the new data, removed sim_score\n",
-"        ax.title.set_text(f\"\\nProduct ID: {data[\"id\"]}\\n Score: {score}\")\n",
+"        ax.title.set_text(f\"\\nProduct ID: {data['id']}\\n Score: {score}\")\n",
 "        ax.axis(\"off\")  # Turn off axis\n",
 "\n",
 "    # Hide any remaining empty subplots\n",

@@ -148,11 +148,11 @@
 "\n",
 "    instructions = \"None\"\n",
 "    for i in range(max_meta_iters):\n",
-"        print(f\"[Episode {i+1}/{max_meta_iters}]\")\n",
+"        print(f\"[Episode {i + 1}/{max_meta_iters}]\")\n",
 "        chain = initialize_chain(instructions, memory=None)\n",
 "        output = chain.predict(human_input=task)\n",
 "        for j in range(max_iters):\n",
-"            print(f\"(Step {j+1}/{max_iters})\")\n",
+"            print(f\"(Step {j + 1}/{max_iters})\")\n",
 "            print(f\"Assistant: {output}\")\n",
 "            print(\"Human: \")\n",
 "            human_input = input()\n",

@@ -183,7 +183,7 @@
 "outputs": [],
 "source": [
 "game_description = f\"\"\"Here is the topic for a Dungeons & Dragons game: {quest}.\n",
-"        The characters are: {*character_names,}.\n",
+"        The characters are: {(*character_names,)}.\n",
 "        The story is narrated by the storyteller, {storyteller_name}.\"\"\"\n",
 "\n",
 "player_descriptor_system_message = SystemMessage(\n",

@@ -334,7 +334,7 @@
 "    You are the storyteller, {storyteller_name}.\n",
 "    Please make the quest more specific. Be creative and imaginative.\n",
 "    Please reply with the specified quest in {word_limit} words or less. \n",
-"    Speak directly to the characters: {*character_names,}.\n",
+"    Speak directly to the characters: {(*character_names,)}.\n",
 "    Do not add anything else.\"\"\"\n",
 "    ),\n",
 "]\n",

@@ -200,7 +200,7 @@
 "outputs": [],
 "source": [
 "game_description = f\"\"\"Here is the topic for the presidential debate: {topic}.\n",
-"The presidential candidates are: {', '.join(character_names)}.\"\"\"\n",
+"The presidential candidates are: {\", \".join(character_names)}.\"\"\"\n",
 "\n",
 "player_descriptor_system_message = SystemMessage(\n",

@@ -595,7 +595,7 @@
 "    Frame the debate topic as a problem to be solved.\n",
 "    Be creative and imaginative.\n",
 "    Please reply with the specified topic in {word_limit} words or less. \n",
-"    Speak directly to the presidential candidates: {*character_names,}.\n",
+"    Speak directly to the presidential candidates: {(*character_names,)}.\n",
 "    Do not add anything else.\"\"\"\n",
 "    ),\n",
 "]\n",

@@ -395,8 +395,7 @@
 "prompt_messages = [\n",
 "    SystemMessage(\n",
 "        content=(\n",
-"            \"You are a world class algorithm to answer \"\n",
-"            \"questions in a specific format.\"\n",
+"            \"You are a world class algorithm to answer questions in a specific format.\"\n",
 "        )\n",
 "    ),\n",
 "    HumanMessage(content=\"Answer question using the following context\"),\n",

@@ -227,7 +227,7 @@
 "outputs": [],
 "source": [
 "conversation_description = f\"\"\"Here is the topic of conversation: {topic}\n",
-"The participants are: {', '.join(names.keys())}\"\"\"\n",
+"The participants are: {\", \".join(names.keys())}\"\"\"\n",
 "\n",
 "agent_descriptor_system_message = SystemMessage(\n",

@@ -396,7 +396,7 @@
 "    You are the moderator.\n",
 "    Please make the topic more specific.\n",
 "    Please reply with the specified quest in {word_limit} words or less. \n",
-"    Speak directly to the participants: {*names,}.\n",
+"    Speak directly to the participants: {(*names,)}.\n",
 "    Do not add anything else.\"\"\"\n",
 "    ),\n",
 "]\n",

@@ -108,7 +108,7 @@ class GalleryGridDirective(SphinxDirective):
 
         # Parse the template with Sphinx Design to create an output container
         # Prep the options for the template grid
-        class_ = "gallery-directive" + f' {self.options.get("class-container", "")}'
+        class_ = "gallery-directive" + f" {self.options.get('class-container', '')}"
         options = {"gutter": 2, "class-container": class_}
         options_str = "\n".join(f":{k}: {v}" for k, v in options.items())
 

@@ -267,7 +267,7 @@ def _construct_doc(
 .. _{package_namespace}:
 
 ======================================
-{package_namespace.replace('_', '-')}: {package_version}
+{package_namespace.replace("_", "-")}: {package_version}
 ======================================
 
 .. automodule:: {package_namespace}

@@ -325,7 +325,7 @@ def _construct_doc(
 
         index_autosummary += f"""
 :ref:`{package_namespace}_{module}`
-{'^' * (len(package_namespace) + len(module) + 8)}
+{"^" * (len(package_namespace) + len(module) + 8)}
 """
 
         if classes:

@@ -364,7 +364,7 @@ def _construct_doc(
 
 """
             index_autosummary += f"""
-{class_['qualified_name']}
+{class_["qualified_name"]}
 """
 
         if functions:

@@ -427,7 +427,7 @@ def _construct_doc(
 
 """
             index_autosummary += f"""
-{class_['qualified_name']}
+{class_["qualified_name"]}
 """
 
         if deprecated_functions:

@@ -287,7 +287,7 @@
 "\n",
 "async for event in structured_llm.astream_events(\"Tell me a joke\"):\n",
 "    if event[\"event\"] == \"on_chat_model_end\":\n",
-"        print(f'Token usage: {event[\"data\"][\"output\"].usage_metadata}\\n')\n",
+"        print(f\"Token usage: {event['data']['output'].usage_metadata}\\n\")\n",
 "    elif event[\"event\"] == \"on_chain_end\" and event[\"name\"] == \"RunnableSequence\":\n",
 "        print(event[\"data\"][\"output\"])\n",
 "    else:\n",

@@ -162,8 +162,7 @@
 "# Define the function that calls the model\n",
 "def call_model(state: MessagesState):\n",
 "    system_prompt = (\n",
-"        \"You are a helpful assistant. \"\n",
-"        \"Answer all questions to the best of your ability.\"\n",
+"        \"You are a helpful assistant. Answer all questions to the best of your ability.\"\n",
 "    )\n",
 "    messages = [SystemMessage(content=system_prompt)] + state[\"messages\"]\n",
 "    response = model.invoke(messages)\n",

@@ -322,8 +321,7 @@
 "    # highlight-start\n",
 "    trimmed_messages = trimmer.invoke(state[\"messages\"])\n",
 "    system_prompt = (\n",
-"        \"You are a helpful assistant. \"\n",
-"        \"Answer all questions to the best of your ability.\"\n",
+"        \"You are a helpful assistant. Answer all questions to the best of your ability.\"\n",
 "    )\n",
 "    messages = [SystemMessage(content=system_prompt)] + trimmed_messages\n",
 "    # highlight-end\n",

@@ -34,7 +34,7 @@
 "def pretty_print_docs(docs):\n",
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
-"            [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
+"            [f\"Document {i + 1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
 "        )\n",
 "    )"
 ]

@@ -212,7 +212,7 @@
 "vector_store = InMemoryVectorStore.from_documents(pages, OpenAIEmbeddings())\n",
 "docs = vector_store.similarity_search(\"What is LayoutParser?\", k=2)\n",
 "for doc in docs:\n",
-"    print(f'Page {doc.metadata[\"page\"]}: {doc.page_content[:300]}\\n')"
+"    print(f\"Page {doc.metadata['page']}: {doc.page_content[:300]}\\n\")"
 ]
 },
 {

@@ -303,7 +303,7 @@
 ],
 "source": [
 "for doc in docs[:5]:\n",
-"    print(f'{doc.metadata[\"category\"]}: {doc.page_content}')"
+"    print(f\"{doc.metadata['category']}: {doc.page_content}\")"
 ]
 },
 {

@@ -448,7 +448,7 @@
 "vector_store = InMemoryVectorStore.from_documents(setup_docs, OpenAIEmbeddings())\n",
 "retrieved_docs = vector_store.similarity_search(\"Install Tavily\", k=2)\n",
 "for doc in retrieved_docs:\n",
-"    print(f'Page {doc.metadata[\"url\"]}: {doc.page_content[:300]}\\n')"
+"    print(f\"Page {doc.metadata['url']}: {doc.page_content[:300]}\\n\")"
 ]
 },
 {

@@ -376,7 +376,7 @@
 "source": [
 "state = app.get_state(config).values\n",
 "\n",
-"print(f'Language: {state[\"language\"]}')\n",
+"print(f\"Language: {state['language']}\")\n",
 "for message in state[\"messages\"]:\n",
 "    message.pretty_print()"
 ]

@@ -427,7 +427,7 @@
 "source": [
 "state = app.get_state(config).values\n",
 "\n",
-"print(f'Language: {state[\"language\"]}')\n",
+"print(f\"Language: {state['language']}\")\n",
 "for message in state[\"messages\"]:\n",
 "    message.pretty_print()"
 ]

@@ -236,7 +236,7 @@
 "    \"\"\"Retrieve information related to a query.\"\"\"\n",
 "    retrieved_docs = vector_store.similarity_search(query, k=2)\n",
 "    serialized = \"\\n\\n\".join(\n",
-"        (f\"Source: {doc.metadata}\\n\" f\"Content: {doc.page_content}\")\n",
+"        (f\"Source: {doc.metadata}\\nContent: {doc.page_content}\")\n",
 "        for doc in retrieved_docs\n",
 "    )\n",
 "    return serialized, retrieved_docs"

@@ -213,7 +213,7 @@
 "\n",
 "sources = [doc.metadata[\"source\"] for doc in result[\"context\"]]\n",
 "print(f\"Sources: {sources}\\n\\n\")\n",
-"print(f'Answer: {result[\"answer\"]}')"
+"print(f\"Answer: {result['answer']}\")"
 ]
 },
 {

@@ -619,7 +619,7 @@
 "    for i, doc in enumerate(docs):\n",
 "        doc_str = f\"\"\"\\\n",
 "    <source id=\\\"{i}\\\">\n",
-"    <title>{doc.metadata['title']}</title>\n",
+"    <title>{doc.metadata[\"title\"]}</title>\n",
 "    <article_snippet>{doc.page_content}</article_snippet>\n",
 "    </source>\"\"\"\n",
 "        formatted.append(doc_str)\n",

@@ -296,8 +296,8 @@
 "source": [
 "result = graph.invoke({\"question\": \"What is Task Decomposition?\"})\n",
 "\n",
-"print(f'Context: {result[\"context\"]}\\n\\n')\n",
-"print(f'Answer: {result[\"answer\"]}')"
+"print(f\"Context: {result['context']}\\n\\n\")\n",
+"print(f\"Answer: {result['answer']}\")"
 ]
 },
 {

@@ -439,7 +439,7 @@
 "    \"\"\"Retrieve information related to a query.\"\"\"\n",
 "    retrieved_docs = vector_store.similarity_search(query, k=2)\n",
 "    serialized = \"\\n\\n\".join(\n",
-"        (f\"Source: {doc.metadata}\\n\" f\"Content: {doc.page_content}\")\n",
+"        (f\"Source: {doc.metadata}\\nContent: {doc.page_content}\")\n",
 "        for doc in retrieved_docs\n",
 "    )\n",
 "    return serialized, retrieved_docs"

@@ -628,7 +628,7 @@
 ],
 "source": [
 "for chunk in retrieval_chain.stream(\n",
-"    \"Where did harrison work? \" \"Write 3 made up sentences about this place.\"\n",
+"    \"Where did harrison work? Write 3 made up sentences about this place.\"\n",
 "):\n",
 "    print(chunk, end=\"|\", flush=True)"
 ]

@@ -749,7 +749,7 @@
 "input_message = HumanMessage([{\"type\": \"text\", \"text\": query}])\n",
 "output = app.invoke({\"messages\": [input_message]}, config)\n",
 "output[\"messages\"][-1].pretty_print()\n",
-"print(f'\\n{output[\"messages\"][-1].usage_metadata[\"input_token_details\"]}')"
+"print(f\"\\n{output['messages'][-1].usage_metadata['input_token_details']}\")"
 ]
 },
 {

@@ -795,7 +795,7 @@
 "input_message = HumanMessage([{\"type\": \"text\", \"text\": query}])\n",
 "output = app.invoke({\"messages\": [input_message]}, config)\n",
 "output[\"messages\"][-1].pretty_print()\n",
-"print(f'\\n{output[\"messages\"][-1].usage_metadata[\"input_token_details\"]}')"
+"print(f\"\\n{output['messages'][-1].usage_metadata['input_token_details']}\")"
 ]
 },
 {

@@ -822,7 +822,7 @@
 "input_message = HumanMessage([{\"type\": \"text\", \"text\": query}])\n",
 "output = app.invoke({\"messages\": [input_message]}, config)\n",
 "output[\"messages\"][-1].pretty_print()\n",
-"print(f'\\n{output[\"messages\"][-1].usage_metadata[\"input_token_details\"]}')"
+"print(f\"\\n{output['messages'][-1].usage_metadata['input_token_details']}\")"
 ]
 },
 {

@@ -883,7 +883,7 @@
 "llm_with_tools = llm.bind_tools([get_weather])\n",
 "response = llm_with_tools.invoke(\"What's the weather in San Francisco?\")\n",
 "print(response.tool_calls)\n",
-"print(f'\\nTotal tokens: {response.usage_metadata[\"total_tokens\"]}')"
+"print(f\"\\nTotal tokens: {response.usage_metadata['total_tokens']}\")"
 ]
 },
 {

@@ -1272,7 +1272,7 @@
 "llm_with_tools = llm.bind_tools([tool])\n",
 "\n",
 "response = llm_with_tools.invoke(\n",
-"    \"Calculate the mean and standard deviation of \" \"[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\"\n",
+"    \"Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\"\n",
 ")"
 ]
 },

@@ -498,7 +498,7 @@
 "print(f\"Question:\\n{resp_dict['input']}\\n\\nAnswer:\\n{clipped_answer}\")\n",
 "for i, doc in enumerate(resp_dict[\"context\"]):\n",
 "    print()\n",
-"    print(f\"Source {i+1}:\")\n",
+"    print(f\"Source {i + 1}:\")\n",
 "    print(f\"  text: {json.dumps(clip_text(doc.page_content, threshold=350))}\")\n",
 "    for key in doc.metadata:\n",
 "        if key != \"pk\":\n",

@@ -43,7 +43,7 @@
 "def pretty_print_docs(docs):\n",
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
-"            [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
+"            [f\"Document {i + 1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
 "        )\n",
 "    )"
 ]

@@ -58,7 +58,7 @@
 "def pretty_print_docs(docs):\n",
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
-"            [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
+"            [f\"Document {i + 1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
 "        )\n",
 "    )"
 ]

@@ -56,7 +56,7 @@
 "def pretty_print_docs(docs):\n",
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
-"            [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
+"            [f\"Document {i + 1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
 "        )\n",
 "    )"
 ]

@@ -45,7 +45,7 @@
 "def pretty_print_docs(docs):\n",
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
-"            [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
+"            [f\"Document {i + 1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
 "        )\n",
 "    )"
 ]

@@ -52,7 +52,7 @@
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
 "            [\n",
-"                f\"Document {i+1}:\\n\\n{d.page_content}\\nMetadata: {d.metadata}\"\n",
+"                f\"Document {i + 1}:\\n\\n{d.page_content}\\nMetadata: {d.metadata}\"\n",
 "                for i, d in enumerate(docs)\n",
 "            ]\n",
 "        )\n",

@@ -60,7 +60,7 @@
 "def pretty_print_docs(docs):\n",
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
-"            [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
+"            [f\"Document {i + 1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
 "        )\n",
 "    )"
 ]

@@ -71,7 +71,7 @@
 "def pretty_print_docs(docs):\n",
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
-"            [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
+"            [f\"Document {i + 1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
 "        )\n",
 "    )"
 ]

@@ -147,9 +147,9 @@
 "source": [
 "import os\n",
 "\n",
-"assert os.environ[\n",
-"    \"OPENAI_API_KEY\"\n",
-"], \"Set the OPENAI_API_KEY environment variable with your OpenAI API key.\""
+"assert os.environ[\"OPENAI_API_KEY\"], (\n",
+"    \"Set the OPENAI_API_KEY environment variable with your OpenAI API key.\"\n",
+")"
 ]
 },
 {

@@ -70,7 +70,7 @@
 "def pretty_print_docs(docs):\n",
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
-"            [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
+"            [f\"Document {i + 1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
 "        )\n",
 "    )"
 ]

@@ -56,7 +56,7 @@
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
 "            [\n",
-"                f\"Document {i+1}:\\n\\n{d.page_content}\\nMetadata: {d.metadata}\"\n",
+"                f\"Document {i + 1}:\\n\\n{d.page_content}\\nMetadata: {d.metadata}\"\n",
 "                for i, d in enumerate(docs)\n",
 "            ]\n",
 "        )\n",

@@ -162,7 +162,7 @@
 "def pretty_print_docs(docs):\n",
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
-"            [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
+"            [f\"Document {i + 1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
 "        )\n",
 "    )"
 ]

@@ -49,7 +49,7 @@
 "def pretty_print_docs(docs):\n",
 "    print(\n",
 "        f\"\\n{'-' * 100}\\n\".join(\n",
-"            [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
+"            [f\"Document {i + 1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
 "        )\n",
 "    )"
 ]

@@ -175,7 +175,7 @@
 "print(f\"Query: {query}\")\n",
 "print(f\"Top {len(results)} results:\")\n",
 "for i, doc in enumerate(results):\n",
-"    print(f\"{i+1}. {doc.page_content} (Country: {doc.metadata['country']})\")"
+"    print(f\"{i + 1}. {doc.page_content} (Country: {doc.metadata['country']})\")"
 ]
 },
 {

@@ -214,7 +214,7 @@
 "print(f\"Query: {query}\")\n",
 "print(f\"Top {len(results)} results:\")\n",
 "for i, doc in enumerate(results):\n",
-"    print(f\"{i+1}. {doc.page_content} (Country: {doc.metadata['country']})\")"
+"    print(f\"{i + 1}. {doc.page_content} (Country: {doc.metadata['country']})\")"
 ]
 },
 {

@@ -251,7 +251,7 @@
 "print(f\"Query: {query}\")\n",
 "print(f\"Top {len(results)} result:\")\n",
 "for i, doc in enumerate(results):\n",
-"    print(f\"{i+1}. {doc.page_content} (Country: {doc.metadata['country']})\")"
+"    print(f\"{i + 1}. {doc.page_content} (Country: {doc.metadata['country']})\")"
 ]
 },
 {

@@ -293,7 +293,7 @@
 "    print(f\"Async query: {query}\")\n",
 "    print(f\"Top {len(results)} results:\")\n",
 "    for i, doc in enumerate(results):\n",
-"        print(f\"{i+1}. {doc.page_content} (Country: {doc.metadata['country']})\")\n",
+"        print(f\"{i + 1}. {doc.page_content} (Country: {doc.metadata['country']})\")\n",
 "\n",
 "\n",
 "await retrieve_async()"

@@ -109,7 +109,7 @@
 "source": [
 "# Print document embeddings\n",
 "for i, embedding in enumerate(document_embeddings):\n",
-"    print(f\"Embedding for document {i+1}: {embedding}\")"
+"    print(f\"Embedding for document {i + 1}: {embedding}\")"
 ]
 },
 {

@@ -211,7 +211,7 @@
 "source": [
 "# Print document embeddings\n",
 "for i, embedding in enumerate(document_embeddings):\n",
-"    print(f\"Embedding for document {i+1}: {embedding}\")"
+"    print(f\"Embedding for document {i + 1}: {embedding}\")"
 ]
 },
 {

@@ -279,7 +279,7 @@
 ")\n",
 "\n",
 "for i, d in enumerate(d_embed):\n",
-"    print(f\"Document {i+1}:\")\n",
+"    print(f\"Document {i + 1}:\")\n",
 "    print(f\"Cosine similarity with query: {cosine_similarity([q_embed], [d])[0][0]}\")\n",
 "    print(\"---\")"
 ]

@@ -229,7 +229,7 @@
 "except Exception as ex:\n",
 "    print(\n",
 "        \"Make sure the infinity instance is running. Verify by clicking on \"\n",
-"        f\"{infinity_api_url.replace('v1','docs')} Exception: {ex}. \"\n",
+"        f\"{infinity_api_url.replace('v1', 'docs')} Exception: {ex}. \"\n",
 "    )"
 ]
 },

@@ -152,7 +152,7 @@
 "source": [
 "embeddings = embedder.embed_documents(texts)\n",
 "for i, embedding in enumerate(embeddings):\n",
-"    print(f\"Embedding for document {i+1}: {embedding}\")"
+"    print(f\"Embedding for document {i + 1}: {embedding}\")"
 ]
 },
 {

@@ -172,7 +172,7 @@
 "\n",
 "print(\"Search results for query:\", query)\n",
 "for i, doc in enumerate(results):\n",
-"    print(f\"Result {i+1}: {doc.page_content}\")"
+"    print(f\"Result {i + 1}: {doc.page_content}\")"
 ]
 },
 {

@@ -403,7 +403,7 @@
 "    for j, emb_j in enumerate(embeddings_list):\n",
 "        similarity = calculate_similarity(emb_i, emb_j)\n",
 "        similarities.append(f\"{similarity:.4f}\")\n",
-"    print(f\"Document {i+1}: {similarities}\")"
+"    print(f\"Document {i + 1}: {similarities}\")"
 ]
 },
 {

@@ -91,7 +91,7 @@
 "source": [
 "embeddings = embedder.embed_documents(texts)\n",
 "for i, embedding in enumerate(embeddings):\n",
-"    print(f\"Embedding for document {i+1}: {embedding}\")"
+"    print(f\"Embedding for document {i + 1}: {embedding}\")"
 ]
 },
 {

@@ -110,19 +110,19 @@
 "text": [
 "\n",
 "\n",
-"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
-"\u001B[32;1m\u001B[1;3m I need to find the email and summarize it.\n",
+"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
+"\u001b[32;1m\u001b[1;3m I need to find the email and summarize it.\n",
 "Action: Gmail: Find Email\n",
-"Action Input: Find the latest email from Silicon Valley Bank\u001B[0m\n",
-"Observation: \u001B[31;1m\u001B[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. Tim Mayopoulos <https://eml.svb.com/NjEwLUtBSy0yNjYAAAGKgoxUeBCLAyF_NxON97X4rKEaNBLG\", \"reply_to__email\": \"sreply@svb.com\", \"subject\": \"Meet the new CEO Tim Mayopoulos\", \"date\": \"Tue, 14 Mar 2023 23:42:29 -0500 (CDT)\", \"message_url\": \"https://mail.google.com/mail/u/0/#inbox/186e393b13cfdf0a\", \"attachment_count\": \"0\", \"to__emails\": \"ankush@langchain.dev\", \"message_id\": \"186e393b13cfdf0a\", \"labels\": \"IMPORTANT, CATEGORY_UPDATES, INBOX\"}\u001B[0m\n",
-"Thought:\u001B[32;1m\u001B[1;3m I need to summarize the email and send it to the #test-zapier channel in Slack.\n",
+"Action Input: Find the latest email from Silicon Valley Bank\u001b[0m\n",
+"Observation: \u001b[31;1m\u001b[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. Tim Mayopoulos <https://eml.svb.com/NjEwLUtBSy0yNjYAAAGKgoxUeBCLAyF_NxON97X4rKEaNBLG\", \"reply_to__email\": \"sreply@svb.com\", \"subject\": \"Meet the new CEO Tim Mayopoulos\", \"date\": \"Tue, 14 Mar 2023 23:42:29 -0500 (CDT)\", \"message_url\": \"https://mail.google.com/mail/u/0/#inbox/186e393b13cfdf0a\", \"attachment_count\": \"0\", \"to__emails\": \"ankush@langchain.dev\", \"message_id\": \"186e393b13cfdf0a\", \"labels\": \"IMPORTANT, CATEGORY_UPDATES, INBOX\"}\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to summarize the email and send it to the #test-zapier channel in Slack.\n",
 "Action: Slack: Send Channel Message\n",
-"Action Input: Send a slack message to the #test-zapier channel with the text \"Silicon Valley Bank has announced that Tim Mayopoulos is the new CEO. FDIC is fully insuring all deposits and they have an ask for clients and partners as they rebuild.\"\u001B[0m\n",
-"Observation: \u001B[36;1m\u001B[1;3m{\"message__text\": \"Silicon Valley Bank has announced that Tim Mayopoulos is the new CEO. FDIC is fully insuring all deposits and they have an ask for clients and partners as they rebuild.\", \"message__permalink\": \"https://langchain.slack.com/archives/C04TSGU0RA7/p1678859932375259\", \"channel\": \"C04TSGU0RA7\", \"message__bot_profile__name\": \"Zapier\", \"message__team\": \"T04F8K3FZB5\", \"message__bot_id\": \"B04TRV4R74K\", \"message__bot_profile__deleted\": \"false\", \"message__bot_profile__app_id\": \"A024R9PQM\", \"ts_time\": \"2023-03-15T05:58:52Z\", \"message__bot_profile__icons__image_36\": \"https://avatars.slack-edge.com/2022-08-02/3888649620612_f864dc1bb794cf7d82b0_36.png\", \"message__blocks[]block_id\": \"kdZZ\", \"message__blocks[]elements[]type\": \"['rich_text_section']\"}\u001B[0m\n",
-"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
-"Final Answer: I have sent a summary of the last email from Silicon Valley Bank to the #test-zapier channel in Slack.\u001B[0m\n",
+"Action Input: Send a slack message to the #test-zapier channel with the text \"Silicon Valley Bank has announced that Tim Mayopoulos is the new CEO. FDIC is fully insuring all deposits and they have an ask for clients and partners as they rebuild.\"\u001b[0m\n",
+"Observation: \u001b[36;1m\u001b[1;3m{\"message__text\": \"Silicon Valley Bank has announced that Tim Mayopoulos is the new CEO. FDIC is fully insuring all deposits and they have an ask for clients and partners as they rebuild.\", \"message__permalink\": \"https://langchain.slack.com/archives/C04TSGU0RA7/p1678859932375259\", \"channel\": \"C04TSGU0RA7\", \"message__bot_profile__name\": \"Zapier\", \"message__team\": \"T04F8K3FZB5\", \"message__bot_id\": \"B04TRV4R74K\", \"message__bot_profile__deleted\": \"false\", \"message__bot_profile__app_id\": \"A024R9PQM\", \"ts_time\": \"2023-03-15T05:58:52Z\", \"message__bot_profile__icons__image_36\": \"https://avatars.slack-edge.com/2022-08-02/3888649620612_f864dc1bb794cf7d82b0_36.png\", \"message__blocks[]block_id\": \"kdZZ\", \"message__blocks[]elements[]type\": \"['rich_text_section']\"}\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
+"Final Answer: I have sent a summary of the last email from Silicon Valley Bank to the #test-zapier channel in Slack.\u001b[0m\n",
 "\n",
-"\u001B[1m> Finished chain.\u001B[0m\n"
+"\u001b[1m> Finished chain.\u001b[0m\n"
 ]
 },
 {

@@ -257,7 +257,7 @@
 "        ),\n",
 "        None,\n",
 "    )\n",
-"    instructions = f'Send this to {SLACK_HANDLE} in Slack: {inputs[\"draft_reply\"]}'\n",
+"    instructions = f\"Send this to {SLACK_HANDLE} in Slack: {inputs['draft_reply']}\"\n",
 "    return {\n",
 "        \"slack_data\": ZapierNLARunAction(\n",
 "            action_id=action[\"id\"],\n",

@@ -286,18 +286,18 @@
 "text": [
 "\n",
 "\n",
-"\u001B[1m> Entering new SimpleSequentialChain chain...\u001B[0m\n",
-"\u001B[36;1m\u001B[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. Tim Mayopoulos <https://eml.svb.com/NjEwLUtBSy0yNjYAAAGKgoxUeBCLAyF_NxON97X4rKEaNBLG\", \"reply_to__email\": \"sreply@svb.com\", \"subject\": \"Meet the new CEO Tim Mayopoulos\", \"date\": \"Tue, 14 Mar 2023 23:42:29 -0500 (CDT)\", \"message_url\": \"https://mail.google.com/mail/u/0/#inbox/186e393b13cfdf0a\", \"attachment_count\": \"0\", \"to__emails\": \"ankush@langchain.dev\", \"message_id\": \"186e393b13cfdf0a\", \"labels\": \"IMPORTANT, CATEGORY_UPDATES, INBOX\"}\u001B[0m\n",
-"\u001B[33;1m\u001B[1;3m\n",
+"\u001b[1m> Entering new SimpleSequentialChain chain...\u001b[0m\n",
+"\u001b[36;1m\u001b[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. Tim Mayopoulos <https://eml.svb.com/NjEwLUtBSy0yNjYAAAGKgoxUeBCLAyF_NxON97X4rKEaNBLG\", \"reply_to__email\": \"sreply@svb.com\", \"subject\": \"Meet the new CEO Tim Mayopoulos\", \"date\": \"Tue, 14 Mar 2023 23:42:29 -0500 (CDT)\", \"message_url\": \"https://mail.google.com/mail/u/0/#inbox/186e393b13cfdf0a\", \"attachment_count\": \"0\", \"to__emails\": \"ankush@langchain.dev\", \"message_id\": \"186e393b13cfdf0a\", \"labels\": \"IMPORTANT, CATEGORY_UPDATES, INBOX\"}\u001b[0m\n",
+"\u001b[33;1m\u001b[1;3m\n",
 "Dear Silicon Valley Bridge Bank, \n",
 "\n",
 "Thank you for your email and the update regarding your new CEO Tim Mayopoulos. We appreciate your dedication to keeping your clients and partners informed and we look forward to continuing our relationship with you. \n",
 "\n",
 "Best regards, \n",
-"[Your Name]\u001B[0m\n",
-"\u001B[38;5;200m\u001B[1;3m{\"message__text\": \"Dear Silicon Valley Bridge Bank, \\n\\nThank you for your email and the update regarding your new CEO Tim Mayopoulos. We appreciate your dedication to keeping your clients and partners informed and we look forward to continuing our relationship with you. \\n\\nBest regards, \\n[Your Name]\", \"message__permalink\": \"https://langchain.slack.com/archives/D04TKF5BBHU/p1678859968241629\", \"channel\": \"D04TKF5BBHU\", \"message__bot_profile__name\": \"Zapier\", \"message__team\": \"T04F8K3FZB5\", \"message__bot_id\": \"B04TRV4R74K\", \"message__bot_profile__deleted\": \"false\", \"message__bot_profile__app_id\": \"A024R9PQM\", \"ts_time\": \"2023-03-15T05:59:28Z\", \"message__blocks[]block_id\": \"p7i\", \"message__blocks[]elements[]elements[]type\": \"[['text']]\", \"message__blocks[]elements[]type\": \"['rich_text_section']\"}\u001B[0m\n",
+"[Your Name]\u001b[0m\n",
+"\u001b[38;5;200m\u001b[1;3m{\"message__text\": \"Dear Silicon Valley Bridge Bank, \\n\\nThank you for your email and the update regarding your new CEO Tim Mayopoulos. We appreciate your dedication to keeping your clients and partners informed and we look forward to continuing our relationship with you. \\n\\nBest regards, \\n[Your Name]\", \"message__permalink\": \"https://langchain.slack.com/archives/D04TKF5BBHU/p1678859968241629\", \"channel\": \"D04TKF5BBHU\", \"message__bot_profile__name\": \"Zapier\", \"message__team\": \"T04F8K3FZB5\", \"message__bot_id\": \"B04TRV4R74K\", \"message__bot_profile__deleted\": \"false\", \"message__bot_profile__app_id\": \"A024R9PQM\", \"ts_time\": \"2023-03-15T05:59:28Z\", \"message__blocks[]block_id\": \"p7i\", \"message__blocks[]elements[]elements[]type\": \"[['text']]\", \"message__blocks[]elements[]type\": \"['rich_text_section']\"}\u001b[0m\n",
 "\n",
-"\u001B[1m> Finished chain.\u001B[0m\n"
+"\u001b[1m> Finished chain.\u001b[0m\n"
 ]
 },
 {

@@ -347,8 +347,7 @@
 "        id=\"entry_03\",\n",
 "    ),\n",
 "    Document(\n",
-"        page_content=\"Robbers broke into the city bank and stole \"\n",
-"        \"$1 million in cash.\",\n",
+"        page_content=\"Robbers broke into the city bank and stole $1 million in cash.\",\n",
 "        metadata={\"source\": \"news\"},\n",
 "        id=\"entry_04\",\n",
 "    ),\n",

@@ -359,8 +358,7 @@
 "        id=\"entry_05\",\n",
 "    ),\n",
 "    Document(\n",
-"        page_content=\"Is the new iPhone worth the price? Read this \"\n",
-"        \"review to find out.\",\n",
+"        page_content=\"Is the new iPhone worth the price? Read this review to find out.\",\n",
 "        metadata={\"source\": \"website\"},\n",
 "        id=\"entry_06\",\n",
 "    ),\n",

@@ -337,8 +337,8 @@
 "\n",
 "# Display results\n",
 "for i in range(0, len(results)):\n",
-"    print(f\"Result {i+1}: \", results[i][0].json())\n",
-"    print(f\"Score {i+1}: \", results[i][1])\n",
+"    print(f\"Result {i + 1}: \", results[i][0].json())\n",
+"    print(f\"Score {i + 1}: \", results[i][1])\n",
 "    print(\"\\n\")"
 ]
 },

@@ -399,8 +399,8 @@
 "\n",
 "# Display results\n",
 "for i in range(0, len(results)):\n",
-"    print(f\"Result {i+1}: \", results[i][0].json())\n",
-"    print(f\"Score {i+1}: \", results[i][1])\n",
+"    print(f\"Result {i + 1}: \", results[i][0].json())\n",
+"    print(f\"Score {i + 1}: \", results[i][1])\n",
 "    print(\"\\n\")"
 ]
 },

@@ -465,7 +465,7 @@
 "\n",
 "# Display results\n",
 "for i in range(0, len(results)):\n",
-"    print(f\"Result {i+1}: \", results[i][0].json())\n",
+"    print(f\"Result {i + 1}: \", results[i][0].json())\n",
 "    print(\"\\n\")"
 ]
 },

@@ -519,7 +519,7 @@
 "\n",
 "# Display results\n",
 "for i in range(0, len(results)):\n",
-"    print(f\"Result {i+1}: \", results[i][0].json())\n",
+"    print(f\"Result {i + 1}: \", results[i][0].json())\n",
 "    print(\"\\n\")"
 ]
 },

@@ -578,8 +578,8 @@
 "\n",
 "# Display results\n",
 "for i in range(0, len(results)):\n",
-"    print(f\"Result {i+1}: \", results[i][0].json())\n",
-"    print(f\"Score {i+1}: \", results[i][1])\n",
+"    print(f\"Result {i + 1}: \", results[i][0].json())\n",
+"    print(f\"Score {i + 1}: \", results[i][1])\n",
 "    print(\"\\n\")"
 ]
 },

@@ -648,8 +648,8 @@
 "\n",
 "# Display results\n",
 "for i in range(0, len(results)):\n",
-"    print(f\"Result {i+1}: \", results[i][0].json())\n",
-"    print(f\"Score {i+1}: \", results[i][1])\n",
+"    print(f\"Result {i + 1}: \", results[i][0].json())\n",
+"    print(f\"Score {i + 1}: \", results[i][1])\n",
 "    print(\"\\n\")"
 ]
 },

@@ -256,7 +256,7 @@
 "# The second parameter is the top-n to retrieve, and its default value is 4.\n",
 "vearch_standalone_res = vearch_standalone.similarity_search(query, 3)\n",
 "for idx, tmp in enumerate(vearch_standalone_res):\n",
-"    print(f\"{'#'*20}第{idx+1}段相关文档{'#'*20}\\n\\n{tmp.page_content}\\n\")\n",
+"    print(f\"{'#' * 20}第{idx + 1}段相关文档{'#' * 20}\\n\\n{tmp.page_content}\\n\")\n",
 "\n",
 "# combine your local knowleadge and query\n",
 "context = \"\".join([tmp.page_content for tmp in vearch_standalone_res])\n",

@@ -269,7 +269,7 @@
 "query_c = \"你知道凌波微步吗,你知道都有谁会凌波微步?\"\n",
 "cluster_res = vearch_cluster.similarity_search(query_c, 3)\n",
 "for idx, tmp in enumerate(cluster_res):\n",
-"    print(f\"{'#'*20}第{idx+1}段相关文档{'#'*20}\\n\\n{tmp.page_content}\\n\")\n",
+"    print(f\"{'#' * 20}第{idx + 1}段相关文档{'#' * 20}\\n\\n{tmp.page_content}\\n\")\n",
 "\n",
 "# In practical applications, we usually limit the boundary value of similarity. The following method can set this value.\n",
 "cluster_res_with_bound = vearch_cluster.similarity_search_with_score(\n",

@@ -384,7 +384,7 @@
 "query3 = \"你知道vearch是什么吗?\"\n",
 "res1 = vearch_standalone.similarity_search(query3, 3)\n",
 "for idx, tmp in enumerate(res1):\n",
-"    print(f\"{'#'*20}第{idx+1}段相关文档{'#'*20}\\n\\n{tmp.page_content}\\n\")\n",
+"    print(f\"{'#' * 20}第{idx + 1}段相关文档{'#' * 20}\\n\\n{tmp.page_content}\\n\")\n",
 "\n",
 "context1 = \"\".join([tmp.page_content for tmp in res1])\n",
 "new_query1 = f\"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\\n {context1} \\n 回答用户这个问题:{query3}\\n\\n\"\n",

@@ -396,7 +396,7 @@
 "query3_c = \"你知道vearch是什么吗?\"\n",
 "res1_c = vearch_standalone.similarity_search(query3_c, 3)\n",
 "for idx, tmp in enumerate(res1_c):\n",
-"    print(f\"{'#'*20}第{idx+1}段相关文档{'#'*20}\\n\\n{tmp.page_content}\\n\")\n",
+"    print(f\"{'#' * 20}第{idx + 1}段相关文档{'#' * 20}\\n\\n{tmp.page_content}\\n\")\n",
 "\n",
 "context1_C = \"\".join([tmp.page_content for tmp in res1_c])\n",
 "new_query1_c = f\"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\\n {context1_C} \\n 回答用户这个问题:{query3_c}\\n\\n\"\n",

@@ -486,7 +486,7 @@
 "source": [
 "# Add metadata\n",
 "for i, doc in enumerate(docs):\n",
-"    doc.metadata[\"date\"] = f\"2023-{(i % 12)+1}-{(i % 28)+1}\"\n",
+"    doc.metadata[\"date\"] = f\"2023-{(i % 12) + 1}-{(i % 28) + 1}\"\n",
 "    doc.metadata[\"rating\"] = range(1, 6)[i % 5]\n",
 "    doc.metadata[\"author\"] = [\"Joe Biden\", \"Unknown\"][min(i, 1)]"
 ]

@@ -238,7 +238,7 @@
 "\n",
 "# Print the first 100 characters of each result\n",
 "for i, doc in enumerate(docs):\n",
-"    print(f\"\\nDocument {i+1}:\")\n",
+"    print(f\"\\nDocument {i + 1}:\")\n",
 "    print(doc.page_content[:100] + \"...\")"
 ]
 },

@@ -166,7 +166,7 @@
 "    output_text = f\"\"\"### Question:\n",
 "    {query}\n",
 "    ### Answer: \n",
-"    {result['text']}\n",
+"    {result[\"text\"]}\n",
 "    \"\"\"\n",
 "    display(Markdown(output_text))\n",
 "\n",

@@ -387,11 +387,11 @@
 "    output_text = f\"\"\"### Question: \n",
 "    {query}\n",
 "    ### Answer: \n",
-"    {result['answer']}\n",
+"    {result[\"answer\"]}\n",
 "    ### Sources: \n",
-"    {result['sources']}\n",
+"    {result[\"sources\"]}\n",
 "    ### All relevant sources:\n",
-"    {', '.join(list(set([doc.metadata['source'] for doc in result['source_documents']])))}\n",
+"    {\", \".join(list(set([doc.metadata[\"source\"] for doc in result[\"source_documents\"]])))}\n",
 "    \"\"\"\n",
 "    display(Markdown(output_text))\n",
 "\n",

@@ -469,11 +469,11 @@
 "    output_text = f\"\"\"### Question: \n",
 "    {query}\n",
 "    ### Answer: \n",
-"    {result['answer']}\n",
+"    {result[\"answer\"]}\n",
 "    ### Sources: \n",
-"    {result['sources']}\n",
+"    {result[\"sources\"]}\n",
 "    ### All relevant sources:\n",
-"    {', '.join(list(set([doc.metadata['source'] for doc in result['source_documents']])))}\n",
+"    {\", \".join(list(set([doc.metadata[\"source\"] for doc in result[\"source_documents\"]])))}\n",
 "    \"\"\"\n",
 "    display(Markdown(output_text))\n",
 "\n",

@@ -271,7 +271,7 @@
 "    \"\"\"Retrieve information related to a query.\"\"\"\n",
 "    retrieved_docs = vector_store.similarity_search(query, k=2)\n",
 "    serialized = \"\\n\\n\".join(\n",
-"        (f\"Source: {doc.metadata}\\n\" f\"Content: {doc.page_content}\")\n",
+"        (f\"Source: {doc.metadata}\\nContent: {doc.page_content}\")\n",
 "        for doc in retrieved_docs\n",
 "    )\n",
 "    return serialized, retrieved_docs"

@@ -757,8 +757,8 @@
 "source": [
 "result = graph.invoke({\"question\": \"What is Task Decomposition?\"})\n",
 "\n",
-"print(f'Context: {result[\"context\"]}\\n\\n')\n",
-"print(f'Answer: {result[\"answer\"]}')"
+"print(f\"Context: {result['context']}\\n\\n\")\n",
+"print(f\"Answer: {result['answer']}\")"
 ]
 },
 {

@@ -393,9 +393,9 @@
 "    prompt = (\n",
 "        \"Given the following user question, corresponding SQL query, \"\n",
 "        \"and SQL result, answer the user question.\\n\\n\"\n",
-"        f'Question: {state[\"question\"]}\\n'\n",
-"        f'SQL Query: {state[\"query\"]}\\n'\n",
-"        f'SQL Result: {state[\"result\"]}'\n",
+"        f\"Question: {state['question']}\\n\"\n",
+"        f\"SQL Query: {state['query']}\\n\"\n",
+"        f\"SQL Result: {state['result']}\"\n",
 "    )\n",
 "    response = llm.invoke(prompt)\n",
 "    return {\"answer\": response.content}"

@@ -184,7 +184,7 @@
 "\n",
 "llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
 "\n",
-"prompt_template = \"What color are Bob's eyes?\\n\\n\" \"Context: {context}\"\n",
+"prompt_template = \"What color are Bob's eyes?\\n\\nContext: {context}\"\n",
 "prompt = ChatPromptTemplate.from_template(prompt_template)\n",
 "\n",
 "# The below chain formats context from a document into a prompt, then\n",

@@ -608,7 +608,7 @@ From the opposite direction, scientists use `LangChain` in research and referenc
 f"""
 ## {paper.title}
 
-- **Authors:** {', '.join(paper.authors)}
+- **Authors:** {", ".join(paper.authors)}
 - **arXiv id:** [{paper.arxiv_id}]({paper.url}) **Published Date:** {paper.published_date}
 - **LangChain:**
 

@@ -128,4 +128,4 @@ def check_notebooks(directory: str) -> list:
 if __name__ == "__main__":
     bad_files = check_notebooks(DOCS_DIR)
     if bad_files:
-        raise ImportError("Found bad imports:\n" f"{_serialize_bad_imports(bad_files)}")
+        raise ImportError(f"Found bad imports:\n{_serialize_bad_imports(bad_files)}")