From ec12b492f1c895027de744da0b9983c881a3f20c Mon Sep 17 00:00:00 2001 From: Erick Friis Date: Thu, 21 Nov 2024 15:56:57 -0800 Subject: [PATCH] docs: poetry publish --- docs/docs/contributing/how_to/index.mdx | 1 + .../how_to/integrations/index.mdx | 2 +- .../how_to/integrations/package.mdx | 244 ++++++++++++++++++ docs/docs/how_to/custom_chat_model.ipynb | 8 +- 4 files changed, 249 insertions(+), 6 deletions(-) create mode 100644 docs/docs/contributing/how_to/integrations/package.mdx diff --git a/docs/docs/contributing/how_to/index.mdx b/docs/docs/contributing/how_to/index.mdx index d9f903f4156..675586340f4 100644 --- a/docs/docs/contributing/how_to/index.mdx +++ b/docs/docs/contributing/how_to/index.mdx @@ -6,4 +6,5 @@ ## Integrations - [**Start Here**](integrations/index.mdx): Help us integrate with your favorite vendors and tools. +- [**Package**](integrations/package): Publish an integration package to PyPi - [**Standard Tests**](integrations/standard_tests): Ensure your integration passes an expected set of tests. diff --git a/docs/docs/contributing/how_to/integrations/index.mdx b/docs/docs/contributing/how_to/integrations/index.mdx index 159f4ac4e99..31d9cf2c0bf 100644 --- a/docs/docs/contributing/how_to/integrations/index.mdx +++ b/docs/docs/contributing/how_to/integrations/index.mdx @@ -66,7 +66,7 @@ that will render on this site (https://python.langchain.com/). As a prerequisite to adding your integration to our documentation, you must: 1. Confirm that your integration is in the [list of components](#components-to-integrate) we are currently accepting. -2. Ensure that your integration is in a separate package that can be installed with `pip install `. +2. [Publish your package to PyPi](./package.mdx) and make the repo public. 3. [Implement the standard tests](/docs/contributing/how_to/integrations/standard_tests) for your integration and successfully run them. 3. 
Write documentation for your integration in the `docs/docs/integrations/` directory of the LangChain monorepo. 4. Add a provider page for your integration in the `docs/docs/integrations/providers` directory of the LangChain monorepo. diff --git a/docs/docs/contributing/how_to/integrations/package.mdx b/docs/docs/contributing/how_to/integrations/package.mdx new file mode 100644 index 00000000000..09fe7f9a6f9 --- /dev/null +++ b/docs/docs/contributing/how_to/integrations/package.mdx @@ -0,0 +1,244 @@ +# How to bootstrap a new integration package + +This guide walks through the process of publishing a new LangChain integration +package to PyPi. + +Integration packages are just Python packages that can be installed with `pip install <your-package>`, +which contain classes that are compatible with LangChain's core interfaces. + +In this guide, we will be using [Poetry](https://python-poetry.org/) for +dependency management and packaging, and you're welcome to use any other tools you prefer. + +## **Prerequisites** + +- [GitHub](https://github.com) account +- [PyPi](https://pypi.org/) account + +## Bootstrapping a new Python package with Poetry + +First, install Poetry: + +```bash +pip install poetry +``` + +Next, create a new Python package with Poetry: + +```bash +poetry new langchain-parrot-link +``` + +Add main dependencies using Poetry, which will add them to your `pyproject.toml` file: + +```bash +poetry add langchain-core +``` + +We will also add some `test` dependencies in a separate poetry dependency group. If +you are not using Poetry, we recommend adding these in a way that won't package them +with your published package, or just installing them separately when you run tests. + +`langchain-tests` will provide the [standard tests](./standard_tests) we will use later. +We recommend pinning these to the latest version: + + +```bash +poetry add --group test pytest langchain-tests=={latest version} +``` + +You're now ready to start writing your integration package!
+ +## Writing your integration + +Let's say you're building a simple integration package that provides a `ChatParrotLink` +chat model integration for LangChain. Here's a simple example of what your project +structure might look like: + +```plaintext +langchain-parrot-link/ +├── langchain_parrot_link/ +│ ├── __init__.py +│ └── chat_models.py +├── tests/ +│ ├── __init__.py +│ └── test_chat_models.py +├── pyproject.toml +└── README.md +``` + +All of these files should already exist from step 1, except for +`chat_models.py` and `test_chat_models.py`! We will implement `test_chat_models.py` +later, following the [standard tests](./standard_tests) guide. + +To implement `chat_models.py`, let's copy the implementation from our +[Custom Chat Model Guide](../../../how_to/custom_chat_model). + +
+ chat_models.py +```python title="langchain_parrot_link/chat_models.py" +from typing import Any, Dict, Iterator, List, Optional + +from langchain_core.callbacks import ( + CallbackManagerForLLMRun, +) +from langchain_core.language_models import BaseChatModel +from langchain_core.messages import AIMessageChunk, BaseMessage, AIMessage +from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult + + +class CustomChatModelAdvanced(BaseChatModel): + """A custom chat model that echoes the first `n` characters of the input. + + When contributing an implementation to LangChain, carefully document + the model including the initialization parameters, include + an example of how to initialize the model and include any relevant + links to the underlying models documentation or API. + + Example: + + .. code-block:: python + + model = CustomChatModel(n=2) + result = model.invoke([HumanMessage(content="hello")]) + result = model.batch([[HumanMessage(content="hello")], + [HumanMessage(content="world")]]) + """ + + model_name: str + """The name of the model""" + n: int + """The number of characters from the last message of the prompt to be echoed.""" + + def _generate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + """Override the _generate method to implement the chat model logic. + + This can be a call to an API, a call to a local model, or any other + implementation that generates a response to the input prompt. + + Args: + messages: the prompt composed of a list of messages. + stop: a list of strings on which the model should stop generating. + If generation stops due to a stop token, the stop token itself + SHOULD BE INCLUDED as part of the output. 
This is not enforced + across models right now, but it's a good practice to follow since + it makes it much easier to parse the output of the model + downstream and understand why generation stopped. + run_manager: A run manager with callbacks for the LLM. + """ + # Replace this with actual logic to generate a response from a list + # of messages. + last_message = messages[-1] + tokens = last_message.content[: self.n] + message = AIMessage( + content=tokens, + additional_kwargs={}, # Used to add additional payload (e.g., function calling request) + response_metadata={ # Use for response metadata + "time_in_seconds": 3, + }, + ) + ## + + generation = ChatGeneration(message=message) + return ChatResult(generations=[generation]) + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + """Stream the output of the model. + + This method should be implemented if the model can generate output + in a streaming fashion. If the model does not support streaming, + do not implement it. In that case streaming requests will be automatically + handled by the _generate method. + + Args: + messages: the prompt composed of a list of messages. + stop: a list of strings on which the model should stop generating. + If generation stops due to a stop token, the stop token itself + SHOULD BE INCLUDED as part of the output. This is not enforced + across models right now, but it's a good practice to follow since + it makes it much easier to parse the output of the model + downstream and understand why generation stopped. + run_manager: A run manager with callbacks for the LLM. 
+ """ + last_message = messages[-1] + tokens = last_message.content[: self.n] + + for token in tokens: + chunk = ChatGenerationChunk(message=AIMessageChunk(content=token)) + + if run_manager: + # This is optional in newer versions of LangChain + # The on_llm_new_token will be called automatically + run_manager.on_llm_new_token(token, chunk=chunk) + + yield chunk + + # Let's add some other information (e.g., response metadata) + chunk = ChatGenerationChunk( + message=AIMessageChunk(content="", response_metadata={"time_in_sec": 3}) + ) + if run_manager: + # This is optional in newer versions of LangChain + # The on_llm_new_token will be called automatically + run_manager.on_llm_new_token(token, chunk=chunk) + yield chunk + + @property + def _llm_type(self) -> str: + """Get the type of language model used by this chat model.""" + return "echoing-chat-model-advanced" + + @property + def _identifying_params(self) -> Dict[str, Any]: + """Return a dictionary of identifying parameters. + + This information is used by the LangChain callback system, which + is used for tracing purposes make it possible to monitor LLMs. + """ + return { + # The model name allows users to specify custom token counting + # rules in LLM monitoring applications (e.g., in LangSmith users + # can provide per token pricing for their model and monitor + # costs for the given LLM.) + "model_name": self.model_name, + } +``` +
 +
+## Publishing your package to PyPi
+
+First, make sure you have a PyPi account and have logged in with Poetry:
+
+```bash
+poetry config pypi-token.pypi <your-pypi-token>
+```
+
+Next, build your package:
+
+```bash
+poetry build
+```
+
+Finally, publish your package to PyPi:
+
+```bash
+poetry publish
+```
+
+You're all set! Your package is now available on PyPi and can be installed with `pip install langchain-parrot-link`.
+
+## Next Steps
+
+Now that you've published your package, you can move on to the next step: [Implement the standard tests](./standard_tests) for your integration and successfully run them. diff --git a/docs/docs/how_to/custom_chat_model.ipynb b/docs/docs/how_to/custom_chat_model.ipynb index 6f8e68e01a3..e4a126ccef8 100644 --- a/docs/docs/how_to/custom_chat_model.ipynb +++ b/docs/docs/how_to/custom_chat_model.ipynb @@ -162,23 +162,21 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "25ba32e5-5a6d-49f4-bb68-911827b84d61", "metadata": { "tags": [] }, "outputs": [], "source": [ - "from typing import Any, AsyncIterator, Dict, Iterator, List, Optional\n", + "from typing import Any, Dict, Iterator, List, Optional\n", "\n", "from langchain_core.callbacks import (\n", - " AsyncCallbackManagerForLLMRun,\n", " CallbackManagerForLLMRun,\n", ")\n", - "from langchain_core.language_models import BaseChatModel, SimpleChatModel\n", + "from langchain_core.language_models import BaseChatModel\n", "from langchain_core.messages import AIMessageChunk, BaseMessage, HumanMessage\n", "from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult\n", - "from langchain_core.runnables import run_in_executor\n", "\n", "\n", "class CustomChatModelAdvanced(BaseChatModel):\n",