diff --git a/docs/docs/integrations/llms/bedrock.ipynb b/docs/docs/integrations/llms/bedrock.ipynb index 717fbe3480f..e32d5fa840d 100644 --- a/docs/docs/integrations/llms/bedrock.ipynb +++ b/docs/docs/integrations/llms/bedrock.ipynb @@ -111,7 +111,35 @@ "cell_type": "markdown", "metadata": {}, "source": [ - + "### Custom models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "custom_llm = Bedrock(\n", + " credentials_profile_name=\"bedrock-admin\",\n", + " provider=\"cohere\",\n", + " model_id=\"\", # ARN like 'arn:aws:bedrock:...' obtained via provisioning the custom model\n", + " model_kwargs={\"temperature\": 1},\n", + " streaming=True,\n", + " callbacks=[StreamingStdOutCallbackHandler()],\n", + ")\n", + "\n", + "conversation = ConversationChain(\n", + " llm=custom_llm, verbose=True, memory=ConversationBufferMemory()\n", + ")\n", + "conversation.predict(input=\"What is the recipe of mayonnaise?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Guardrails for Amazon Bedrock example \n", "\n", "## Guardrails for Amazon Bedrock (Preview) \n", "[Guardrails for Amazon Bedrock](https://aws.amazon.com/bedrock/guardrails/) evaluates user inputs and model responses based on use case specific policies, and provides an additional layer of safeguards regardless of the underlying model. 
Guardrails can be applied across models, including Anthropic Claude, Meta Llama 2, Cohere Command, AI21 Labs Jurassic, and Amazon Titan Text, as well as fine-tuned models.\n", @@ -139,7 +167,7 @@ " print(f\"Guardrails: {kwargs}\")\n", "\n", "\n", - "# guardrails for Amazon Bedrock with trace\n", + "# Guardrails for Amazon Bedrock with trace\n", "llm = Bedrock(\n", " credentials_profile_name=\"bedrock-admin\",\n", " model_id=\"\",\n", @@ -166,7 +194,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/libs/community/langchain_community/llms/bedrock.py b/libs/community/langchain_community/llms/bedrock.py index 002eb90fc0b..d3264703c7c 100644 --- a/libs/community/langchain_community/llms/bedrock.py +++ b/libs/community/langchain_community/llms/bedrock.py @@ -229,9 +229,17 @@ class BedrockBase(BaseModel, ABC): config: Optional[Config] = None """An optional botocore.config.Config instance to pass to the client.""" + provider: Optional[str] = None + """The model provider, e.g., amazon, cohere, ai21, etc. When not supplied, provider + is extracted from the first part of the model_id e.g. 'amazon' in + 'amazon.titan-text-express-v1'. This value should be provided for model ids that do + not have the provider in them, e.g., custom and provisioned models that have an ARN + associated with them.""" + model_id: str """Id of the model to call, e.g., amazon.titan-text-express-v1, this is - equivalent to the modelId property in the list-foundation-models api""" + equivalent to the modelId property in the list-foundation-models api. 
For custom and + provisioned models, an ARN value is expected.""" model_kwargs: Optional[Dict] = None """Keyword arguments to pass to the model.""" @@ -353,6 +361,14 @@ class BedrockBase(BaseModel, ABC): } def _get_provider(self) -> str: + if self.provider: + return self.provider + if self.model_id.startswith("arn:"): + raise ValueError( + "Model provider should be supplied when passing a model ARN as " + "model_id" + ) + return self.model_id.split(".")[0] @property