diff --git a/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb b/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb index 008746f7479..885174f4a1b 100644 --- a/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb +++ b/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb @@ -18,6 +18,14 @@ "\n", "Note: This is separate from the Google PaLM integration. Google has chosen to offer an enterprise version of PaLM through GCP, and this supports the models made available through there. \n", "\n", + "ChatVertexAI exposes all foundational models available in Google Cloud:\n", + "\n", + "- Gemini (`gemini-pro` and `gemini-pro-vision`)\n", + "- PaLM 2 for Text (`text-bison`)\n", + "- Codey for Code Chat (`codechat-bison`)\n", + "\n", + "For a full and updated list of available models, visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n", + "\n", "By default, Google Cloud [does not use](https://cloud.google.com/vertex-ai/docs/generative-ai/data-governance#foundation_model_development) customer data to train its foundation models as part of Google Cloud`s AI/ML Privacy Commitment. 
More details about how Google processes data can also be found in [Google's Customer Data Processing Addendum (CDPA)](https://cloud.google.com/terms/data-processing-addendum).\n", "\n", "To use `Google Cloud Vertex AI` PaLM you must have the `langchain-google-vertexai` Python package installed and either:\n", @@ -35,9 +43,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "%pip install --upgrade --quiet langchain-google-vertexai" @@ -45,7 +51,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -64,7 +70,7 @@ "AIMessage(content=\" J'aime la programmation.\")" ] }, - "execution_count": 8, + "execution_count": null, "metadata": {}, "output_type": "execute_result" } @@ -98,7 +104,7 @@ "AIMessage(content=\"J'aime la programmation.\")" ] }, - "execution_count": 9, + "execution_count": null, "metadata": {}, "output_type": "execute_result" } @@ -123,7 +129,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -132,7 +138,7 @@ "AIMessage(content=' プログラミングが大好きです')" ] }, - "execution_count": 4, + "execution_count": null, "metadata": {}, "output_type": "execute_result" } @@ -159,28 +165,17 @@ }, { "cell_type": "markdown", - "metadata": { - "execution": { - "iopub.execute_input": "2023-06-17T21:09:25.423568Z", - "iopub.status.busy": "2023-06-17T21:09:25.423213Z", - "iopub.status.idle": "2023-06-17T21:09:25.429641Z", - "shell.execute_reply": "2023-06-17T21:09:25.429060Z", - "shell.execute_reply.started": "2023-06-17T21:09:25.423546Z" - }, - "tags": [] - }, + "metadata": {}, "source": [ "## Code generation chat models\n", - "You can now leverage the Codey API for code chat within Vertex AI. The model name is:\n", - "- codechat-bison: for code assistance" + "You can now leverage the Codey API for code chat within Vertex AI. 
The model available is:\n", + "- `codechat-bison`: for code assistance" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [ { "name": "stdout", @@ -242,7 +237,7 @@ " model_name=\"codechat-bison\", max_output_tokens=1000, temperature=0.5\n", ")\n", "\n", - "message = chat.invoke(\"Write a Python function to identify all prime numbers\")\n", + "message = chat.invoke(\"Write a Python function generating all prime numbers\")\n", "print(message.content)" ] }, @@ -266,7 +261,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -320,7 +315,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -353,7 +348,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -362,7 +357,7 @@ "MyModel(name='Erick', age=27)" ] }, - "execution_count": 3, + "execution_count": null, "metadata": {}, "output_type": "execute_result" } @@ -389,7 +384,7 @@ "source": [ "## Asynchronous calls\n", "\n", - "We can make asynchronous calls via the Runnables [Async Interface](/docs/expression_language/interface)" + "We can make asynchronous calls via the Runnables [Async Interface](/docs/expression_language/interface)." 
] }, { @@ -414,10 +409,10 @@ { "data": { "text/plain": [ - "AIMessage(content=' Why do you love programming?')" + "AIMessage(content=' अहं प्रोग्रामनं प्रेमामि')" ] }, - "execution_count": 6, + "execution_count": null, "metadata": {}, "output_type": "execute_result" } @@ -428,6 +423,10 @@ ")\n", "human = \"{text}\"\n", "prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n", + "\n", + "chat = ChatVertexAI(\n", + " model_name=\"chat-bison\", max_output_tokens=1000, temperature=0.5\n", + ")\n", "chain = prompt | chat\n", "\n", "asyncio.run(\n", @@ -483,43 +482,15 @@ " sys.stdout.write(chunk.content)\n", " sys.stdout.flush()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { - "environment": { - "kernel": "python3", - "name": "common-cpu.m108", - "type": "gcloud", - "uri": "gcr.io/deeplearning-platform-release/base-cpu:m108" - }, "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" + "display_name": "", + "name": "" }, "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - }, - "vscode": { - "interpreter": { - "hash": "cc99336516f23363341912c6723b01ace86f02e26b4290be1efc0677e2e2ec24" - } + "name": "python" } }, "nbformat": 4,