From 6aa66fd2b0ad2a6068d61906b2f00264eb32f4b8 Mon Sep 17 00:00:00 2001
From: Hashem Alsaket
Date: Wed, 5 Jul 2023 19:45:02 -0500
Subject: [PATCH] Update Hugging Face Hub notebook (#7236)

Description: `flan-t5-xl` hangs, updated to `flan-t5-xxl`. Tested all stabilityai LLMs; all hang, so they were removed from the tutorial. Temperature > 0 to prevent unintended determinism.
Issue: #3275
Tag maintainer: @baskaryan
---
 .../llms/integrations/huggingface_hub.ipynb   | 53 ++-----------------
 1 file changed, 4 insertions(+), 49 deletions(-)

diff --git a/docs/extras/modules/model_io/models/llms/integrations/huggingface_hub.ipynb b/docs/extras/modules/model_io/models/llms/integrations/huggingface_hub.ipynb
index 042c7160089..170fcb70103 100644
--- a/docs/extras/modules/model_io/models/llms/integrations/huggingface_hub.ipynb
+++ b/docs/extras/modules/model_io/models/llms/integrations/huggingface_hub.ipynb
@@ -82,9 +82,9 @@
    "source": [
     "from langchain import HuggingFaceHub\n",
     "\n",
-    "repo_id = \"google/flan-t5-xl\" # See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options\n",
+    "repo_id = \"google/flan-t5-xxl\" # See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options\n",
     "\n",
-    "llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={\"temperature\": 0, \"max_length\": 64})"
+    "llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={\"temperature\": 0.5, \"max_length\": 64})"
    ]
   },
   {
@@ -118,51 +118,6 @@
     "Below are some examples of models you can access through the Hugging Face Hub integration."
    ]
   },
-  {
-   "attachments": {},
-   "cell_type": "markdown",
-   "id": "4fa9337e-ccb5-4c52-9b7c-1653148bc256",
-   "metadata": {},
-   "source": [
-    "### StableLM, by Stability AI\n",
-    "\n",
-    "See [Stability AI's](https://huggingface.co/stabilityai) organization page for a list of available models."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "36a1ce01-bd46-451f-8ee6-61c8f4bd665a",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "repo_id = \"stabilityai/stablelm-tuned-alpha-3b\"\n",
-    "# Others include stabilityai/stablelm-base-alpha-3b\n",
-    "# as well as 7B parameter versions"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "b5654cea-60b0-4f40-ab34-06ba1eca810d",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={\"temperature\": 0, \"max_length\": 64})"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "2f19d0dc-c987-433f-a8d6-b1214e8ee067",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Reuse the prompt and question from above.\n",
-    "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
-    "print(llm_chain.run(question))"
-   ]
-  },
   {
    "attachments": {},
    "cell_type": "markdown",
@@ -185,7 +140,7 @@
     "\n",
     "repo_id = \"databricks/dolly-v2-3b\"\n",
     "\n",
-    "llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={\"temperature\": 0, \"max_length\": 64})"
+    "llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={\"temperature\": 0.5, \"max_length\": 64})"
    ]
   },
   {
@@ -225,7 +180,7 @@
     "from langchain import HuggingFaceHub\n",
     "\n",
     "repo_id = \"Writer/camel-5b-hf\" # See https://huggingface.co/Writer for other options\n",
-    "llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={\"temperature\": 0, \"max_length\": 64})"
+    "llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={\"temperature\": 0.5, \"max_length\": 64})"
    ]
   },
   {
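
For reference, the updated cells are used together in the notebook roughly as in the sketch below. This is a minimal illustration, not part of the patch: the prompt template and the `question` value are assumptions standing in for cells defined earlier in the notebook (not shown in this diff), and the sketch assumes a valid `HUGGINGFACEHUB_API_TOKEN` is available in the environment.

    import os

    from langchain import HuggingFaceHub, LLMChain, PromptTemplate

    # HuggingFaceHub reads the API token from this environment variable.
    assert "HUGGINGFACEHUB_API_TOKEN" in os.environ

    # Illustrative prompt and question; the notebook defines its own in earlier cells.
    template = "Question: {question}\nAnswer: Let's think step by step."
    prompt = PromptTemplate(template=template, input_variables=["question"])
    question = "Who won the FIFA World Cup in the year 1994?"

    repo_id = "google/flan-t5-xxl"  # model updated by this patch

    # temperature > 0 so generations are not unintentionally deterministic
    llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 64})

    llm_chain = LLMChain(prompt=prompt, llm=llm)
    print(llm_chain.run(question))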