mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-13 22:59:05 +00:00)
fix(docs): update llamacpp.ipynb for installation options on Mac (#32341)
The previous code generated a "data invalid" error.

---------

Co-authored-by: Mason Daugherty <mason@langchain.dev>
Co-authored-by: Mason Daugherty <github@mdrxy.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
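For quick reference, the substantive change is the Metal (Apple Silicon) reinstall command shown in the diff below. A sketch of the before/after, copied from that hunk; the explanatory comments are an interpretation, not part of the commit:

```bash
# Before: pip could still resolve to a prebuilt wheel, so CMAKE_ARGS had no effect
CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir

# After (#32341): force a source build, skipping binary wheels and the pip cache
CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install llama-cpp-python --force-reinstall --no-binary :all: --no-cache-dir
```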
This commit is contained in:
parent 5a50802c9a
commit f9b4e501a8
@@ -44,9 +44,7 @@
 "tags": []
 },
 "outputs": [],
-"source": [
-"%pip install --upgrade --quiet llama-cpp-python"
-]
+"source": "%pip install --upgrade --quiet llama-cpp-python"
 },
 {
 "cell_type": "markdown",
@@ -64,9 +62,7 @@
 "execution_count": null,
 "metadata": {},
 "outputs": [],
-"source": [
-"!CMAKE_ARGS=\"-DGGML_CUDA=on\" FORCE_CMAKE=1 pip install llama-cpp-python"
-]
+"source": "!CMAKE_ARGS=\"-DGGML_CUDA=on\" FORCE_CMAKE=1 pip install llama-cpp-python"
 },
 {
 "cell_type": "markdown",
@@ -80,9 +76,7 @@
 "execution_count": null,
 "metadata": {},
 "outputs": [],
-"source": [
-"!CMAKE_ARGS=\"-DGGML_CUDA=on\" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir"
-]
+"source": "!CMAKE_ARGS=\"-DGGML_CUDA=on\" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir"
 },
 {
 "cell_type": "markdown",
@@ -100,9 +94,7 @@
 "execution_count": null,
 "metadata": {},
 "outputs": [],
-"source": [
-"!CMAKE_ARGS=\"-DLLAMA_METAL=on\" FORCE_CMAKE=1 pip install llama-cpp-python"
-]
+"source": "!CMAKE_ARGS=\"-DLLAMA_METAL=on\" FORCE_CMAKE=1 pip install llama-cpp-python"
 },
 {
 "cell_type": "markdown",
@@ -116,9 +108,7 @@
 "execution_count": null,
 "metadata": {},
 "outputs": [],
-"source": [
-"!CMAKE_ARGS=\"-DLLAMA_METAL=on\" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir"
-]
+"source": "!CMAKE_ARGS=\"-DLLAMA_METAL=on\" FORCE_CMAKE=1 pip install llama-cpp-python --force-reinstall --no-binary :all: --no-cache-dir"
 },
 {
 "cell_type": "markdown",
@@ -174,9 +164,7 @@
 "execution_count": null,
 "metadata": {},
 "outputs": [],
-"source": [
-"!python -m pip install -e . --force-reinstall --no-cache-dir"
-]
+"source": "!python -m pip install -e . --force-reinstall --no-cache-dir"
 },
 {
 "cell_type": "markdown",
@@ -718,4 +706,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 4
-}
+}
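After installation, the notebook goes on to run a model through LangChain's LlamaCpp wrapper. A minimal usage sketch, not part of this commit: the model path is a placeholder for any local GGUF file, and the offload settings are typical values for Metal on Apple Silicon.

```python
# Minimal sketch: load a local GGUF model through LangChain's LlamaCpp wrapper
# once llama-cpp-python has been installed with the command above.
from langchain_community.llms import LlamaCpp

llm = LlamaCpp(
    model_path="./models/llama-2-7b.Q4_K_M.gguf",  # placeholder path to a local GGUF file
    n_gpu_layers=1,  # offload to Metal on Apple Silicon; use 0 for CPU-only
    n_batch=512,     # tokens processed per batch during prompt evaluation
    n_ctx=2048,      # context window size
    verbose=True,
)

print(llm.invoke("Name the planets in the solar system."))
```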