fixed mypy errors in private_gpt for llama-index

Somashekar B R 2024-09-24 23:53:27 +05:30
parent 80f15a1568
commit b807e50895
2 changed files with 130 additions and 6 deletions
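
This change pins llama-index 0.10.0 and flips several of its packages from optional to required, so that mypy can import and type-check them when run against private_gpt. A minimal sketch, assuming nothing about the project's actual call sites (the function below is illustrative, not code from this commit), of the kind of typed llama-index usage this makes checkable:

    # Illustrative sketch: a typed llama-index call site. mypy can only
    # verify these annotations once llama-index resolves as a regular,
    # non-optional dependency of the checked environment.
    from llama_index.core.llms import LLM, ChatMessage, MessageRole

    def ask(llm: LLM, question: str) -> str:
        # chat() returns a ChatResponse whose .message.content is Optional[str]
        reply = llm.chat([ChatMessage(role=MessageRole.USER, content=question)])
        return reply.message.content or ""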

poetry.lock (generated)

@@ -1209,7 +1209,7 @@ files = [
name = "distro"
version = "1.9.0"
description = "Distro - an OS platform information API"
optional = true
optional = false
python-versions = ">=3.6"
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
@@ -2339,7 +2339,7 @@ i18n = ["Babel (>=2.7)"]
name = "jiter"
version = "0.5.0"
description = "Fast iterable JSON parser."
optional = true
optional = false
python-versions = ">=3.8"
files = [
{file = "jiter-0.5.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b599f4e89b3def9a94091e6ee52e1d7ad7bc33e238ebb9c4c63f211d74822c3f"},
@@ -2610,6 +2610,43 @@ dev = ["black (>=23.3.0)", "httpx (>=0.24.1)", "mkdocs (>=1.4.3)", "mkdocs-mater
server = ["PyYAML (>=5.1)", "fastapi (>=0.100.0)", "pydantic-settings (>=2.0.1)", "sse-starlette (>=1.6.1)", "starlette-context (>=0.3.6,<0.4)", "uvicorn (>=0.22.0)"]
test = ["fastapi (>=0.100.0)", "httpx (>=0.24.1)", "pydantic-settings (>=2.0.1)", "pytest (>=7.4.0)", "scipy (>=1.10)", "sse-starlette (>=1.6.1)", "starlette-context (>=0.3.6,<0.4)"]
[[package]]
name = "llama-index"
version = "0.10.0"
description = "Interface between LLMs and your data"
optional = false
python-versions = ">=3.8.1,<3.12"
files = [
{file = "llama_index-0.10.0-py3-none-any.whl", hash = "sha256:50ae1ae491f378d28ae6ce8591fbcf408d901549ad333388aa711d1780b943a7"},
{file = "llama_index-0.10.0.tar.gz", hash = "sha256:625f871b1941bc7d9dde213cd0e1736e2d4f4e8d90ec609c687b6186f47fd73c"},
]
[package.dependencies]
llama-index-agent-openai = ">=0.1.0,<0.2.0"
llama-index-core = ">=0.10.0,<0.11.0"
llama-index-embeddings-openai = ">=0.1.0,<0.2.0"
llama-index-legacy = ">=0.9.48,<0.10.0"
llama-index-llms-openai = ">=0.1.0,<0.2.0"
llama-index-multi-modal-llms-openai = ">=0.1.0,<0.2.0"
llama-index-program-openai = ">=0.1.0,<0.2.0"
llama-index-question-gen-openai = ">=0.1.0,<0.2.0"
llama-index-readers-file = ">=0.1.0,<0.2.0"
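
For context, llama-index 0.10.0 is a thin meta-package: the pinned dependencies above supply the modules that the llama_index namespace exposes. A hedged sketch of how the locked sub-packages map onto imports (actually running it would additionally require an OpenAI API key, which the lock file says nothing about):

    # Sketch: which locked sub-package provides which 0.10.x import.
    from llama_index.core import Document, VectorStoreIndex    # llama-index-core
    from llama_index.embeddings.openai import OpenAIEmbedding  # llama-index-embeddings-openai
    from llama_index.llms.openai import OpenAI                 # llama-index-llms-openai

    # Building a tiny index exercises core plus both OpenAI integrations.
    index = VectorStoreIndex.from_documents(
        [Document(text="hello")], embed_model=OpenAIEmbedding()
    )
    engine = index.as_query_engine(llm=OpenAI())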
[[package]]
name = "llama-index-agent-openai"
version = "0.1.7"
description = "llama-index agent openai integration"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "llama_index_agent_openai-0.1.7-py3-none-any.whl", hash = "sha256:6764d385699f4f40ee34bcd88309c820e9e71aa9675a4bde26d4f625d79190a8"},
{file = "llama_index_agent_openai-0.1.7.tar.gz", hash = "sha256:6962f02e94c097c6a823dad494568e62b83f4218eb852ef0dce90bd3ffb10406"},
]
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
llama-index-llms-openai = ">=0.1.5,<0.2.0"
[[package]]
name = "llama-index-core"
version = "0.10.68.post1"
@@ -2740,7 +2777,7 @@ llama-index-core = ">=0.10.1,<0.11.0"
name = "llama-index-embeddings-openai"
version = "0.1.11"
description = "llama-index embeddings openai integration"
optional = true
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "llama_index_embeddings_openai-0.1.11-py3-none-any.whl", hash = "sha256:e20806fc4baff6b8f5274decf2c1ca7c5c737648e01865475ffada164e32e173"},
@@ -2750,6 +2787,45 @@ files = [
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
[[package]]
name = "llama-index-legacy"
version = "0.9.48.post3"
description = "Interface between LLMs and your data"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "llama_index_legacy-0.9.48.post3-py3-none-any.whl", hash = "sha256:04221320d84d96ba9ee3e21e5055bd8527cbd769e8f1c60cf0368ed907e012a2"},
{file = "llama_index_legacy-0.9.48.post3.tar.gz", hash = "sha256:f6969f1085efb0abebd6367e46f3512020f3f6b9c086f458a519830dd61e8206"},
]
[package.dependencies]
aiohttp = ">=3.8.6,<4.0.0"
dataclasses-json = "*"
deprecated = ">=1.2.9.3"
dirtyjson = ">=1.0.8,<2.0.0"
fsspec = ">=2023.5.0"
httpx = "*"
nest-asyncio = ">=1.5.8,<2.0.0"
networkx = ">=3.0"
nltk = ">=3.8.1"
numpy = "*"
openai = ">=1.1.0"
pandas = "*"
requests = ">=2.31.0"
SQLAlchemy = {version = ">=1.4.49", extras = ["asyncio"]}
tenacity = ">=8.2.0,<9.0.0"
tiktoken = ">=0.3.3"
typing-extensions = ">=4.5.0"
typing-inspect = ">=0.8.0"
[package.extras]
gradientai = ["gradientai (>=1.4.0)"]
html = ["beautifulsoup4 (>=4.12.2,<5.0.0)"]
langchain = ["langchain (>=0.0.303)"]
local-models = ["optimum[onnxruntime] (>=1.13.2,<2.0.0)", "sentencepiece (>=0.1.99,<0.2.0)", "transformers[torch] (>=4.33.1,<5.0.0)"]
postgres = ["asyncpg (>=0.28.0,<0.29.0)", "pgvector (>=0.1.0,<0.2.0)", "psycopg2-binary (>=2.9.9,<3.0.0)"]
query-tools = ["guidance (>=0.0.64,<0.0.65)", "jsonpath-ng (>=1.6.0,<2.0.0)", "lm-format-enforcer (>=0.4.3,<0.5.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "scikit-learn", "spacy (>=3.7.1,<4.0.0)"]
[[package]]
name = "llama-index-llms-azure-openai"
version = "0.1.10"
@@ -2832,7 +2908,7 @@ ollama = ">=0.3.0"
name = "llama-index-llms-openai"
version = "0.1.31"
description = "llama-index llms openai integration"
optional = true
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "llama_index_llms_openai-0.1.31-py3-none-any.whl", hash = "sha256:800815b1b964b7d8dddd0e02a09fb57ac5f2ec6f80db92cd704dae718846023f"},
@@ -2859,6 +2935,53 @@ llama-index-core = ">=0.10.1,<0.11.0"
llama-index-llms-openai = ">=0.1.1,<0.2.0"
transformers = ">=4.37.0,<5.0.0"
[[package]]
name = "llama-index-multi-modal-llms-openai"
version = "0.1.9"
description = "llama-index multi-modal-llms openai integration"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "llama_index_multi_modal_llms_openai-0.1.9-py3-none-any.whl", hash = "sha256:614f40427a4671e72742780be8fda77297dbf2942519bffcb2c9de8696a9edff"},
{file = "llama_index_multi_modal_llms_openai-0.1.9.tar.gz", hash = "sha256:dbacf44d5c2cca07ca424eacd1337583002d70387a3c1868cf8ae743b1dbec4a"},
]
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
llama-index-llms-openai = ">=0.1.1,<0.2.0"
[[package]]
name = "llama-index-program-openai"
version = "0.1.7"
description = "llama-index program openai integration"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "llama_index_program_openai-0.1.7-py3-none-any.whl", hash = "sha256:33489b573c1050a3f583ff68fcbc4bcbd49f29e74f3e5baea08ab0d5f363403c"},
{file = "llama_index_program_openai-0.1.7.tar.gz", hash = "sha256:bf7eb61a073381714be5a049d93b40044dfe51bd4333bee539d1532b7407621f"},
]
[package.dependencies]
llama-index-agent-openai = ">=0.1.1,<0.3.0"
llama-index-core = ">=0.10.57,<0.11.0"
llama-index-llms-openai = ">=0.1.1"
[[package]]
name = "llama-index-question-gen-openai"
version = "0.1.3"
description = "llama-index question_gen openai integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_question_gen_openai-0.1.3-py3-none-any.whl", hash = "sha256:1f83b49e8b2e665030d1ec8c54687d6985d9fa8426147b64e46628a9e489b302"},
{file = "llama_index_question_gen_openai-0.1.3.tar.gz", hash = "sha256:4486198117a45457d2e036ae60b93af58052893cc7d78fa9b6f47dd47b81e2e1"},
]
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
llama-index-llms-openai = ">=0.1.1,<0.2.0"
llama-index-program-openai = ">=0.1.1,<0.2.0"
[[package]]
name = "llama-index-readers-file"
version = "0.1.33"
@@ -3956,7 +4079,7 @@ sympy = "*"
name = "openai"
version = "1.46.1"
description = "The official Python library for the openai API"
optional = true
optional = false
python-versions = ">=3.7.1"
files = [
{file = "openai-1.46.1-py3-none-any.whl", hash = "sha256:7517f07117cf66012bbc55c49fd6b983eaac0f3d2a09c90cba1140d4455e4290"},
@@ -7096,4 +7219,4 @@ vector-stores-qdrant = ["llama-index-vector-stores-qdrant"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.11,<3.12"
content-hash = "df2024950dd0e9c55ae48020af1a71510cf878d696d2aa076e65cd7ee30b08fe"
content-hash = "02e91dd870c08d8774d5f528db727222fa320fb3e686a5fe0366cb8cfa76fefc"

pyproject.toml

@@ -71,6 +71,7 @@ ollama = {version ="^0.3.0", optional = true}
# Optional HF Transformers
einops = {version = "^0.8.0", optional = true}
retry-async = "^0.1.4"
llama-index = "0.10.0"
[tool.poetry.extras]
ui = ["gradio", "ffmpy"]
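
After editing pyproject.toml, poetry lock has to be re-run, which is what produced the new content-hash recorded above. One way to confirm the mypy fix afterwards, assuming mypy is installed in the same virtualenv (the target path below is this commit's package directory, not something the diff itself states), is to drive it programmatically:

    # Sketch: run mypy on the package and fail loudly if errors remain.
    from mypy import api  # mypy's public programmatic entry point

    stdout, stderr, exit_code = api.run(["private_gpt"])
    print(stdout)
    print(stderr)
    assert exit_code == 0, "mypy still reports errors"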