feat: Add support for Vertica analytical database (#1538)

This commit is contained in:
DQ 2024-05-20 09:45:40 +08:00 committed by GitHub
parent 6fb3d33bf4
commit 8d8411fcd3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 658 additions and 5 deletions

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.3 KiB

View File

@ -56,6 +56,7 @@ class ConnectorManager(BaseComponent):
from dbgpt.datasource.rdbms.conn_starrocks import ( # noqa: F401
StarRocksConnector,
)
from dbgpt.datasource.rdbms.conn_vertica import VerticaConnector # noqa: F401
from .connect_config_db import ConnectConfigEntity # noqa: F401

View File

@ -0,0 +1,256 @@
"""Vertica connector."""
import logging
from typing import Any, Dict, Iterable, List, Optional, Tuple, cast
from urllib.parse import quote
from urllib.parse import quote_plus as urlquote
from sqlalchemy import text
from sqlalchemy.dialects import registry
from .base import RDBMSConnector
logger = logging.getLogger(__name__)
registry.register(
"vertica.vertica_python",
"dbgpt.datasource.rdbms.dialect.vertica.dialect_vertica_python",
"VerticaDialect",
)
class VerticaConnector(RDBMSConnector):
    """Vertica connector.

    Reads schema metadata from Vertica's ``v_catalog`` / ``v_internal``
    system schemas. Tables are identified by their fully qualified name
    (``schema.table``) throughout, because Vertica resolves unqualified
    names against the search path.
    """

    driver = "vertica+vertica_python"
    db_type = "vertica"
    db_dialect = "vertica"

    @classmethod
    def from_uri_db(
        cls,
        host: str,
        port: int,
        user: str,
        pwd: str,
        db_name: str,
        engine_args: Optional[dict] = None,
        **kwargs: Any,
    ) -> "VerticaConnector":
        """Create a new VerticaConnector from host, port, user, pwd, db_name.

        Args:
            host: database server host.
            port: database server port.
            user: login user; URL-quoted into the connection string.
            pwd: login password; URL-quoted into the connection string.
            db_name: target database name.
            engine_args: optional SQLAlchemy engine arguments.
            **kwargs: extra arguments forwarded to ``from_uri``.

        Returns:
            VerticaConnector: connector bound to the given database.
        """
        db_url: str = (
            f"{cls.driver}://{quote(user)}:{urlquote(pwd)}@{host}:{str(port)}/{db_name}"
        )
        return cast(VerticaConnector, cls.from_uri(db_url, engine_args, **kwargs))

    @property
    def dialect(self) -> str:
        """Return string representation of dialect to use."""
        # Injected into the prompt via {dialect} in the prompt template; the
        # wording nudges the model to keep schema-qualified table names.
        return "Vertica sql, \
            correct postgresql sql is the another option \
            if you don't know much about Vertica. \
            尤其要注意表名称前面一定要带上模式名称! \
            Note the most important requirement is that \
            table name should keep its schema name in "

    def _sync_tables_from_db(self) -> Iterable[str]:
        """Refresh the cached set of qualified table and view names.

        System schemas (``v_`` prefix) are excluded. Raw strings keep the
        LIKE escape ``\\_`` from being treated as a Python escape sequence.
        """
        table_results = self.session.execute(
            text(
                r"""
                SELECT table_schema||'.'||table_name
                FROM v_catalog.tables
                WHERE table_schema NOT LIKE 'v\_%'
                UNION
                SELECT table_schema||'.'||table_name
                FROM v_catalog.views
                WHERE table_schema NOT LIKE 'v\_%';
                """
            )
        )
        self._all_tables = {row[0] for row in table_results}
        self._metadata.reflect(bind=self._engine)
        return self._all_tables

    def get_grants(self):
        """Get grants. Not supported for Vertica; always empty."""
        return []

    def get_collation(self):
        """Get collation. Not applicable for Vertica; always None."""
        return None

    def get_users(self):
        """Get user info.

        Returns:
            List of user names, or an empty list if the catalog query fails
            (e.g. insufficient privileges on ``v_internal``).
        """
        try:
            cursor = self.session.execute(text("SELECT name FROM v_internal.vs_users;"))
            users = cursor.fetchall()
            return [user[0] for user in users]
        except Exception as e:
            logger.warning(f"vertica get users error: {str(e)}")
            return []

    def get_fields(self, table_name) -> List[Tuple]:
        """Get column fields about specified table.

        Args:
            table_name: fully qualified table name (``schema.table``).

        Returns:
            List of (name, data_type, default, is_nullable, comment) tuples.
        """
        session = self._db_sessions()
        # Bind parameters instead of f-string interpolation: table_name may
        # come from user input and would otherwise allow SQL injection.
        cursor = session.execute(
            text(
                """
                SELECT column_name, data_type, column_default, is_nullable,
                    nvl(comment, column_name) as column_comment
                FROM v_catalog.columns c
                LEFT JOIN v_internal.vs_sub_comments s ON c.table_id = s.objectoid
                    AND c.column_name = s.childobject
                WHERE table_schema||'.'||table_name = :full_name;
                """
            ),
            {"full_name": table_name},
        )
        fields = cursor.fetchall()
        return [(field[0], field[1], field[2], field[3], field[4]) for field in fields]

    def get_columns(self, table_name: str) -> List[Dict]:
        """Get columns about specified table.

        Args:
            table_name (str): fully qualified table name (``schema.table``)

        Returns:
            columns: List[Dict], which contains name: str, type: str,
                default_expression: str, is_in_primary_key: bool, comment: str
                eg:[{'name': 'id', 'type': 'int', 'default_expression': '',
                'is_in_primary_key': True, 'comment': 'id'}, ...]
        """
        session = self._db_sessions()
        # Parameterized to avoid SQL injection via table_name.
        cursor = session.execute(
            text(
                """
                SELECT c.column_name, data_type, column_default
                , (p.column_name IS NOT NULL) is_in_primary_key
                , nvl(comment, c.column_name) as column_comment
                FROM v_catalog.columns c
                LEFT JOIN v_internal.vs_sub_comments s ON c.table_id = s.objectoid
                    AND c.column_name = s.childobject
                LEFT JOIN v_catalog.primary_keys p ON c.table_schema = p.table_schema
                    AND c.table_name = p.table_name
                    AND c.column_name = p.column_name
                WHERE c.table_schema||'.'||c.table_name = :full_name;
                """
            ),
            {"full_name": table_name},
        )
        fields = cursor.fetchall()
        return [
            {
                "name": field[0],
                "type": field[1],
                "default_expression": field[2],
                "is_in_primary_key": field[3],
                "comment": field[4],
            }
            for field in fields
        ]

    def get_charset(self):
        """Get character_set. Vertica stores strings as UTF-8."""
        return "utf-8"

    def get_show_create_table(self, table_name: str):
        """Return a synthesized CREATE TABLE statement.

        Vertica has no SHOW CREATE TABLE, so the statement is rebuilt from
        the column catalog (columns only, no constraints or projections).

        Args:
            table_name: fully qualified table name (``schema.table``).
        """
        cur = self.session.execute(
            text(
                """
                SELECT column_name, data_type
                FROM v_catalog.columns
                WHERE table_schema||'.'||table_name = :full_name;
                """
            ),
            {"full_name": table_name},
        )
        rows = cur.fetchall()
        create_table_query = f"CREATE TABLE {table_name} (\n"
        for row in rows:
            create_table_query += f"    {row[0]} {row[1]},\n"
        create_table_query = create_table_query.rstrip(",\n") + "\n)"
        return create_table_query

    def get_table_comments(self, db_name=None):
        """Return (qualified_table_name, comment) pairs for a schema.

        Args:
            db_name: schema name to filter on. The table name is used as a
                fallback when no comment is set.
        """
        cursor = self.session.execute(
            text(
                """
                SELECT table_schema||'.'||table_name
                , nvl(comment, table_name) as column_comment
                FROM v_catalog.tables t
                LEFT JOIN v_internal.vs_comments c ON t.table_id = c.objectoid
                WHERE table_schema = :schema
                """
            ),
            {"schema": db_name},
        )
        table_comments = cursor.fetchall()
        return [
            (table_comment[0], table_comment[1]) for table_comment in table_comments
        ]

    def get_table_comment(self, table_name: str) -> Dict:
        """Get table comments.

        Args:
            table_name (str): fully qualified table name (``schema.table``)

        Returns:
            comment: Dict, which contains text: Optional[str], eg:["text": "comment"]
        """
        # Fixed: the original query had a typo ("table_nam e=") which made
        # the predicate invalid and the lookup always fail.
        cursor = self.session.execute(
            text(
                """
                SELECT nvl(comment, table_name) as column_comment
                FROM v_catalog.tables t
                LEFT JOIN v_internal.vs_comments c ON t.table_id = c.objectoid
                WHERE table_schema||'.'||table_name = :full_name
                """
            ),
            {"full_name": table_name},
        )
        return {"text": cursor.scalar()}

    def get_column_comments(self, db_name: str, table_name: str):
        """Return (column_name, comment) pairs for a table.

        Args:
            db_name: schema name.
            table_name: bare table name (no schema prefix).
        """
        cursor = self.session.execute(
            text(
                """
                SELECT column_name, nvl(comment, column_name) as column_comment
                FROM v_catalog.columns c
                LEFT JOIN v_internal.vs_sub_comments s ON c.table_id = s.objectoid
                    AND c.column_name = s.childobject
                WHERE table_schema = :schema AND table_name = :table
                """
            ),
            {"schema": db_name, "table": table_name},
        )
        column_comments = cursor.fetchall()
        return [
            (column_comment[0], column_comment[1]) for column_comment in column_comments
        ]

    def get_database_names(self):
        """Get schema names, excluding Vertica system schemas (``v_`` prefix)."""
        session = self._db_sessions()
        cursor = session.execute(text("SELECT schema_name FROM v_catalog.schemata;"))
        results = cursor.fetchall()
        return [d[0] for d in results if not d[0].startswith("v_")]

    def get_current_db_name(self) -> str:
        """Get current schema name (Vertica's analogue of a current database)."""
        return self.session.execute(text("SELECT current_schema()")).scalar()

    def table_simple_info(self):
        """Get (qualified_table_name, column_list) rows for all user tables."""
        _sql = r"""
            SELECT table_schema||'.'||table_name
            , listagg(column_name using parameters max_length=65000)
            FROM v_catalog.columns
            WHERE table_schema NOT LIKE 'v\_%'
            GROUP BY 1;
            """
        cursor = self.session.execute(text(_sql))
        results = cursor.fetchall()
        return results

    def get_indexes(self, table_name):
        """Get table indexes. Vertica has no conventional indexes; always empty."""
        return []

View File

@ -0,0 +1,179 @@
"""Base class for Vertica dialect."""
from __future__ import (
absolute_import,
annotations,
division,
print_function,
unicode_literals,
)
import logging
import re
from typing import Any, Optional
from sqlalchemy import sql
from sqlalchemy.engine import default, reflection
logger: logging.Logger = logging.getLogger(__name__)
class VerticaInspector(reflection.Inspector):
    """Reflection inspector for Vertica.

    Thin delegation layer: each method forwards to the corresponding
    ``VerticaDialect`` hook, supplying the inspector's bind and info cache.
    """

    dialect: VerticaDialect

    def get_all_columns(self, table, schema: Optional[str] = None, **kw: Any):
        r"""Return names of all columns of *table* within *schema*."""
        target = self.dialect
        return target.get_all_columns(
            self.bind, table, schema, info_cache=self.info_cache, **kw
        )

    def get_table_comment(self, table_name: str, schema: Optional[str] = None, **kw):
        """Return the comment of *table_name* within *schema*."""
        target = self.dialect
        return target.get_table_comment(
            self.bind, table_name, schema, info_cache=self.info_cache, **kw
        )

    def get_view_columns(
        self, view: Optional[str] = None, schema: Optional[str] = None, **kw: Any
    ):
        r"""Return names of all columns of *view* within *schema*."""
        target = self.dialect
        return target.get_view_columns(
            self.bind, view, schema, info_cache=self.info_cache, **kw
        )

    def get_view_comment(
        self, view: Optional[str] = None, schema: Optional[str] = None, **kw
    ):
        r"""Return the comment of *view* within *schema*."""
        target = self.dialect
        return target.get_view_comment(
            self.bind, view, schema, info_cache=self.info_cache, **kw
        )
class VerticaDialect(default.DefaultDialect):
    """SQLAlchemy dialect for Vertica.

    Most reflection hooks are stubs returning empty results; DB-GPT reads
    metadata directly through ``VerticaConnector`` catalog queries instead.
    """

    name = "vertica"
    inspector = VerticaInspector

    def __init__(self, json_serializer=None, json_deserializer=None, **kwargs):
        """Init object.

        Args:
            json_serializer: optional callable used to serialize JSON values.
            json_deserializer: optional callable used to deserialize JSON values.
            **kwargs: forwarded to ``DefaultDialect``.
        """
        default.DefaultDialect.__init__(self, **kwargs)
        self._json_deserializer = json_deserializer
        self._json_serializer = json_serializer

    def initialize(self, connection):
        """Init dialect."""
        super().initialize(connection)

    def _get_default_schema_name(self, connection):
        # Vertica's current_schema() plays the role of the default schema.
        return connection.scalar(sql.text("SELECT current_schema()"))

    def _get_server_version_info(self, connection):
        """Parse (major, minor, patch) out of Vertica's version() banner.

        Raises:
            AssertionError: if the banner does not match the expected format.
        """
        v = connection.scalar(sql.text("SELECT version()"))
        # Fixed: quantifier must be inside the group -- "(\d+)" -- the
        # original "(\d)+" captured only the LAST digit of the patch level
        # (e.g. 12.0.14 -> patch 4).
        m = re.match(r".*Vertica Analytic Database v(\d+)\.(\d+)\.(\d+).*", v)
        if not m:
            raise AssertionError(
                "Could not determine version from string '%(ver)s'" % {"ver": v}
            )
        # All three groups are guaranteed non-None after a successful match.
        return tuple(int(x) for x in m.group(1, 2, 3))

    def create_connect_args(self, url):
        """Create args of connection.

        Returns:
            ([], opts): no positional args; keyword options from the URL,
            with ``username`` renamed to ``user`` for vertica-python.
        """
        opts = url.translate_connect_args(username="user")
        opts.update(url.query)
        return [], opts

    def has_table(self, connection, table_name, schema=None):
        """Check availability of a table. Stub: always False."""
        return False

    def has_sequence(self, connection, sequence_name, schema=None):
        """Check availability of a sequence. Stub: always False."""
        return False

    def has_type(self, connection, type_name):
        """Check availability of a type. Stub: always False."""
        return False

    def get_schema_names(self, connection, **kw):
        """Return names of all schemas. Stub: always empty."""
        return []

    def get_table_comment(self, connection, table_name, schema=None, **kw):
        """Return comment of a table in a schema. Stub: echoes the name."""
        return {"text": table_name}

    def get_table_names(self, connection, schema=None, **kw):
        """Get names of tables in a schema. Stub: always empty."""
        return []

    def get_temp_table_names(self, connection, schema=None, **kw):
        """Get names of temp tables in a schema. Stub: always empty."""
        return []

    def get_view_names(self, connection, schema=None, **kw):
        """Get names of views in a schema. Stub: always empty."""
        return []

    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Get definition of a view in a schema. Stub: echoes the name."""
        return view_name

    def get_temp_view_names(self, connection, schema=None, **kw):
        """Get names of temp views in a schema. Stub: always empty."""
        return []

    def get_unique_constraints(self, connection, table_name, schema=None, **kw):
        """Get unique constraints of a table in a schema. Stub: always empty."""
        return []

    def get_check_constraints(self, connection, table_name, schema=None, **kw):
        """Get check constraints of a table in a schema. Stub: always empty."""
        return []

    def normalize_name(self, name):
        """Normalize name: strip trailing whitespace and lowercase."""
        if name is None:
            return None
        return name.rstrip().lower()

    def denormalize_name(self, name):
        """Denormalize name (identity)."""
        return name

    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Get foreign keys of a table in a schema. Stub: always empty."""
        return []

    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Get indexes of a table in a schema. Stub: always empty."""
        return []

    def visit_create_index(self, create):
        """Disable index creation since that's not a thing in Vertica."""
        return None

    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Get primary key of a table in a schema.

        Returns the empty reflection structure SQLAlchemy expects;
        returning ``None`` (as the original did) breaks
        ``Inspector.get_pk_constraint`` callers that index into the result.
        """
        return {"constrained_columns": [], "name": None}

    def get_all_columns(self, connection, table, schema=None, **kw):
        """Get all columns of a table in a schema. Stub: always empty."""
        return []

    def get_columns(self, connection, table_name, schema=None, **kw):
        """Get all columns of a table in a schema (delegates to get_all_columns)."""
        return self.get_all_columns(connection, table_name, schema)

    def get_view_columns(self, connection, view, schema=None, **kw):
        """Get columns of a view in a schema. Stub: always empty."""
        return []

    def get_view_comment(self, connection, view, schema=None, **kw):
        """Get comment of a view. Stub: echoes the view name."""
        return {"text": view}

View File

@ -0,0 +1,23 @@
"""Vertica dialect."""
from __future__ import absolute_import, division, print_function
from .base import VerticaDialect as BaseVerticaDialect
# noinspection PyAbstractClass, PyClassHasNoInit
# noinspection PyAbstractClass, PyClassHasNoInit
class VerticaDialect(BaseVerticaDialect):
    """Vertica dialect backed by the ``vertica-python`` DBAPI driver."""

    driver = "vertica_python"

    # TODO: support SQL caching, for more info see:
    # https://docs.sqlalchemy.org/en/14/core/connections.html#caching-for-third-party-dialects
    supports_statement_cache = False

    # No lastrowid support. TODO support SELECT LAST_INSERT_ID();
    postfetch_lastrowid = False

    @classmethod
    def dbapi(cls):
        """Return the DBAPI module backing this dialect."""
        import vertica_python

        return vertica_python

View File

@ -23,6 +23,7 @@ class DBType(Enum):
Oracle = DbInfo("oracle")
MSSQL = DbInfo("mssql")
Postgresql = DbInfo("postgresql")
Vertica = DbInfo("vertica")
Clickhouse = DbInfo("clickhouse")
StarRocks = DbInfo("starrocks")
Spark = DbInfo("spark", True)

View File

@ -0,0 +1,78 @@
-- Test fixture, case 1: school management system.
-- Creates schema case_1_student_manager with three tables
-- (students, courses, scores) and seeds sample rows for SQL-assistant tests.
CREATE SCHEMA case_1_student_manager;
COMMENT ON SCHEMA case_1_student_manager is '学校管理系统';
SET SEARCH_PATH = case_1_student_manager;
-- Students: one row per enrolled student.
CREATE TABLE students (
student_id INT PRIMARY KEY,
student_name VARCHAR(100),
major VARCHAR(100),
year_of_enrollment INT,
student_age INT
);
COMMENT ON TABLE students IS '学生信息表';
COMMENT ON COLUMN students.student_name IS '学生姓名';
COMMENT ON COLUMN students.major IS '专业';
COMMENT ON COLUMN students.year_of_enrollment IS '入学年份';
COMMENT ON COLUMN students.student_age IS '学生年龄';
-- Courses: course catalog with credit weight.
CREATE TABLE courses (
course_id INT PRIMARY KEY,
course_name VARCHAR(100),
credit FLOAT
);
COMMENT ON TABLE courses IS '课程信息表';
COMMENT ON COLUMN courses.course_name IS '课程名称';
COMMENT ON COLUMN courses.credit IS '学分';
-- Scores: junction table linking students to courses with a grade per semester.
CREATE TABLE scores (
student_id INT,
course_id INT,
score INT,
semester VARCHAR(50),
PRIMARY KEY (student_id, course_id),
FOREIGN KEY (student_id) REFERENCES students(student_id),
FOREIGN KEY (course_id) REFERENCES courses(course_id)
);
COMMENT ON TABLE scores IS '学生成绩表';
COMMENT ON COLUMN scores.score IS '得分';
COMMENT ON COLUMN scores.semester IS '学期';
-- Seed data: 10 students, 10 courses, 10 score rows.
INSERT INTO students (student_id, student_name, major, year_of_enrollment, student_age) VALUES
(1, '张三', '计算机科学', 2020, 20),
(2, '李四', '计算机科学', 2021, 19),
(3, '王五', '物理学', 2020, 21),
(4, '赵六', '数学', 2021, 19),
(5, '周七', '计算机科学', 2022, 18),
(6, '吴八', '物理学', 2020, 21),
(7, '郑九', '数学', 2021, 19),
(8, '孙十', '计算机科学', 2022, 18),
(9, '刘十一', '物理学', 2020, 21),
(10, '陈十二', '数学', 2021, 19);
INSERT INTO courses (course_id, course_name, credit) VALUES
(1, '计算机基础', 3),
(2, '数据结构', 4),
(3, '高等物理', 3),
(4, '线性代数', 4),
(5, '微积分', 5),
(6, '编程语言', 4),
(7, '量子力学', 3),
(8, '概率论', 4),
(9, '数据库系统', 4),
(10, '计算机网络', 4);
INSERT INTO scores (student_id, course_id, score, semester) VALUES
(1, 1, 90, '2020年秋季'),
(1, 2, 85, '2021年春季'),
(2, 1, 88, '2021年秋季'),
(2, 2, 90, '2022年春季'),
(3, 3, 92, '2020年秋季'),
(3, 4, 85, '2021年春季'),
(4, 3, 88, '2021年秋季'),
(4, 4, 86, '2022年春季'),
(5, 1, 90, '2022年秋季'),
(5, 2, 87, '2023年春季');
COMMIT;

View File

@ -0,0 +1,78 @@
-- Test fixture, case 2: e-commerce system.
-- Creates schema case_2_ecom with three tables (users, products, orders)
-- and seeds sample rows for SQL-assistant tests.
CREATE SCHEMA case_2_ecom;
COMMENT ON SCHEMA case_2_ecom is '电子商务系统';
SET SEARCH_PATH = case_2_ecom;
-- Users: one row per registered customer.
CREATE TABLE users (
user_id INT PRIMARY KEY,
user_name VARCHAR(100),
user_email VARCHAR(100),
registration_date DATE,
user_country VARCHAR(100)
);
COMMENT ON TABLE users IS '用户信息表';
COMMENT ON COLUMN users.user_name IS '用户名';
COMMENT ON COLUMN users.user_email IS '用户邮箱';
COMMENT ON COLUMN users.registration_date IS '注册日期';
COMMENT ON COLUMN users.user_country IS '用户国家';
-- Products: catalog with unit price.
CREATE TABLE products (
product_id INT PRIMARY KEY,
product_name VARCHAR(100),
product_price FLOAT
);
COMMENT ON TABLE products IS '商品信息表';
COMMENT ON COLUMN products.product_name IS '商品名称';
COMMENT ON COLUMN products.product_price IS '商品价格';
-- Orders: junction table linking users to purchased products.
CREATE TABLE orders (
order_id INT PRIMARY KEY,
user_id INT,
product_id INT,
quantity INT,
order_date DATE,
FOREIGN KEY (user_id) REFERENCES users(user_id),
FOREIGN KEY (product_id) REFERENCES products(product_id)
);
COMMENT ON TABLE orders IS '订单信息表';
COMMENT ON COLUMN orders.quantity IS '数量';
COMMENT ON COLUMN orders.order_date IS '订单日期';
-- Seed data: 10 users, 10 products, 10 orders.
INSERT INTO users (user_id, user_name, user_email, registration_date, user_country) VALUES
(1, 'John', 'john@gmail.com', '2020-01-01', 'USA'),
(2, 'Mary', 'mary@gmail.com', '2021-01-01', 'UK'),
(3, 'Bob', 'bob@gmail.com', '2020-01-01', 'USA'),
(4, 'Alice', 'alice@gmail.com', '2021-01-01', 'UK'),
(5, 'Charlie', 'charlie@gmail.com', '2020-01-01', 'USA'),
(6, 'David', 'david@gmail.com', '2021-01-01', 'UK'),
(7, 'Eve', 'eve@gmail.com', '2020-01-01', 'USA'),
(8, 'Frank', 'frank@gmail.com', '2021-01-01', 'UK'),
(9, 'Grace', 'grace@gmail.com', '2020-01-01', 'USA'),
(10, 'Helen', 'helen@gmail.com', '2021-01-01', 'UK');
INSERT INTO products (product_id, product_name, product_price) VALUES
(1, 'iPhone', 699),
(2, 'Samsung Galaxy', 599),
(3, 'iPad', 329),
(4, 'Macbook', 1299),
(5, 'Apple Watch', 399),
(6, 'AirPods', 159),
(7, 'Echo', 99),
(8, 'Kindle', 89),
(9, 'Fire TV Stick', 39),
(10, 'Echo Dot', 49);
INSERT INTO orders (order_id, user_id, product_id, quantity, order_date) VALUES
(1, 1, 1, 1, '2022-01-01'),
(2, 1, 2, 1, '2022-02-01'),
(3, 2, 3, 2, '2022-03-01'),
(4, 2, 4, 1, '2022-04-01'),
(5, 3, 5, 2, '2022-05-01'),
(6, 3, 6, 3, '2022-06-01'),
(7, 4, 7, 2, '2022-07-01'),
(8, 4, 8, 1, '2022-08-01'),
(9, 5, 9, 2, '2022-09-01'),
(10, 5, 10, 3, '2022-10-01');
COMMIT;

View File

@ -0,0 +1,29 @@
-- Test fixture: catalog of SQL-assistant test cases.
-- Each row pairs a natural-language question with the expected SQL and
-- a description of the correct output, referencing the case_1/case_2 schemas.
CREATE SCHEMA test_case_info;
COMMENT ON SCHEMA test_case_info is '测试用例信息';
SET SEARCH_PATH = test_case_info;
-- case_id is auto-generated (SERIAL); callers insert the remaining columns.
CREATE TABLE test_cases (
case_id SERIAL /*INT AUTO_INCREMENT*/ PRIMARY KEY,
scenario_name VARCHAR(100),
scenario_description VARCHAR(6500),
test_question VARCHAR(500),
expected_sql VARCHAR(6500),
correct_output VARCHAR(6500)
);
COMMENT ON TABLE test_cases IS '测试用例表';
COMMENT ON COLUMN test_cases.scenario_name IS '场景名称';
COMMENT ON COLUMN test_cases.scenario_description IS '场景描述';
COMMENT ON COLUMN test_cases.test_question IS '测试问题';
COMMENT ON COLUMN test_cases.expected_sql IS '预期SQL';
COMMENT ON COLUMN test_cases.correct_output IS '正确输出';
-- Seed: three school-management cases and three e-commerce cases.
INSERT INTO test_cases (scenario_name, scenario_description, test_question, expected_sql, correct_output) VALUES
('学校管理系统', '测试SQL助手的联合查询条件查询和排序功能', '查询所有学生的姓名,专业和成绩,按成绩降序排序', 'SELECT students.student_name, students.major, scores.score FROM students JOIN scores ON students.student_id = scores.student_id ORDER BY scores.score DESC;', '返回所有学生的姓名,专业和成绩,按成绩降序排序的结果'),
('学校管理系统', '测试SQL助手的联合查询条件查询和排序功能', '查询计算机科学专业的学生的平均成绩', 'SELECT AVG(scores.score) as avg_score FROM students JOIN scores ON students.student_id = scores.student_id WHERE students.major = ''计算机科学'';', '返回计算机科学专业学生的平均成绩'),
('学校管理系统', '测试SQL助手的联合查询条件查询和排序功能', '查询哪些学生在2023年秋季学期的课程学分总和超过15', 'SELECT students.student_name FROM students JOIN scores ON students.student_id = scores.student_id JOIN courses ON scores.course_id = courses.course_id WHERE scores.semester = ''2023年秋季'' GROUP BY students.student_id HAVING SUM(courses.credit) > 15;', '返回在2023年秋季学期的课程学分总和超过15的学生的姓名'),
('电商系统', '测试SQL助手的数据聚合和分组功能', '查询每个用户的总订单数量', 'SELECT users.user_name, COUNT(orders.order_id) as order_count FROM users JOIN orders ON users.user_id = orders.user_id GROUP BY users.user_id;', '返回每个用户的总订单数量'),
('电商系统', '测试SQL助手的数据聚合和分组功能', '查询每种商品的总销售额', 'SELECT products.product_name, SUM(products.product_price * orders.quantity) as total_sales FROM products JOIN orders ON products.product_id = orders.product_id GROUP BY products.product_id;', '返回每种商品的总销售额'),
('电商系统', '测试SQL助手的数据聚合和分组功能', '查询2023年最受欢迎的商品订单数量最多的商品', 'SELECT products.product_name FROM products JOIN orders ON products.product_id = orders.product_id WHERE YEAR(orders.order_date) = 2023 GROUP BY products.product_id ORDER BY COUNT(orders.order_id) DESC LIMIT 1;', '返回2023年最受欢迎的商品订单数量最多的商品的名称');
COMMIT;

View File

@ -7,6 +7,7 @@ The list of data sources we currently support is as follows.
| ------------------------------------------------------------------------------ |---------| ------------------------------------------- |
| [MySQL](https://www.mysql.com/) | Yes | MySQL is the world's most popular open source database. |
| [PostgresSQL](https://www.postgresql.org/) | Yes | The World's Most Advanced Open Source Relational Database |
| [Vertica](https://www.vertica.com/) | Yes | Vertica is a strongly consistent, ACID-compliant, SQL data warehouse, built for the scale and complexity of today's data-driven world. |
| [Spark](https://github.com/apache/spark) | Yes | Unified Engine for large-scale data analytics |
| [DuckDB](https://github.com/duckdb/duckdb) | Yes | DuckDB is an in-process SQL OLAP database management system |
| [Sqlite](https://github.com/sqlite/sqlite) | Yes | |
@ -22,4 +23,4 @@ The list of data sources we currently support is as follows.
| [Elasticsearch](https://github.com/elastic/elasticsearch) | No | Free and Open, Distributed, RESTful Search Engine |
| [OceanBase](https://github.com/OceanBase) | No | OceanBase is a distributed relational database. |
| [TiDB](https://github.com/pingcap/tidb) | No | TODO |
| [StarRocks](https://github.com/StarRocks/starrocks) | Yes | StarRocks is a next-gen, high-performance analytical data warehouse |
| [StarRocks](https://github.com/StarRocks/starrocks) | Yes | StarRocks is a next-gen, high-performance analytical data warehouse |

View File

@ -609,6 +609,7 @@ def all_datasource_requires():
"thrift",
"thrift_sasl",
"neo4j",
"vertica_python",
]

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.3 KiB

View File

@ -8,6 +8,7 @@ export type DBType =
| 'clickhouse'
| 'oracle'
| 'postgresql'
| 'vertica'
| 'db2'
| 'access'
| 'mongodb'

View File

@ -64,6 +64,11 @@ export const dbMapper: Record<DBType, { label: string; icon: string; desc: strin
icon: '/icons/postgresql.png',
desc: 'Powerful open-source relational database with extensibility and SQL standards.',
},
vertica: {
label: 'Vertica',
icon: '/icons/vertica.png',
desc: 'Vertica is a strongly consistent, ACID-compliant, SQL data warehouse, built for the scale and complexity of todays data-driven world.',
},
spark: { label: 'Spark', icon: '/icons/spark.png', desc: 'Unified engine for large-scale data analytics.' },
hive: { label: 'Hive', icon: '/icons/hive.png', desc: 'A distributed fault-tolerant data warehouse system.' },
space: { label: 'Space', icon: '/icons/knowledge.png', desc: 'knowledge analytics.' },