doc:update deploy doc

This commit is contained in:
aries_ckt 2023-11-03 14:51:25 +08:00
parent 6fe7bfd63d
commit 0874fa2526
12 changed files with 2000 additions and 125 deletions

928
docs/_static/css/custom.css vendored Normal file
View File

@ -0,0 +1,928 @@
/* override default colors used in the Sphinx theme */
:root {
--tabs-color-label-active: #0475DE;
--tabs-color-label-hover: #0475DE;
--buttons-color-blue: #0475DE;
--tabs-color-label-inactive: #9E9E9E;
--tabs-color-overline: #e0e0e0;
--tabs-color-underline: #e0e0e0;
--border-color-gray: #e0e0e0;
--background-color-light-gray:#fafafa;
--background-color-disabled: #9E9E9E;
--pst-color-link: 4, 117, 222;
--pst-color-primary: 4, 117, 222;
--pst-color-text-secondary: #616161;
--blue: #0475DE;
--sidebar-top: 5em;
}
/* Remove flicker for announcement top bar replacement */
.header-item.announcement {
background-color: white;
color: white;
padding: 0;
}
/* Make the book theme secondary nav stick below the new main top nav */
.header-article {
top: 58px;
z-index: 900 !important;
}
.toctree-l1.has-children {
font-weight: bold;
}
.toctree-l2 {
font-weight: normal;
}
div.navbar-brand-box {
padding-top: 4em;
}
td p {
margin-left: 0.75rem;
}
table.longtable.table.autosummary {
table-layout: fixed;
}
.table.autosummary td {
width: 100%;
}
tr.row-odd {
background-color: #f9fafb;
}
/* For Algolia search box
* overflow-y: to flow-over horizontally into main content
* height: to prevent topbar overlap
*/
#site-navigation {
overflow-y: auto;
height: calc(100vh - var(--sidebar-top));
position: sticky;
top: var(--sidebar-top) !important;
}
/* Center the algolia search bar*/
#search-input {
text-align: center;
}
.algolia-autocomplete {
width: 100%;
margin: auto;
}
/* Hide confusing "<-" back arrow in navigation for larger displays */
@media (min-width: 768px) {
#navbar-toggler {
display: none;
}
}
/* Make navigation scrollable on mobile, by making algolia not overflow */
@media (max-width: 768px) {
#site-navigation {
overflow-y: scroll;
}
.algolia-autocomplete .ds-dropdown-menu{
min-width: 250px;
}
}
/* sphinx-panels overrides the content width to 1140 for large displays.*/
@media (min-width: 1200px) {
.container, .container-lg, .container-md, .container-sm, .container-xl {
max-width: 1400px !important;
}
}
.bottom-right-promo-banner {
position: fixed;
bottom: 100px;
right: 20px;
width: 270px;
}
@media (max-width: 1500px) {
.bottom-right-promo-banner {
display: none;
}
}
@media screen and (max-width: 767px) {
.remove-mobile {
display: none;
}
}
@media screen and (max-width: 767px) {
.row-2-column {
flex-direction: column;
margin-top: 20px;
}
}
/* Make Algolia search box scrollable */
.algolia-autocomplete .ds-dropdown-menu {
height: 60vh !important;
overflow-y: scroll !important;
}
.bd-sidebar__content {
overflow-y: unset !important;
}
.bd-sidebar__top {
display: flex;
flex-direction: column;
}
.bd-sidebar li {
position: relative;
word-wrap: break-word;
}
nav.bd-links {
flex: 1;
}
nav.bd-links::-webkit-scrollbar-thumb {
background-color: #ccc;
}
nav.bd-links::-webkit-scrollbar {
width: 5px;
}
dt:target, span.highlighted {
background-color: white;
}
div.sphx-glr-bigcontainer {
display: inline-block;
width: 100%;
}
td.tune-colab,
th.tune-colab {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
/* Adjustment to Sphinx Book Theme */
.table td {
/* Remove row spacing on the left */
padding-left: 0;
}
.table thead th {
/* Remove row spacing on the left */
padding-left: 0;
}
img.inline-figure {
/* Override the display: block for img */
display: inherit !important;
}
#version-warning-banner {
/* Make version warning clickable */
z-index: 1;
margin-left: 0;
/* 20% is for ToC rightbar */
/* 2 * 1.5625em is for horizontal margins */
width: calc(100% - 20% - 2 * 1.5625em);
}
/* allow scrollable images */
.figure {
max-width: 100%;
overflow-x: auto;
}
img.horizontal-scroll {
max-width: none;
}
.clear-both {
clear: both;
min-height: 100px;
margin-top: 15px;
}
.buttons-float-left {
width: 150px;
float: left;
}
.buttons-float-right {
width: 150px;
float: right;
}
.card-body {
padding: 0.5rem !important;
}
/* custom css for pre elements */
pre {
/* Wrap code blocks instead of horizontal scrolling. */
white-space: pre-wrap;
box-shadow: none;
border-color: var(--border-color-gray);
background-color: var(--background-color-light-gray);
border-radius:0.25em;
}
/* notebook formatting */
.cell .cell_output {
max-height: 250px;
overflow-y: auto;
font-weight: bold;
}
/* Yellow doesn't render well on light background */
.cell .cell_output pre .-Color-Yellow {
color: #785840;
}
/* Newlines (\a) and spaces (\20) before each parameter */
.sig-param::before {
content: "\a\20\20\20\20";
white-space: pre;
}
/* custom css for outlined buttons */
.btn-outline-info:hover span, .btn-outline-primary:hover span {
color: #fff;
}
.btn-outline-info, .btn-outline-primary{
border-color: var(--buttons-color-blue);
}
.btn-outline-info:hover, .btn-outline-primary:hover{
border-color: var(--buttons-color-blue);
background-color: var(--buttons-color-blue);
}
.btn-outline-info.active:not(:disabled):not(.disabled), .btn-outline-info:not(:disabled):not(.disabled):active, .show>.btn-outline-info.dropdown-toggle {
border-color: var(--buttons-color-blue);
background-color: var(--buttons-color-blue);
color: #fff;
}
.btn-info, .btn-info:hover, .btn-info:focus {
border-color: var(--buttons-color-blue);
background-color: var(--buttons-color-blue);
}
.btn-info:hover{
opacity: 90%;
}
.btn-info:disabled{
border-color: var(--background-color-disabled);
background-color: var(--background-color-disabled);
opacity: 100%;
}
.btn-info.active:not(:disabled):not(.disabled), .btn-info:not(:disabled):not(.disabled):active, .show>.btn-info.dropdown-toggle {
border-color: var(--buttons-color-blue);
background-color: var(--buttons-color-blue);
}
.topnav {
background-color: white;
border-bottom: 1px solid rgba(0, 0, 0, .1);
display: flex;
align-items: center;
}
/* Content wrapper for the unified nav link / menus */
.top-nav-content {
max-width: 1400px;
width: 100%;
margin-left: auto;
margin-right: auto;
padding: 0 1.5rem;
display: flex;
align-items: center;
justify-content: space-between;
}
@media (max-width: 900px) {
/* If the window is too small, hide the custom sticky navigation bar at the top of the page.
Also make the pydata-sphinx-theme nav bar, which usually sits below the top nav bar, stick
to the top of the page.
*/
.top-nav-content {
display: none;
}
div.header-article.row.sticky-top.noprint {
position: sticky;
top: 0;
}
}
/* Styling the links and menus in the top nav */
.top-nav-content a {
text-decoration: none;
color: black;
font-size: 17px;
}
.top-nav-content a:hover {
color: #007bff;
}
/* The left part are the links and menus */
.top-nav-content > .left {
display: flex;
white-space: nowrap;
}
.top-nav-content .left > * {
margin-right: 8px;
}
.top-nav-content .left > a,
.top-nav-content .left > .menu > a {
text-align: center;
padding: 14px 16px;
border-bottom: 2px solid white;
}
.top-nav-content .menu:hover > a,
.top-nav-content .left > a:hover {
border-bottom: 2px solid #007bff;
}
/* Special styling for the Ray logo */
.top-nav-content .left > a.ray-logo {
width: 90px;
padding: 10px 0;
}
.top-nav-content .left > a.ray-logo:hover {
border-bottom: 2px solid white;
}
/* Styling the dropdown menus */
.top-nav-content .menu {
display: flex;
}
.top-nav-content .menu > a > .down-caret {
margin-left: 8px;
}
.top-nav-content .menu > ul {
display: none;
}
.top-nav-content > button.try-anyscale > span {
margin: 0 12px;
}
.top-nav-content .menu:hover > ul {
display: flex;
flex-direction: column;
align-items: flex-start;
box-shadow: 0 5px 15px 0 rgb(0 0 0 / 10%);
padding: 15px;
width: 330px;
position: absolute;
z-index: 2000;
background-color: white;
top: 58px;
}
.top-nav-content .menu:hover > ul > li {
list-style: none;
padding: 5px 0;
}
.top-nav-content .menu:hover > ul > li span {
display: block;
}
.top-nav-content .menu:hover > ul > li span.secondary {
color: #787878;
}
/* Styling the "Try Anyscale" button */
.top-nav-content > button.try-anyscale {
float: right;
border-radius: 6px;
background-color: #e7f2fa;
padding-left: 12px;
padding-right: 12px;
margin-left: 12px;
height: 40px;
border: none;
white-space: nowrap;
}
@media (max-width: 1000px) {
.top-nav-content > button.try-anyscale {
display: none;
}
}
/* custom css for tabs*/
.tabbed-set>label,.tabbed-set>label:hover {
border-bottom: 1px solid var(--border-color-gray);
color:var(--tabs-color-label-inactive);
font-weight: 500;
}
.tabbed-set>input:checked+label{
border-bottom: 0.125em solid;
color:var(--tabs-color-label-active);
}
.tabbed-label{
margin-bottom:0;
}
/* custom css for jupyter cells */
div.cell div.cell_input{
border: 1px var(--border-color-gray) solid;
background-color: var(--background-color-light-gray);
border-radius:0.25em;
border-left-color: var(--green);
border-left-width: medium;
}
/* custom css for table */
table {
border-color: var(--border-color-gray);
}
/* custom css for topic component */
div.topic{
border: 1px solid var(--border-color-gray);
border-radius:0.25em;
}
.topic {
background-color: var(--background-color-light-gray);
}
/* custom css for card component */
.card{
border-color: var(--border-color-gray);
}
.card-footer{
background-color: var(--background-color-light-gray);
border-top-color: var(--border-color-gray);
}
/* custom css for section navigation component */
.bd-toc nav>.nav {
border-left-color: var(--border-color-gray);
}
/* custom css for up and down arrows in collapsible cards */
details.dropdown .summary-up, details.dropdown .summary-down {
top: 1em;
}
/* remove focus border in collapsible admonition buttons */
.toggle.admonition button.toggle-button:focus {
outline: none;
}
/* custom css for shadow class */
.shadow {
box-shadow: 0 0.2rem 0.5rem rgb(0 0 0 / 5%), 0 0 0.0625rem rgb(0 0 0 / 10%) !important;
}
/* custom css for text area */
textarea {
border-color: var(--border-color-gray);
}
/* custom css for footer */
footer {
margin-top: 1rem;
padding:1em 0;
border-top-color: var(--border-color-gray);
}
.footer p{
color: var(--pst-color-text-secondary);
}
/* Make the hover color of tag/gallery buttons differ from "active" */
.tag.btn-outline-primary:hover {
background-color: rgba(20, 99, 208, 0.62) !important;
}
span.rst-current-version > span.fa.fa-book {
/* Move the book icon away from the top right
* corner of the version flyout menu */
margin: 10px 0px 0px 5px;
}
/*Extends the docstring signature box.*/
.rst-content dl:not(.docutils) dt {
display: block;
padding: 10px;
word-wrap: break-word;
padding-right: 100px;
}
/*Lists in an admonition note do not have awkward whitespace below.*/
.rst-content .admonition-note .section ul {
margin-bottom: 0;
}
/*Properties become blue (classmethod, staticmethod, property)*/
.rst-content dl dt em.property {
color: #2980b9;
text-transform: uppercase;
}
.rst-content .section ol p,
.rst-content .section ul p {
margin-bottom: 0;
}
/* Adjustment to Version block */
.rst-versions {
z-index: 1200 !important;
}
.image-header {
display: flex;
flex-direction: row;
align-items: center;
padding-left: 16px;
padding-right:16px;
gap: 16px;
}
.info-box {
box-shadow: 0px 4px 20px rgba(0, 0, 0, 0.05);
border-radius: 8px;
padding: 20px;
}
.info-box:hover{
box-shadow: 0px 4px 20px rgba(0, 0, 0, 0.1);
}
.no-underline{
text-decoration: none;
}
.no-underline:hover{
text-decoration: none;
}
.icon-hover:hover{
height: 30px ;
width: 30px;
}
.info-box-2 {
background-color: #F9FAFB;
border-radius: 8px;
padding-right: 16px;
padding-left: 16px;
padding-bottom: 24px;
padding-top: 4px;
}
.bold-link {
color: #000000 !important;
font-weight: 600;
}
.community-box {
border: 1px solid #D2DCE6;
border-radius: 8px;
display: flex;
margin-bottom: 16px;
}
.community-box:hover {
box-shadow: 0px 4px 20px rgba(0, 0, 0, 0.05);
text-decoration: none;
}
.community-box p {
margin-top: 1rem !important;
}
.tab-pane pre {
margin: 0;
padding: 0;
max-height: 252px;
overflow-y: auto;
}
.grid-container {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(300px,1fr));
grid-gap: 16px;
}
.grid-item {
padding: 20px;
}
.nav-pills {
background-color: #F9FAFB;
color: #000000;
padding: 8px;
border-bottom:none;
border-radius: 8px;
}
.nav-pills .nav-link.active {
background-color: #FFFFFF !important;
box-shadow: 0px 3px 14px 2px rgba(3,28,74,0.12);
border-radius: 8px;
padding: 20px;
color: #000000;
font-weight: 500;
}
.searchDiv {
width: 100%;
position: relative;
display: block;
}
.searchTerm {
width: 80%;
border: 2px solid var(--blue);
padding: 5px;
height: 45px;
border-radius: 5px;
outline: none;
}
.searchButton {
width: 40px;
height: 45px;
border: 1px solid var(--blue);
background: var(--blue);
color: #fff;
border-radius: 5px;
cursor: pointer;
font-size: 20px;
}
/*Resize the wrap to see the search bar change!*/
.searchWrap {
width: 100%;
position: relative;
margin: 15px;
top: 50%;
left: 50%;
transform: translate(-50%, -10%);
text-align: center;
}
.sd-card {
border: none !important;
}
.tag {
margin-bottom: 5px;
font-size: small;
}
/* Override float positioning of next-prev buttons so that
they take up space normally, and we can put other stuff at
the bottom of the page. */
.prev-next-area {
display: flex;
flex-direction: row;
}
.prev-next-area a.left-prev {
margin-right: auto;
width: fit-content;
float: none;
}
.prev-next-area a.right-next {
margin-left: auto;
width: fit-content;
float: none;
}
/* CSAT widgets */
#csat-inputs {
display: flex;
flex-direction: row;
align-items: center;
}
.csat-hidden {
display: none !important;
}
#csat-feedback-label {
color: #000;
font-weight: 500;
}
.csat-button {
margin-left: 16px;
padding: 8px 16px 8px 16px;
border-radius: 4px;
border: 1px solid #D2DCE6;
background: #FFF;
display: flex;
flex-direction: row;
align-items: center;
justify-content: center;
cursor: pointer;
width: 85px;
}
#csat-textarea-group {
display: flex;
flex-direction: column;
}
#csat-submit {
margin-left: auto;
font-weight: 700;
border: none;
margin-top: 12px;
cursor: pointer;
}
#csat-feedback-received {
display: flex;
flex-direction: row;
align-items: center;
justify-content: center;
}
.csat-button-active {
border: 1px solid #000;
}
.csat-icon {
margin-right: 4px;
}
footer.col.footer {
display: flex;
flex-direction: row;
}
footer.col.footer > p {
margin-left: auto;
}
#csat {
min-width: 60%;
}
#csat-textarea {
resize: none;
}
/* Ray Assistant */
.container-xl.blurred {
filter: blur(5px);
}
.chat-widget {
position: fixed;
bottom: 10px;
right: 10px;
z-index: 1000;
}
.chat-popup {
display: none;
position: fixed;
top: 20%;
left: 50%;
transform: translate(-50%, -20%);
width: 50%;
height: 70%;
background-color: white;
border: 1px solid #ccc;
border-radius: 10px;
box-shadow: 0 5px 10px rgba(0,0,0,0.1);
z-index: 1001;
max-height: 1000px;
overflow: hidden;
padding-bottom: 40px;
}
.chatFooter {
position: absolute;
bottom: 0;
right: 0;
width: 100%;
background-color: #f8f9fa;
}
#openChatBtn {
background-color: #000;
color: #fff;
width: 70px;
height: 70px;
border-radius: 10px;
border: none;
display: flex;
align-items: center;
justify-content: center;
}
#closeChatBtn {
border: none;
background-color: transparent;
color: #000;
font-size: 1.2em;
}
#closeChatBtn:hover {
color: #888;
}
.chatHeader {
display: flex;
justify-content: space-between;
align-items: center;
}
.chatContentContainer {
padding: 15px;
max-height: calc(100% - 80px);
overflow-y: auto;
}
.chatContentContainer input {
margin-top: 10px;
margin-bottom: 10px;
}
#result{
padding: 15px;
border-radius: 10px;
margin-top: 10px;
margin-bottom: 10px;
background-color: #f8f9fa;
max-height: calc(100% - 20px);
overflow-y: auto;
}
.chatContentContainer textarea {
flex-grow: 1;
min-width: 50px;
max-height: 40px;
resize: none;
}
.searchBtn {
white-space: nowrap;
}
.input-group {
display: flex;
align-items: stretch;
}
/* Kapa Ask AI button */
#kapa-widget-container figure {
padding: 0 !important;
}
.mantine-Modal-root figure {
padding: 0 !important;
}
@font-face {
font-family: "Linux Biolinum Keyboard";
src: url(../fonts/LinBiolinum_Kah.ttf);
}
.keys {
font-family: "Linux Biolinum Keyboard", sans-serif;
}
.bd-article-container h1, .bd-article-container h2, .bd-article-container h3, .bd-article-container h4, .bd-article-container h5, .bd-article-container p.caption {
color: black;
}

223
docs/_static/css/examples.css vendored Normal file
View File

@ -0,0 +1,223 @@
:root {
--ray-example-gallery-gap-x: 18px;
--ray-example-gallery-gap-y: 22px;
--sidebar-top: 5em;
}
#site-navigation {
width: 330px !important;
border-right: none;
margin-left: 32px;
overflow-y: auto;
max-height: calc(100vh - var(--sidebar-top));
position: sticky;
top: var(--sidebar-top) !important;
z-index: 1000;
}
#site-navigation h5 {
font-size: 16px;
font-weight: 600;
color: #000;
}
#site-navigation h6 {
font-size: 14px;
font-weight: 600;
color: #000;
text-transform: uppercase;
}
/* Hide the default sidebar content */
#site-navigation > div.bd-sidebar__content {
display: none;
}
#site-navigation > div.rtd-footer-container {
display: none;
}
.searchDiv {
margin-bottom: 2em;
}
#searchInput {
width: 100%;
color: #5F6469;
border: 1px solid #D2DCE6;
height: 50px;
border-radius: 4px;
background-color: #F9FAFB;
background-image: url("data:image/svg+xml,%3Csvg width='25' height='25' viewBox='0 0 25 25' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cg id='Systems / search-line' clip-path='url(%23clip0_1_150)'%3E%3Crect width='24' height='24' transform='translate(0.398529 0.0546875)' fill='%23F9FAFB'/%3E%3Cg id='Group'%3E%3Cpath id='Vector' d='M18.4295 16.6717L22.7125 20.9537L21.2975 22.3687L17.0155 18.0857C15.4223 19.3629 13.4405 20.0576 11.3985 20.0547C6.43053 20.0547 2.39853 16.0227 2.39853 11.0547C2.39853 6.08669 6.43053 2.05469 11.3985 2.05469C16.3665 2.05469 20.3985 6.08669 20.3985 11.0547C20.4014 13.0967 19.7068 15.0784 18.4295 16.6717ZM16.4235 15.9297C17.6926 14.6246 18.4014 12.8751 18.3985 11.0547C18.3985 7.18669 15.2655 4.05469 11.3985 4.05469C7.53053 4.05469 4.39853 7.18669 4.39853 11.0547C4.39853 14.9217 7.53053 18.0547 11.3985 18.0547C13.219 18.0576 14.9684 17.3488 16.2735 16.0797L16.4235 15.9297V15.9297Z' fill='%238C9196'/%3E%3C/g%3E%3C/g%3E%3Cdefs%3E%3CclipPath id='clip0_1_150'%3E%3Crect width='24' height='24' fill='white' transform='translate(0.398529 0.0546875)'/%3E%3C/clipPath%3E%3C/defs%3E%3C/svg%3E%0A");
background-repeat: no-repeat;
background-position-x: 0.5em;
background-position-y: center;
background-size: 1.5em;
padding-left: 3em;
}
#searchInput::placeholder {
color: #5F6469;
opacity: 1;
}
.tag {
margin-bottom: 5px;
font-size: small;
color: #000000;
border: 1px solid #D2DCE6;
border-radius: 14px;
display: flex;
flex-direction: row;
align-items: center;
width: fit-content;
gap: 1em;
}
.tag.btn-outline-primary {
color: #000000;
padding: 3px 12px 3px 12px;
line-height: 20px;
}
.tag-btn-wrapper {
display: flex;
flex-direction: row;
flex-wrap: wrap;
gap: 1em;
}
div.sd-container-fluid.docutils > div {
gap: var(--ray-example-gallery-gap-y) var(--ray-example-gallery-gap-x);
display: grid;
grid-template-columns: 1fr;
}
/* Reflow to a 2-column format for normal screens */
@media screen and (min-width: 768px) {
div.sd-container-fluid.docutils > div {
grid-template-columns: 1fr 1fr;
}
}
div.gallery-item {
width: auto;
}
div.gallery-item > div.sd-card {
border-radius: 8px;
box-shadow: 0px 4px 10px 0px rgba(0, 0, 0, 0.05) !important;
}
/* Example gallery "Tutorial" title */
div.sd-card-title > span.sd-bg-success.sd-bg-text-success {
color: #2F80ED !important;
font-weight: 500;
background: linear-gradient(180deg, rgba(25, 177, 226, 0.2) 0%, rgba(0, 109, 255, 0.2) 100%);
background-color: initial !important;
}
/* Example gallery "Code example" title */
div.sd-card-title > span.sd-bg-secondary.sd-bg-text-secondary {
color: #219653 !important;
font-weight: 500;
background: linear-gradient(180deg, rgba(29, 151, 108, 0.2) 0%, rgba(0, 226, 147, 0.2) 100%);
background-color: initial !important;
}
/* Example gallery "Blog" title */
div.sd-card-title > span.sd-bg-primary.sd-bg-text-primary {
color: #F2994A !important;
font-weight: 500;
background: linear-gradient(180deg, rgba(255, 230, 5, 0.2) 0%, rgba(255, 185, 80, 0.2) 100%);
background-color: initial !important;
}
/* Example gallery "Video" title */
div.sd-card-title > span.sd-bg-warning.sd-bg-text-warning {
color: #EB5757 !important;
font-weight: 500;
background: linear-gradient(180deg, rgba(150, 7, 7, 0.2) 0%, rgba(255, 115, 115, 0.2) 100%);
background-color: initial !important;
}
/* Example gallery "Course" title */
div.sd-card-title > span.sd-bg-info.sd-bg-text-info {
color: #7A64FF !important;
font-weight: 500;
background: linear-gradient(180deg, rgba(53, 25, 226, 0.2) 0%, rgba(183, 149, 255, 0.2) 100%);
background-color: initial !important;
}
div.sd-card-body > p.sd-card-text > a {
text-align: initial;
}
div.sd-card-body > p.sd-card-text > a > span {
color: rgb(81, 81, 81);
}
#main-content {
max-width: 100%;
}
#noMatches {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
}
#noMatchesInnerContent {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
}
#noMatches.hidden,.gallery-item.hidden {
display: none !important;
}
.btn-primary {
color: #004293;
background: rgba(61, 138, 233, 0.20);
padding: 3px 12px 3px 12px;
border: 1px solid #D2DCE6;
}
button.try-anyscale {
background-color: initial !important;
width: fit-content;
padding: 0 !important;
margin-left: auto !important;
float: initial !important;
}
button.try-anyscale > svg {
display: none;
}
button.try-anyscale > i {
display: none;
}
button.try-anyscale > span {
margin: 0;
text-decoration-line: underline;
font-weight: 500;
color: #000;
}
.top-nav-content {
justify-content: initial;
}
/* Hide nav bar that has github, fullscreen, and print icons */
div.header-article.row.sticky-top.noprint {
display: none !important;
}
/* Hide the footer with 'prev article' and 'next article' buttons */
.footer-article.hidden {
display: none !important;
}

108
docs/_static/css/termynal.css vendored Normal file
View File

@ -0,0 +1,108 @@
/**
* termynal.js
*
* @author Ines Montani <ines@ines.io>
* @version 0.0.1
* @license MIT
*/
/* Terminal color scheme: dark background, light text, muted gray accents */
:root {
--color-bg: #252a33;
--color-text: #eee;
--color-text-subtle: #a2a2a2;
}
/* The terminal window container itself */
[data-termynal] {
width: auto;
max-width: 100%;
background: var(--color-bg);
color: var(--color-text);
font-size: 18px;
font-family: 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace;
border-radius: 4px;
/* Extra top padding leaves room for the window-button/title bar pseudo elements */
padding: 75px 45px 35px;
position: relative;
-webkit-box-sizing: border-box;
box-sizing: border-box;
}
/* Red/yellow/green window buttons in the top-left corner */
[data-termynal]:before {
content: '';
position: absolute;
top: 15px;
left: 15px;
display: inline-block;
width: 15px;
height: 15px;
border-radius: 50%;
/* A little hack to display the window buttons in one pseudo element. */
background: #d9515d;
-webkit-box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930;
box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930;
}
/* Centered "bash" label in the window's title bar */
[data-termynal]:after {
content: 'bash';
position: absolute;
color: var(--color-text-subtle);
top: 5px;
left: 0;
width: 100%;
text-align: center;
}
/* A single terminal line */
[data-ty] {
display: block;
line-height: 2;
}
[data-ty]:before {
/* Set up defaults and ensure empty lines are displayed. */
content: '';
display: inline-block;
vertical-align: middle;
}
/* Spacing and muted color for the prompt prefix on input lines */
[data-ty="input"]:before,
[data-ty-prompt]:before {
margin-right: 0.75em;
color: var(--color-text-subtle);
}
/* Default "$" prompt for input lines */
[data-ty="input"]:before {
content: '$';
}
/* Custom prompt text supplied via the data-ty-prompt attribute */
[data-ty][data-ty-prompt]:before {
content: attr(data-ty-cursor);
}
/* Blinking cursor character appended after the active line */
[data-ty-cursor]:after {
content: attr(data-ty-cursor);
font-family: monospace;
margin-left: 0.5em;
-webkit-animation: blink 1s infinite;
animation: blink 1s infinite;
}
/* Right-aligned control link rendered below the terminal output */
a[data-terminal-control] {
text-align: right;
display: block;
color: #aebbff;
}
/* Cursor animation */
@-webkit-keyframes blink {
50% {
opacity: 0;
}
}
@keyframes blink {
50% {
opacity: 0;
}
}

23
docs/_static/css/use_cases.css vendored Normal file
View File

@ -0,0 +1,23 @@
/* Bordered, centered wrapper box around a query-parameter reference link */
.query-param-ref-wrapper {
display: flex;
justify-content: center;
align-items: center;
border: 1px solid #8C9196;
border-radius: 8px;
}
/* Link into the example gallery: plain black text, no underline */
.example-gallery-link {
padding: 1em 2em 1em 2em;
text-decoration: none !important;
color: black !important;
display: flex;
align-items: center;
}
/* Shooting star icon next to gallery links */
a.example-gallery-link::before {
content: url("data:image/svg+xml,%3Csvg width='24' height='24' viewBox='0 0 24 24' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cg id='Group'%3E%3Cpath id='Vector' d='M15.199 9.945C14.7653 9.53412 14.4863 8.98641 14.409 8.394L14.006 5.311L11.276 6.797C10.7511 7.08302 10.1436 7.17943 9.55597 7.07L6.49997 6.5L7.06997 9.556C7.1794 10.1437 7.08299 10.7511 6.79697 11.276L5.31097 14.006L8.39397 14.409C8.98603 14.4865 9.53335 14.7655 9.94397 15.199L12.082 17.456L13.418 14.649C13.6744 14.1096 14.1087 13.6749 14.648 13.418L17.456 12.082L15.199 9.945ZM15.224 15.508L13.011 20.158C12.9691 20.2459 12.9065 20.3223 12.8285 20.3806C12.7505 20.4389 12.6594 20.4774 12.5633 20.4926C12.4671 20.5079 12.3686 20.4995 12.2764 20.4682C12.1842 20.4369 12.101 20.3836 12.034 20.313L8.49197 16.574C8.39735 16.4742 8.27131 16.41 8.13497 16.392L3.02797 15.724C2.93149 15.7113 2.83954 15.6753 2.76006 15.6191C2.68058 15.563 2.61596 15.4883 2.57177 15.4016C2.52758 15.3149 2.50514 15.2187 2.5064 15.1214C2.50765 15.0241 2.53256 14.9285 2.57897 14.843L5.04097 10.319C5.10642 10.198 5.12831 10.0582 5.10297 9.923L4.15997 4.86C4.14207 4.76417 4.14778 4.66541 4.17662 4.57229C4.20546 4.47916 4.25656 4.39446 4.3255 4.32553C4.39444 4.25659 4.47913 4.20549 4.57226 4.17665C4.66539 4.14781 4.76414 4.14209 4.85997 4.16L9.92297 5.103C10.0582 5.12834 10.198 5.10645 10.319 5.041L14.843 2.579C14.9286 2.53257 15.0242 2.50769 15.1216 2.50648C15.219 2.50528 15.3152 2.52781 15.4019 2.57211C15.4887 2.61641 15.5633 2.68116 15.6194 2.76076C15.6755 2.84036 15.7114 2.93242 15.724 3.029L16.392 8.135C16.4099 8.27134 16.4742 8.39737 16.574 8.492L20.313 12.034C20.3836 12.101 20.4369 12.1842 20.4682 12.2765C20.4995 12.3687 20.5079 12.4671 20.4926 12.5633C20.4774 12.6595 20.4389 12.7505 20.3806 12.8285C20.3223 12.9065 20.2459 12.9691 20.158 13.011L15.508 15.224C15.3835 15.2832 15.2832 15.3835 15.224 15.508ZM16.021 17.435L17.435 16.021L21.678 20.263L20.263 21.678L16.021 17.435Z' fill='black'/%3E%3C/g%3E%3C/svg%3E%0A");
display: flex;
align-items: center;
margin-right: 0.5em;
}

View File

@ -14,7 +14,7 @@ project = "DB-GPT"
copyright = "2023, csunny"
author = "csunny"
version = "👏👏 0.4.0"
version = "👏👏 0.4.1"
html_title = project + " " + version
# -- General configuration ---------------------------------------------------
@ -67,6 +67,14 @@ locales_dirs = ["./locales/"]
gettext_compact = False
gettext_uuid = True
def setup(app):
app.add_css_file("css/custom.css")
app.add_css_file("css/examples.css")
app.add_css_file("css/termynal.css")
# app.add_css_file("css/use_cases.css")
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

View File

@ -5,11 +5,6 @@ Installation From Source
To get started, install DB-GPT with the following steps.
DB-GPT can be deployed on servers with low hardware requirements or on servers with high hardware requirements.
You can install DB-GPT using a third-party LLM REST API service such as OpenAI or Azure.
You can also install DB-GPT by deploying an LLM service from a downloaded LLM model.
1.Preparation
-----------------
@ -23,7 +18,7 @@ And you can also install DB-GPT by deploy LLM Service by download LLM model.
We use Sqlite as default database, so there is no need for database installation. If you choose to connect to other databases, you can follow our tutorial for installation and configuration.
For the entire installation process of DB-GPT, we use the miniconda3 virtual environment. Create a virtual environment and install the Python dependencies.
:ref: `https://docs.conda.io/en/latest/miniconda.html<How to install Miniconda>`
`How to install Miniconda <https://docs.conda.io/en/latest/miniconda.html>`_
.. code-block:: shell
@ -43,6 +38,21 @@ DB-GPT can be deployed on servers with low hardware requirements or on servers w
If your hardware is limited, you can install DB-GPT using a third-party LLM REST API service such as OpenAI, Azure, or Tongyi.
.. tip::
Our project can achieve over 85% of OpenAI's performance with a proxy LLM.
.. note::
Make sure you have installed git-lfs:
CentOS: ``yum install git-lfs``
Ubuntu: ``apt-get install git-lfs``
macOS: ``brew install git-lfs``
.. tabs::
.. tab:: OpenAI
@ -73,7 +83,7 @@ If you are low hardware requirements you can install DB-GPT by Using third-part
.. tab:: Vicuna
([Vicuna-v1.5](https://huggingface.co/lmsys/vicuna-13b-v1.5) based on llama-2 has been released, we recommend you set `LLM_MODEL=vicuna-13b-v1.5` to try this model)
`Vicuna-v1.5 <https://huggingface.co/lmsys/vicuna-13b-v1.5>`_ based on llama-2 has been released, we recommend you set `LLM_MODEL=vicuna-13b-v1.5` to try this model)
.. list-table:: vicuna-v1.5 hardware requirements
:widths: 50 50 50
@ -96,17 +106,6 @@ If you are low hardware requirements you can install DB-GPT by Using third-part
- 20 GB
.. note::
Notice make sure you have install git-lfs
centos:yum install git-lfs
ubuntu:apt-get install git-lfs
macos:brew install git-lfs
.. code-block:: shell
cd DB-GPT
@ -151,16 +150,6 @@ If you are low hardware requirements you can install DB-GPT by Using third-part
- 8-bit
- 20 GB
.. note::
Notice make sure you have install git-lfs
centos:yum install git-lfs
ubuntu:apt-get install git-lfs
macos:brew install git-lfs
.. code-block:: shell
@ -189,16 +178,6 @@ If you are low hardware requirements you can install DB-GPT by Using third-part
.. tab:: ChatGLM
.. note::
Notice make sure you have install git-lfs
centos:yum install git-lfs
ubuntu:apt-get install git-lfs
macos:brew install git-lfs
.. code-block:: shell
@ -302,7 +281,7 @@ If you are low hardware requirements you can install DB-GPT by Using third-part
.. tab:: llama.cpp
DB-GPT already supports [llama.cpp](https://github.com/ggerganov/llama.cpp) via [llama-cpp-python](https://github.com/abetlen/llama-cpp-python).
DB-GPT already supports `llama.cpp <https://github.com/ggerganov/llama.cpp>`_ via `llama-cpp-python <https://github.com/abetlen/llama-cpp-python>`_ .
**Preparing Model Files**
@ -338,7 +317,7 @@ If you are low hardware requirements you can install DB-GPT by Using third-part
LLM_MODEL=llama-cpp
llama_cpp_prompt_template=vicuna_v1.1
Then you can run it according to [Run](https://db-gpt.readthedocs.io/en/latest/getting_started/install/deploy/deploy.html#run).
Then you can run it according to `Run <https://db-gpt.readthedocs.io/en/latest/getting_started/install/deploy/deploy.html#run>`_
**More Configurations**
@ -404,8 +383,9 @@ If you are low hardware requirements you can install DB-GPT by Using third-part
LLM_MODEL=vicuna-13b-v1.5
MODEL_TYPE=vllm
You can view the models supported by vLLM `here <https://vllm.readthedocs.io/en/latest/models/supported_models.html#supported-models>`_
Then you can run it according to `Run <https://db-gpt.readthedocs.io/en/latest/getting_started/install/deploy/deploy.html#run>`_
@ -415,24 +395,23 @@ If you are low hardware requirements you can install DB-GPT by Using third-part
-----------------
**(Optional) load examples into SQLite**
.. code-block:: shell
.. code-block:: shell
bash ./scripts/examples/load_examples.sh
bash ./scripts/examples/load_examples.sh
On windows platform:
On windows platform:
.. code-block:: shell
.. code-block:: shell
.\scripts\examples\load_examples.bat
.\scripts\examples\load_examples.bat
4.Run db-gpt server
-----------------
.. code-block:: shell
.. code-block:: shell
python pilot/server/dbgpt_server.py
python pilot/server/dbgpt_server.py
**Open http://localhost:5000 with your browser to see the product.**

View File

@ -0,0 +1,629 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) 2023, csunny
# This file is distributed under the same license as the DB-GPT package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 👏👏 0.4.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-11-03 13:00+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: zh_CN\n"
"Language-Team: zh_CN <LL@li.org>\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
#: ../../getting_started/install/deploy.rst:4 7a1ee708aa40431981178ebd1d34b9aa
msgid "Installation From Source"
msgstr "源码安装"
#: ../../getting_started/install/deploy.rst:6 dd4c542c563b4d4ca5b710dc7326ff8b
msgid "To get started, install DB-GPT with the following steps."
msgstr "按照以下步骤进行安装"
#: ../../getting_started/install/deploy.rst:8 f352a8d93da744aaab775e8290c74704
msgid ""
"DB-GPT can be deployed on servers with low hardware requirements or on "
"servers with high hardware requirements. You can install DB-GPT by Using "
"third-part LLM REST API Service OpenAI, Azure."
msgstr ""
#: ../../getting_started/install/deploy.rst:11 b89bfaa679d5448791d315b9ffebe7c5
msgid ""
"And you can also install DB-GPT by deploy LLM Service by download LLM "
"model."
msgstr ""
#: ../../getting_started/install/deploy.rst:15 942b2999b3b5432e956c44e2a51b5269
msgid "1.Preparation"
msgstr "1.准备"
#: ../../getting_started/install/deploy.rst:16 d27c8698bd4a45d7a7ebdbba470318d6
msgid "**Download DB-GPT**"
msgstr "**下载DB-GPT项目**"
#: ../../getting_started/install/deploy.rst:22 ce3e61a03ca945b4a3c3b264a063442c
msgid "**Install Miniconda**"
msgstr "**安装Miniconda**"
#: ../../getting_started/install/deploy.rst:24 1fd868a2e84c4752b242b401ac64d0e4
msgid ""
"We use Sqlite as default database, so there is no need for database "
"installation. If you choose to connect to other databases, you can "
"follow our tutorial for installation and configuration. For the entire "
"installation process of DB-GPT, we use the miniconda3 virtual "
"environment. Create a virtual environment and install the Python "
"dependencies. `How to install Miniconda "
"<https://docs.conda.io/en/latest/miniconda.html>`_"
msgstr ""
"目前使用Sqlite作为默认数据库因此DB-"
"GPT快速部署不需要部署相关数据库服务。如果你想使用其他数据库需要先部署相关数据库服务。我们目前使用Miniconda进行python环境和包依赖管理[安装"
" Miniconda](https://docs.conda.io/en/latest/miniconda.html)"
#: ../../getting_started/install/deploy.rst:41 59dc27ad4237444d8eb7229fe29c975d
msgid "2.Deploy LLM Service"
msgstr "2.部署LLM服务"
#: ../../getting_started/install/deploy.rst:42 12ed83127fb744bcb60c5c7c16359a0e
msgid ""
"DB-GPT can be deployed on servers with low hardware requirements or on "
"servers with high hardware requirements."
msgstr "DB-GPT可以部署在对硬件要求不高的服务器也可以部署在对硬件要求高的服务器"
#: ../../getting_started/install/deploy.rst:44 1674c59c24804200ab53bd31847be19a
msgid ""
"If you are low hardware requirements you can install DB-GPT by Using "
"third-part LLM REST API Service OpenAI, Azure, tongyi."
msgstr "Low hardware requirements模式适用于对接第三方模型服务的api,比如OpenAI, 通义千问, 文心。"
#: ../../getting_started/install/deploy.rst:48 e9c37648778540fe982c26d4104931ae
msgid "As our project has the ability to achieve OpenAI performance of over 85%,"
msgstr "使用OpenAI服务可以让DB-GPT准确率达到85%"
#: ../../getting_started/install/deploy.rst:53 201b7af45c0046faada4b81e110e7745
msgid "Notice make sure you have install git-lfs"
msgstr "确认是否已经安装git-lfs"
#: ../../getting_started/install/deploy.rst:55 f7c9535c0eb546f7b1389a181b08c5c0
msgid "centos:yum install git-lfs"
msgstr ""
#: ../../getting_started/install/deploy.rst:57 8fb3efededcb42c592fa27b03c4e9a65
msgid "ubuntu:apt-get install git-lfs"
msgstr ""
#: ../../getting_started/install/deploy.rst:59 b3df472ae3ae470d94112f7327787e13
msgid "macos:brew install git-lfs"
msgstr ""
#: ../../getting_started/install/deploy.rst:63
#: ../../getting_started/install/deploy.rst:226
#: 070a6d653f6740cf852ddaf036ac2538 635662e449d34d5b9f6316898d14e0a6
msgid "OpenAI"
msgstr "OpenAI"
#: ../../getting_started/install/deploy.rst:65
#: ../../getting_started/install/deploy.rst:212
#: 1ff3d5d9f9814e638f118925bedb7800 59cda8abcdc7471eb0a488610121a533
msgid "Download embedding model"
msgstr "下载embedding model"
#: ../../getting_started/install/deploy.rst:77
#: ../../getting_started/install/deploy.rst:234
#: 0c68e075ab7840bd9d75ced89deaea86 b9e25c8690da4d9c94c012a04ddc8f0d
msgid "Configure LLM_MODEL and PROXY_API_URL and API_KEY in `.env` file"
msgstr "在`.env`文件设置LLM_MODEL and PROXY_API_URL and API_KEY"
#: ../../getting_started/install/deploy.rst:87
#: ../../getting_started/install/deploy.rst:285
#: 5eb0e6ffb66a48dfa05cfea8414b21c5 61b75bb879d64726976c131f0f7cea83
msgid "Make sure your .env configuration is not overwritten"
msgstr "确认.env文件不会被覆盖"
#: ../../getting_started/install/deploy.rst:90 d14c2a247eee401bae9b1711cdcc0712
msgid "Vicuna"
msgstr "Vicuna"
#: ../../getting_started/install/deploy.rst:91 d59e03c9d3c3405ea04d814bffc59ef8
msgid ""
"`Vicuna-v1.5 <https://huggingface.co/lmsys/vicuna-13b-v1.5>`_ based on "
"llama-2 has been released, we recommend you set `LLM_MODEL=vicuna-"
"13b-v1.5` to try this model)"
msgstr ""
#: ../../getting_started/install/deploy.rst:93 c6d22fb4c35c40378159e7845c87bb51
msgid "vicuna-v1.5 hardware requirements"
msgstr ""
#: ../../getting_started/install/deploy.rst:97
#: ../../getting_started/install/deploy.rst:142
#: 215382b66c944bef8ba8d081792cb3c5 eadfd6f7230f49a4a9d5552b51d766c0
msgid "Model"
msgstr ""
#: ../../getting_started/install/deploy.rst:98
#: ../../getting_started/install/deploy.rst:143
#: 5eca1dbe065b4e648ee00ca118e84214 abec181550804601ad54500469b332f9
msgid "Quantize"
msgstr ""
#: ../../getting_started/install/deploy.rst:99
#: ../../getting_started/install/deploy.rst:144
#: 24a907ffe6e4449abb632d80ada8733c 472a99174d68460a84a72d65d9fdcd07
msgid "VRAM Size"
msgstr ""
#: ../../getting_started/install/deploy.rst:100
#: ../../getting_started/install/deploy.rst:103
#: 87bba38be8f24c0688431a5985622d33 e8c8ef23e4964d059d9615f41719d05f
msgid "vicuna-7b-v1.5"
msgstr ""
#: ../../getting_started/install/deploy.rst:101
#: ../../getting_started/install/deploy.rst:107
#: ../../getting_started/install/deploy.rst:146
#: ../../getting_started/install/deploy.rst:152
#: 7695fa31995b4e038ac4359df76a0a2f a8c361af629c4a179c811ac39ef16c3c
#: defc11c71d45471da982eb7c96039450 f9362395c5ff4213b5fb2f4d1e430496
msgid "4-bit"
msgstr ""
#: ../../getting_started/install/deploy.rst:102
#: ../../getting_started/install/deploy.rst:147
#: 7f2ebb804fb042c1b24cf6274c2cb7fc 9604200fbd974a6e9e3d66a38f9e5895
msgid "8 GB"
msgstr ""
#: ../../getting_started/install/deploy.rst:104
#: ../../getting_started/install/deploy.rst:110
#: ../../getting_started/install/deploy.rst:149
#: ../../getting_started/install/deploy.rst:155
#: 4936725d2b0a47f39286453db766027c 879daa7be9e243b7a8a51f0859459096
#: c5322eaace39472282941c4bcae87232 e79778e8505e490a912aa80c28e37b0c
msgid "8-bit"
msgstr ""
#: ../../getting_started/install/deploy.rst:105
#: ../../getting_started/install/deploy.rst:108
#: ../../getting_started/install/deploy.rst:150
#: ../../getting_started/install/deploy.rst:153
#: 09cd46eb89234d1cbbaf9fccd2d7f206 531282962fd148108a5d4582022b9d11
#: c2249464c63b4808a13be66c6a04653d c54d26deb5e14bbc9ebeefe751ee32a1
msgid "12 GB"
msgstr ""
#: ../../getting_started/install/deploy.rst:106
#: ../../getting_started/install/deploy.rst:109
#: 40d56aeeeeae4ad5a08d432da14c91f6 a4aae2c08b35462699a074c20436b583
msgid "vicuna-13b-v1.5"
msgstr ""
#: ../../getting_started/install/deploy.rst:111
#: ../../getting_started/install/deploy.rst:156
#: 4e58bb5b42d043b8b8105ff1424b3dda 670a8f2e7d9a4f56b338a890a0e6179c
msgid "20 GB"
msgstr ""
#: ../../getting_started/install/deploy.rst:127
#: ../../getting_started/install/deploy.rst:174
#: ../../getting_started/install/deploy.rst:200
#: 7aec36decef845a4979e4d72b5556166 edb0520c46de41e79a56453901f5dbda
#: f726ecdc3a3b40eeb527464e705d262c
msgid "The model files are large and will take a long time to download."
msgstr ""
#: ../../getting_started/install/deploy.rst:129
#: ../../getting_started/install/deploy.rst:176
#: ../../getting_started/install/deploy.rst:202
#: 1ced4f4cd3ca4b4a8a545ba4625e5888 1f3fc32e037846ecba6e9d99acaa341e
#: 2e2a3b69997043858a21ce88c349fd87
msgid "**Configure LLM_MODEL in `.env` file**"
msgstr ""
#: ../../getting_started/install/deploy.rst:136
#: ../../getting_started/install/deploy.rst:231
#: 0ae3d2ff947545fb8ae3a3221ada4fca 908164bbe3f745cf994b65c7cc0d4f42
msgid "Baichuan"
msgstr ""
#: ../../getting_started/install/deploy.rst:138
#: 4249581eb6eb4c90ac467c3b23f9cf47
msgid "Baichuan hardware requirements"
msgstr ""
#: ../../getting_started/install/deploy.rst:145
#: ../../getting_started/install/deploy.rst:148
#: c01aea1eaf0c4d0ca14c10e51003fa2e f1081e1ecc6b42b6a7d227cf4a3b9aa9
msgid "baichuan-7b"
msgstr ""
#: ../../getting_started/install/deploy.rst:151
#: ../../getting_started/install/deploy.rst:154
#: 1cf37d7196814348a177341d80d9748a edf669023f5647c89c52475f385dd91f
msgid "baichuan-13b"
msgstr ""
#: ../../getting_started/install/deploy.rst:178
#: 25e45bdc7e6b475080f8b39cd555746d
msgid "please rename Baichuan path to \"baichuan2-13b\" or \"baichuan2-7b\""
msgstr "将Baichuan模型目录修改为\"baichuan2-13b\" 或 \"baichuan2-7b\""
#: ../../getting_started/install/deploy.rst:184
#: 80c36809333c4e2490166571222963cc
msgid "ChatGLM"
msgstr ""
#: ../../getting_started/install/deploy.rst:204
#: f1e3c93f447b40039a57d278d49ff32d
msgid "please rename chatglm model path to \"chatglm2-6b\""
msgstr "将chatglm模型目录修改为\"chatglm2-6b\""
#: ../../getting_started/install/deploy.rst:210
#: 62570cd6084a4b3c8ceff5fdd5234aa0
msgid "Other LLM API"
msgstr ""
#: ../../getting_started/install/deploy.rst:227
#: fb0c2d274f3740a2af7ece03fdb81d22
msgid "Azure"
msgstr ""
#: ../../getting_started/install/deploy.rst:228
#: 13e28550c95e4838b9e35a064448f9ca
msgid "Aliyun tongyi"
msgstr ""
#: ../../getting_started/install/deploy.rst:229
#: 32d7289e7e514db5b3f49ebad9eb0e46
msgid "Baidu wenxin"
msgstr ""
#: ../../getting_started/install/deploy.rst:230
#: b8033cd9b67d4b18823fc2053e23114b
msgid "Zhipu"
msgstr ""
#: ../../getting_started/install/deploy.rst:232
#: 9cc479d40a3f435a81e8e2aa8c015bbe
msgid "Bard"
msgstr ""
#: ../../getting_started/install/deploy.rst:287
#: abd4caa347e3405f86319825c14b3b4a
msgid "llama.cpp"
msgstr ""
#: ../../getting_started/install/deploy.rst:289
#: b3b53e55206345e8b332af19851f5a5f
msgid ""
"DB-GPT already supports `llama.cpp "
"<https://github.com/ggerganov/llama.cpp>`_ via `llama-cpp-python "
"<https://github.com/abetlen/llama-cpp-python>`_ ."
msgstr ""
"DB-GPT 已经支持了 `llama.cpp <https://github.com/ggerganov/llama.cpp>`_ via "
"`llama-cpp-python <https://github.com/abetlen/llama-cpp-python>`_ ."
#: ../../getting_started/install/deploy.rst:291
#: 54fc351de03445e7b4b7a5408215c0cf
msgid "**Preparing Model Files**"
msgstr "**准备Model文件**"
#: ../../getting_started/install/deploy.rst:293
#: 41e7c90a891d47c1ae04883454cf999f
msgid ""
"To use llama.cpp, you need to prepare a gguf format model file, and there"
" are two common ways to obtain it, you can choose either:"
msgstr "使用 llama.cpp你需要准备 gguf 格式的文件,你可以通过以下两种方法获取"
#: ../../getting_started/install/deploy.rst:295
#: 8aefa2f2e0ab47469717aff4af20668e
msgid "**1. Download a pre-converted model file.**"
msgstr "**1.下载已转换的模型文件.**"
#: ../../getting_started/install/deploy.rst:297
#: 9849e3b8c0824cfa9add58a97fb583c4
msgid ""
"Suppose you want to use [Vicuna 13B v1.5](https://huggingface.co/lmsys"
"/vicuna-13b-v1.5), you can download the file already converted from "
"[TheBloke/vicuna-13B-v1.5-GGUF](https://huggingface.co/TheBloke/vicuna-"
"13B-v1.5-GGUF), only one file is needed. Download it to the `models` "
"directory and rename it to `ggml-model-q4_0.gguf`."
msgstr ""
"假设您想使用[Vicuna 13B v1.5](https://huggingface.co/lmsys/vicuna-"
"13b-v1.5)您可以从[TheBloke/vicuna-"
"13B-v1.5-GGUF](https://huggingface.co/TheBloke/vicuna-"
"13B-v1.5-GGUF)下载已转换的文件只需要一个文件。将其下载到models目录并将其重命名为 `ggml-"
"model-q4_0.gguf`。"
#: ../../getting_started/install/deploy.rst:303
#: b79538a3af8a4946b24d8ebdb343e8aa
msgid "**2. Convert It Yourself**"
msgstr "**2. 自行转换**"
#: ../../getting_started/install/deploy.rst:305
#: 12f31f96202f4789af86c254feaac717
msgid ""
"You can convert the model file yourself according to the instructions in "
"[llama.cpp#prepare-data--run](https://github.com/ggerganov/llama.cpp"
"#prepare-data--run), and put the converted file in the models directory "
"and rename it to `ggml-model-q4_0.gguf`."
msgstr ""
"您可以根据[llama.cpp#prepare-data--run](https://github.com/ggerganov/llama.cpp"
"#prepare-data--run)中的说明自行转换模型文件并把转换后的文件放在models目录中并重命名为`ggml-"
"model-q4_0.gguf`。"
#: ../../getting_started/install/deploy.rst:307
#: c45612d904e64b3a8c0d50c564447853
msgid "**Installing Dependencies**"
msgstr "**安装依赖**"
#: ../../getting_started/install/deploy.rst:309
#: c3c04c3e560a46bba53719472a25ce11
msgid ""
"llama.cpp is an optional dependency in DB-GPT, and you can manually "
"install it using the following command:"
msgstr "llama.cpp在DB-GPT中是可选安装项, 你可以通过以下命令进行安装"
#: ../../getting_started/install/deploy.rst:316
#: 25f8ceb4fca941cdbd7c4dcc49818d79
msgid "**3.Modifying the Configuration File**"
msgstr "**3.修改配置文件**"
#: ../../getting_started/install/deploy.rst:318
#: 5a63a04a43a1487eac56f2560dbc2275
msgid "Next, you can directly modify your `.env` file to enable llama.cpp."
msgstr "修改`.env`文件使用llama.cpp"
#: ../../getting_started/install/deploy.rst:325
#: ../../getting_started/install/deploy.rst:393
#: e8b1498ad8c44201ba2e050db454c61c ffcb505780884dddaf3559990405a081
msgid ""
"Then you can run it according to `Run <https://db-"
"gpt.readthedocs.io/en/latest/getting_started/install/deploy/deploy.html#run>`_"
msgstr ""
"然后你可以根据[运行](https://db-gpt.readthedocs.io/projects/db-gpt-docs-zh-"
"cn/zh_CN/latest/getting_started/install/deploy/deploy.html#run)来运行"
#: ../../getting_started/install/deploy.rst:328
#: 0d8d0f68e43b465a9537fbc741a9d37f
msgid "**More Configurations**"
msgstr "**更多配置文件**"
#: ../../getting_started/install/deploy.rst:330
#: 6fdef4b280e5456faf11521700b7fe04
msgid ""
"In DB-GPT, the model configuration can be done through `{model "
"name}_{config key}`."
msgstr "在DB-GPT中模型配置可以通过`{模型名称}_{配置名}` 来配置。"
#: ../../getting_started/install/deploy.rst:332
#: 256b25378f3d4bfd902f155b3a4346ad
msgid "More Configurations"
msgstr "更多配置"
#: ../../getting_started/install/deploy.rst:336
#: ebe4390d2de44fd288379a439d99bccd
msgid "Environment Variable Key"
msgstr "环境变量Key"
#: ../../getting_started/install/deploy.rst:337
#: ee1c68623cc24446b4036870e0c7f21c
msgid "Default"
msgstr "默认值"
#: ../../getting_started/install/deploy.rst:338
#: ed6c358d27fb4022a70de1e733ad99cd
msgid "Description"
msgstr "描述"
#: ../../getting_started/install/deploy.rst:339
#: 4ad9e1b85c294cf084b8ebf858cc4075
msgid "llama_cpp_prompt_template"
msgstr ""
#: ../../getting_started/install/deploy.rst:340
#: ../../getting_started/install/deploy.rst:343
#: ../../getting_started/install/deploy.rst:349
#: ../../getting_started/install/deploy.rst:355
#: ../../getting_started/install/deploy.rst:361
#: 0cfccd9b8f7043ec8ccfe5159dfcf2c3 155d01bf0ad94016a32a2ec18a0fd881
#: 3a194e635268499a939990b3503da72e 8909fd33e33b4f5a9208b8338b82b20f
#: e32e3219d2384884b398054cecceec8a
msgid "None"
msgstr ""
#: ../../getting_started/install/deploy.rst:341
#: c395eb1a7b044536a7ed300320e230f5
msgid ""
"Prompt template name, now support: zero_shot, vicuna_v1.1,alpaca,llama-2"
",baichuan-chat,internlm-chat, If None, the prompt template is "
"automatically determined from model path。"
msgstr ""
"Prompt template 现在可以支持`zero_shot, vicuna_v1.1,alpaca,llama-2,baichuan-"
"chat,internlm-chat`, 如果是None, 可以根据模型路径来自动获取模型 Prompt template"
#: ../../getting_started/install/deploy.rst:342
#: 8fb84e002d1f4fc79bd4465b161e018a
msgid "llama_cpp_model_path"
msgstr ""
#: ../../getting_started/install/deploy.rst:344
#: 81df474169634fbf9da20fe239d903a0
msgid "Model path"
msgstr "模型路径"
#: ../../getting_started/install/deploy.rst:345
#: a94ac450e72940f78b8ba6f6c9854248
msgid "llama_cpp_n_gpu_layers"
msgstr ""
#: ../../getting_started/install/deploy.rst:346
#: 06a24fb226f24ed4a93cde7034a3b67f
msgid "1000000000"
msgstr ""
#: ../../getting_started/install/deploy.rst:347
#: d29a8733688a4c62a3ea88e4793b82ef
msgid ""
"Number of layers to offload to the GPU, Set this to 1000000000 to offload"
" all layers to the GPU. If your GPU VRAM is not enough, you can set a low"
" number, eg: 10"
msgstr "要将多少网络层转移到GPU上将其设置为1000000000以将所有层转移到GPU上。如果您的 GPU 内存不足可以设置较低的数字例如10。"
#: ../../getting_started/install/deploy.rst:348
#: 8531ecd5690e46f2a9d64823405054b1
msgid "llama_cpp_n_threads"
msgstr ""
#: ../../getting_started/install/deploy.rst:350
#: d22c11631f5b4bc3b9dabad2175f26d5
msgid ""
"Number of threads to use. If None, the number of threads is automatically"
" determined"
msgstr "要使用的线程数量。如果为None则线程数量将自动确定。"
#: ../../getting_started/install/deploy.rst:351
#: 06994945569849a9a07ca0fbb56509e4
msgid "llama_cpp_n_batch"
msgstr ""
#: ../../getting_started/install/deploy.rst:352
#: a509c6b3800941aa8aaed1c2c3a89937
msgid "512"
msgstr ""
#: ../../getting_started/install/deploy.rst:353
#: 02f71c7f7ddc49f6b61b163237f4c4fe
msgid "Maximum number of prompt tokens to batch together when calling llama_eval"
msgstr "在调用llama_eval时批处理在一起的prompt tokens的最大数量"
#: ../../getting_started/install/deploy.rst:354
#: 4d0df2430c22464fac021d816b4fa0c3
msgid "llama_cpp_n_gqa"
msgstr ""
#: ../../getting_started/install/deploy.rst:356
#: 24304fe7c331423ebcc9eb407d6f6f46
msgid "Grouped-query attention. Must be 8 for llama-2 70b."
msgstr "对于 llama-2 70B 模型Grouped-query attention 必须为8。"
#: ../../getting_started/install/deploy.rst:357
#: 8c6b332cdcc845dc9c218e3de7050e3e
msgid "llama_cpp_rms_norm_eps"
msgstr ""
#: ../../getting_started/install/deploy.rst:358
#: 2c2ac24d7d264be3aa6960e776fa5e56
msgid "5e-06"
msgstr ""
#: ../../getting_started/install/deploy.rst:359
#: 2d7170d63e364aa3bb3040d96ba31316
msgid "5e-6 is a good value for llama-2 models."
msgstr "对于llama-2模型来说5e-6是一个不错的值。"
#: ../../getting_started/install/deploy.rst:360
#: 402b942db0b149a8985685dbfe6f31c7
msgid "llama_cpp_cache_capacity"
msgstr ""
#: ../../getting_started/install/deploy.rst:362
#: c990275acf6741398ac147a8ddce1ce3
msgid "Maximum cache capacity. Examples: 2000MiB, 2GiB"
msgstr "模型缓存最大值. 例如: 2000MiB, 2GiB"
#: ../../getting_started/install/deploy.rst:363
#: 5f579fac7df54a338dc3c8a348bc90ea
msgid "llama_cpp_prefer_cpu"
msgstr ""
#: ../../getting_started/install/deploy.rst:364
#: 8413a349be174d50991653a5554933c5
msgid "False"
msgstr ""
#: ../../getting_started/install/deploy.rst:365
#: 78af815bd0724301870ef934d27b208d
msgid ""
"If a GPU is available, it will be preferred by default, unless "
"prefer_cpu=False is configured."
msgstr "如果有可用的GPU默认情况下会优先使用GPU除非配置了 prefer_cpu=False。"
#: ../../getting_started/install/deploy.rst:368
#: f193ef95a45f42d2a5e156a79ed09685
msgid "vllm"
msgstr ""
#: ../../getting_started/install/deploy.rst:370
#: e28e1d550515498e8e41a2e1187f9956
msgid "vLLM is a fast and easy-to-use library for LLM inference and serving."
msgstr "vLLM 是一个快速且易于使用的 LLM 推理和服务的库。"
#: ../../getting_started/install/deploy.rst:372
#: adbcb8083c274d5eae4209b5b4fb8048
msgid "**Running vLLM**"
msgstr "**运行vLLM**"
#: ../../getting_started/install/deploy.rst:374
#: 188e0fb1d0d34fccb15a796707ca95dd
msgid "**1.Installing Dependencies**"
msgstr "**1.安装依赖**"
#: ../../getting_started/install/deploy.rst:376
#: dbfcdd5d0e2a4cb29692022e63766116
msgid ""
"vLLM is an optional dependency in DB-GPT, and you can manually install it"
" using the following command:"
msgstr "vLLM 在 DB-GPT 是一个可选依赖, 你可以使用下面的命令手动安装它:"
#: ../../getting_started/install/deploy.rst:382
#: 4401138d25b34b028c4bf43ddfb89aa3
msgid "**2.Modifying the Configuration File**"
msgstr "**2.修改配置文件**"
#: ../../getting_started/install/deploy.rst:384
#: ee0ad8661dce4b909b07df4773150ee6
msgid "Next, you can directly modify your .env file to enable vllm."
msgstr "你可以直接修改你的 `.env` 文件来启用 vllm"
#: ../../getting_started/install/deploy.rst:391
#: 1be59f77d774429b9959bf834871a414
msgid ""
"You can view the models supported by vLLM `here "
"<https://vllm.readthedocs.io/en/latest/models/supported_models.html"
"#supported-models>`_"
msgstr "你可以在 "
"[这里](https://vllm.readthedocs.io/en/latest/models/supported_models.html"
"#supported-models) 查看 vLLM 支持的模型。"
#: ../../getting_started/install/deploy.rst:400
#: def3d7d7cd03407d9dfeff2e1eee951e
msgid "3.Prepare sql example(Optional)"
msgstr "3.准备 sql example(可选)"
#: ../../getting_started/install/deploy.rst:401
#: b6038f79dac74fda8f6db08325bf0686
msgid "**(Optional) load examples into SQLite**"
msgstr "**(可选) load examples into SQLite**"
#: ../../getting_started/install/deploy.rst:408
#: 37c84b22186b48baba5aca60f8a70f49
msgid "On windows platform:"
msgstr ""
#: ../../getting_started/install/deploy.rst:415
#: 09c9508bac8a4330931d45d02c33762f
msgid "4.Run db-gpt server"
msgstr "4.运行db-gpt server"
#: ../../getting_started/install/deploy.rst:421
#: 04d68abe85a646388956cbd1b47f3232
msgid "**Open http://localhost:5000 with your browser to see the product.**"
msgstr "打开浏览器访问http://localhost:5000"

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 👏👏 0.4.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-10-26 19:57+0800\n"
"POT-Creation-Date: 2023-11-03 11:47+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: zh_CN\n"
@ -17,13 +17,13 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.13.1\n"
"Generated-By: Babel 2.12.1\n"
#: ../../modules/prompts.md:1 00907f941b7743b99278d7fd0b67e5d3
#: ../../modules/prompts.md:1 3c5bdc61dc4a4301acdc9775c854a896
msgid "Prompts"
msgstr "Prompts"
#: ../../modules/prompts.md:3 f1c7042cba32483b90ab22db28e369a9
#: ../../modules/prompts.md:3 118fc2b85e8b4e02a6868b3bc2a7892c
msgid ""
"**Prompt** is a very important part of the interaction between the large "
"model and the user, and to a certain extent, it determines the quality "
@ -33,96 +33,116 @@ msgid ""
"users to use large language models."
msgstr "**Prompt**是大模型与用户交互中非常重要的一环,在一定程度上决定了大模型生成答案的质量和准确性。在这个项目中,我们会根据用户输入和使用场景自动地优化相应提示,让用户更轻松、更高效地使用大语言模型。"
#: ../../modules/prompts.md:5 695c05f96a2c4077a0a25f5f6dc22d31
#: ../../modules/prompts.md:5 41614effa0a445b7b5a119311b902305
msgid "Prompt Management"
msgstr "Prompt 管理"
#: ../../modules/prompts.md:7 325254a0ceba48b8aa19a4cf66d18c12
#: ../../modules/prompts.md:7 a8ed0a7b3d1243ffa1ed80c24d1ab518
msgid ""
"Here, you can choose to create a Prompt in **Public Prompts** space or "
"**Private Prompts** space."
msgstr "该页面允许用户选择**公共Prompts**或者**私有Prompts**空间来创建相应的 Prompt。"
#: ../../modules/prompts.md:11 4528a48a14d246d8874c1b31819d3f4f
#: ../../modules/prompts.md:9 ../../modules/prompts.md:17
#: ../../modules/prompts.md:31 ../../modules/prompts.md:45
#: 68db272acc6b4572aa275940da4b788b 92d46d647bbb4035add92f750511a840
#: af1789fae8cb47b8a81e68520086f35e d7c2f6f43b5c406d82b7dc5bd92d183c
#: e2f91ca11e784fe5943d0738671f68bf
msgid "image"
msgstr ""
#: ../../modules/prompts.md:11 102220bf95f04f81acc9a0093458f297
msgid ""
"The difference between **Public Prompts** and **Private Prompts** is that"
" Prompts in **Public Prompts** space can be viewed and used by all users,"
" while prompts in **Private Prompts** space can only be viewed and used "
"by the owner."
msgstr "**公共 Prompts**和**私有 Prompts**空间的区别在于,**公共 Prompts**空间下的 Prompt 可供所有的用户查看和使用,而**私有 Prompts**空间下的 Prompt 只能被所有者查看和使用。"
msgstr ""
"**公共 Prompts**和**私有 Prompts**空间的区别在于,**公共 Prompts**空间下的 Prompt "
"可供所有的用户查看和使用,而**私有 Prompts**空间下的 Prompt 只能被所有者查看和使用。"
#: ../../modules/prompts.md:13 45fdbc12758c4cb3aa71f5616c75a6e6
#: ../../modules/prompts.md:13 2e0d2f6b335a4aacbdc83b7b7042a701
msgid "Create Prompt"
msgstr "创建 Prompt"
#: ../../modules/prompts.md:15 850b9c136d5949939c3966b6bff8204e
#: ../../modules/prompts.md:15 c9f8c3d1698941e08b90a35fffb2fce1
msgid "Click the \"Add Prompts\" button to pop up the following subpage:"
msgstr "点击 \"新增Prompts\"按钮可以弹出如下的子页面:"
#: ../../modules/prompts.md:19 9db6614fcc454bddbf2e3257232e7c93
#: ../../modules/prompts.md:19 23ed81a83ab2458f826f2b5d9c55a89a
msgid ""
"**Scene**: It is assumed here that when we have a lot of Prompts, we "
"often classify the Prompts according to scene, such as Prompts in the "
"chat knowledge scene, Prompts in the chat data scene, Prompts in the chat"
" normal scene, etc."
msgstr "**场景**:这里假设,当我们有很多 Prompts 时,往往会根据场景对 Prompts 进行分类,比如在 DB-GPT 项目中chat knowledge 场景的 Prompts、chat data 场景的 Prompts、chat normal 场景的 Prompts 等等。"
msgstr ""
"**场景**:这里假设,当我们有很多 Prompts 时,往往会根据场景对 Prompts 进行分类,比如在 DB-GPT 项目中chat "
"knowledge 场景的 Prompts、chat data 场景的 Prompts、chat normal 场景的 Prompts 等等。"
#: ../../modules/prompts.md:21 45a0a38ceafe4575a5957b2de8c0661f
#: ../../modules/prompts.md:21 11299da493e741869fe67237f1cb1794
msgid ""
"**Sub Scene**: Continuing with the above, assuming that we have a lot of "
"Prompts, scene classification alone is not enough. For example, in the "
"chat data scenario, there can be many types of sub-scene: anomaly "
"recognition sub scene, attribution analysis sub scene, etc. sub scene is "
"used to distinguish subcategories under each scene."
msgstr "**次级场景**:接着上面的内容,如果我们的 Prompt 很多时,仅使用场景一级分类是不够的。例如,在 chat data 场景中,还可以细分为很多的次级场景:异常识别次级场景、归因分析次级场景等等。次级场景是用于区分每个场景下的子类别。"
msgstr ""
"**次级场景**:接着上面的内容,如果我们的 Prompt 很多时,仅使用场景一级分类是不够的。例如,在 chat data "
"场景中,还可以细分为很多的次级场景:异常识别次级场景、归因分析次级场景等等。次级场景是用于区分每个场景下的子类别。"
#: ../../modules/prompts.md:23 78544fc530114bb0bc0a753811e9ce58
#: ../../modules/prompts.md:23 c15d62af27094d14acb6428c0e3e1a1d
msgid ""
"**Name**: Considering that a Prompt generally contains a lot of content, "
"for ease of use and easy search, we need to name the Prompt. Note: The "
"name of the Prompt is not allowed to be repeated. Name is the unique key "
"that identifies a Prompt."
msgstr "**名称**:考虑到每个 Prompt 的内容会非常多,为了方便用户使用和搜索,我们需要给每个 Prompt 命名。注意Prompt 的名称不允许重复,名称是一个 Prompt 的唯一键。"
msgstr ""
"**名称**:考虑到每个 Prompt 的内容会非常多,为了方便用户使用和搜索,我们需要给每个 Prompt 命名。注意Prompt "
"的名称不允许重复,名称是一个 Prompt 的唯一键。"
#: ../../modules/prompts.md:25 f44598d2f2a4429d8c1624feca65c867
#: ../../modules/prompts.md:25 621fe9c729c94e9bbde637b5a1856284
msgid "**Content**: Here is the actual Prompt content that will be input to LLM."
msgstr "**内容**:这里是实际要输入 LLM 的提示内容。"
#: ../../modules/prompts.md:27 2384fc6006e74e9f94966f6112e6bb06
#: ../../modules/prompts.md:27 ac2f153f704c4841a044daaf6548262b
msgid "Edit Prompt"
msgstr "编辑 Prompt"
#: ../../modules/prompts.md:29 efe8e16acc584f258645b1b20d891f71
#: ../../modules/prompts.md:29 3d6238ea482842e0968f691f3fd0c947
msgid ""
"Existing Prompts can be edited. Note that except **name**, other items "
"can be modified."
msgstr "已有的 Prompts 可以被编辑,除了名称不可修改,其余的内容均可修改。"
#: ../../modules/prompts.md:33 b1a82b4d88584275955087114cdcf574
#: ../../modules/prompts.md:33 7cbe985fd9534471bce5f93a93da82fd
msgid "Delete Prompt"
msgstr "删除 Prompt"
#: ../../modules/prompts.md:35 e4e55367b8574324937f8b3006e7d3cd
#: ../../modules/prompts.md:35 849ab9ef2a2c4a29bb827eb373f37b7d
msgid ""
"Ordinary users can only delete Prompts created by themselves in the "
"private Prompts space. Administrator users can delete Prompts in public "
"Prompts spaces and private Prompts spaces."
msgstr "普通用户只能删除他们自己在私有 Prompts 空间中创建的 Prompts管理员可以删除 公共 Prompts 空间下的 Prompts也可以删除私有 Prompts 空间下的 Prompts即使 Prompts 的创建者不是管理员)。"
msgstr ""
"普通用户只能删除他们自己在私有 Prompts 空间中创建的 Prompts管理员可以删除 公共 Prompts 空间下的 "
"Prompts也可以删除私有 Prompts 空间下的 Prompts即使 Prompts 的创建者不是管理员)。"
#: ../../modules/prompts.md:38 ad8210816c524908bd131ffa9adae07c
#: ../../modules/prompts.md:38 191921e5664d4326b01f0c45dc88a1e5
msgid "Use Prompt"
msgstr "使用 Prompt"
#: ../../modules/prompts.md:40 e7dd1e88f47647ecbfaf02a0d068b3f9
#: ../../modules/prompts.md:40 87ad58641f834f30bce178e748d75284
msgid ""
"Users can find and use Prompts next to the input boxes in each scene. "
"Click to view all contents of Prompts library."
msgstr "用户可以在每个场景中的输入框旁边找到并使用 Prompts。 点击悬浮图标可以查看当前用户能使用的全部 Prompts。"
#: ../../modules/prompts.md:42 38b6c173235c4f8499da14496a5a78b3
#: ../../modules/prompts.md:42 60458c7980174c73bc0d56e9e27cd2b3
msgid ""
"✓ Hover the mouse over each Prompt to preview the Prompt content. ✓ "
"Click Prompt to automatically fill in the Prompt content in the input "
"box."
msgstr "✓ 将鼠标悬停在每个 Prompt 上,可预览 Prompt 的内容。 ✓ 单击对应的 Prompt可自动将 Prompt 的内容填充到输入框中。"
msgstr ""
"✓ 将鼠标悬停在每个 Prompt 上,可预览 Prompt 的内容。 ✓ 单击对应的 Prompt可自动将 Prompt "
"的内容填充到输入框中。"

View File

@ -55,11 +55,6 @@ class ChatKnowledge(BaseChat):
"vector_store_name": self.knowledge_space,
"vector_store_type": CFG.VECTOR_STORE_TYPE,
}
# from pilot.graph_engine.graph_factory import RAGGraphFactory
#
# self.rag_engine = CFG.SYSTEM_APP.get_component(
# ComponentType.RAG_GRAPH_DEFAULT.value, RAGGraphFactory
# ).create()
embedding_factory = CFG.SYSTEM_APP.get_component(
"embedding_factory", EmbeddingFactory
)

View File

@ -13,13 +13,13 @@ The assistant gives helpful, detailed, professional and polite answers to the us
_DEFAULT_TEMPLATE_ZH = """ 基于以下已知的信息, 专业、简要的回答用户的问题,
如果无法从提供的内容中获取答案, 请说: "知识库中提供的内容不足以回答此问题" 禁止胡乱编造
如果无法从提供的内容中获取答案, 请说: "知识库中提供的内容不足以回答此问题" 禁止胡乱编造, 回答的时候最好按照1.2.3.点进行总结
已知内容:
{context}
问题:
{question}
"""
_DEFAULT_TEMPLATE_EN = """ Based on the known information below, provide users with professional and concise answers to their questions. If the answer cannot be obtained from the provided content, please say: "The information provided in the knowledge base is not sufficient to answer this question." It is forbidden to make up information randomly.
_DEFAULT_TEMPLATE_EN = """ Based on the known information below, provide users with professional and concise answers to their questions. If the answer cannot be obtained from the provided content, please say: "The information provided in the knowledge base is not sufficient to answer this question." It is forbidden to make up information randomly. When answering, it is best to summarize according to points 1.2.3.
known information:
{context}
question:

View File

@ -29,9 +29,9 @@ def initialize_components(
system_app.register_instance(controller)
# Register global default RAGGraphFactory
from pilot.graph_engine.graph_factory import DefaultRAGGraphFactory
# from pilot.graph_engine.graph_factory import DefaultRAGGraphFactory
system_app.register(DefaultRAGGraphFactory)
# system_app.register(DefaultRAGGraphFactory)
from pilot.base_modules.agent.controller import module_agent

View File

@ -441,16 +441,7 @@ class KnowledgeService:
logger.info(
f"async_document_summary, doc:{doc.doc_name}, chunk_size:{len(texts)}, begin generate summary"
)
# summary = self._llm_extract_summary(texts[0])
summary = self._mapreduce_extract_summary(texts)
# summaries = prompt_helper.repack(prompt=DEFAULT_TREE_SUMMARIZE_PROMPT_SEL, text_chunks=summaries)
# if (len(summaries)) > 1:
# outputs, summary = self._refine_extract_summary(summaries[1:], summaries[0])
# else:
# summary = self._llm_extract_summary("\n".join(summaries))
# print(
# f"refine summary outputs:{summaries}"
# )
print(f"final summary:{summary}")
doc.summary = summary
return knowledge_document_dao.update_knowledge_document(doc)
@ -466,7 +457,6 @@ class KnowledgeService:
f"async doc sync, doc:{doc.doc_name}, chunk_size:{len(chunk_docs)}, begin embedding to vector store-{CFG.VECTOR_STORE_TYPE}"
)
try:
self.async_document_summary(chunk_docs, doc)
vector_ids = client.knowledge_embedding_batch(chunk_docs)
doc.status = SyncStatus.FINISHED.name
doc.result = "document embedding success"
@ -543,37 +533,9 @@ class KnowledgeService:
)
return summary_iters[0]
def _refine_extract_summary(self, docs, summary: str, max_iteration: int = 5):
"""Extract refine summary by llm"""
from pilot.scene.base import ChatScene
from pilot.common.chat_util import llm_chat_response_nostream
import uuid
print(f"initialize summary is :{summary}")
outputs = [summary]
max_iteration = max_iteration if len(docs) > max_iteration else len(docs)
for doc in docs[0:max_iteration]:
chat_param = {
"chat_session_id": uuid.uuid1(),
"current_user_input": doc,
"select_param": summary,
"model_name": self.model_name,
}
from pilot.utils import utils
loop = utils.get_or_create_event_loop()
summary = loop.run_until_complete(
llm_chat_response_nostream(
ChatScene.ExtractRefineSummary.value(), **{"chat_param": chat_param}
)
)
outputs.append(summary)
print(f"iterator is {len(outputs)} current summary is :{summary}")
return outputs, summary
def _mapreduce_extract_summary(self, docs):
"""Extract mapreduce summary by llm
map -> multi thread generate summary
"""Extract summary by mapreduce mode
map -> multi async thread generate summary
reduce -> merge the summaries by map process
Args:
docs:List[str]