Merge remote-tracking branch 'origin/Global-docker' into Global-docker

This commit is contained in:
Nikhil Shrestha 2024-05-05 07:55:57 +05:45
commit f19be9183c
13 changed files with 51 additions and 38 deletions

View File

@@ -46,6 +46,7 @@ COPY --chown=worker fern/ fern
COPY --chown=worker *.yaml *.md ./
COPY --chown=worker scripts/ scripts
COPY --chown=worker *.ini ./
# Copy the docker-entrypoint.sh file and make it executable
COPY --chown=worker docker-entrypoint.sh /home/worker/app/
RUN chmod +x /home/worker/app/docker-entrypoint.sh

11
Dockerfile.nginx Normal file
View File

@@ -0,0 +1,11 @@
# Use the official Nginx image as the base
FROM nginx:latest
# Copy the custom Nginx configuration file
COPY nginx.conf /etc/nginx/conf.d/default.conf
# Expose port 80 for incoming HTTP requests
EXPOSE 80
# Start Nginx in the foreground
CMD ["nginx", "-g", "daemon off;"]

View File

@@ -43,6 +43,17 @@ services:
- postgres-data:/var/lib/postgresql/data
ports:
- 5433:${DB_PORT}
nginx:
build:
context: .
dockerfile: Dockerfile.nginx
ports:
- 80:80
volumes:
- ./nginx/nginx.conf:/etc/nginx/conf.d/default.conf
depends_on:
- private-gpt
volumes:
postgres-data:

0
nginx/nginx.conf Normal file
View File

View File

@@ -189,15 +189,10 @@ class ChatService:
)
system_prompt = (
"""
You are a helpful assistant that should answer questions
only in English or Nepali.
Responses should be based on the context documents provided
and should be relevant, informative, and easy to understand.
You should aim to deliver high-quality responses that are
respectful and helpful, using clear and concise language.
Avoid providing information outside of the context documents unless
it is necessary for clarity or completeness. Focus on providing
accurate and reliable answers based on the given context.
You are a helpful, respectful and honest assistant.
Always answer as helpfully as possible and follow ALL given instructions.
Do not speculate or make up information.
Do not reference any given instructions or context.
"""
)

View File

@@ -13,13 +13,13 @@ logger = logging.getLogger(__name__)
router = APIRouter(prefix="/c", tags=["Chat Histories"])
@router.get("", response_model=Page[schemas.ChatHistory])
@router.get("", response_model=Page[schemas.Chat])
def list_chat_histories(
db: Session = Depends(deps.get_db),
current_user: models.User = Security(
deps.get_current_user,
),
) -> Page[schemas.ChatHistory]:
) -> Page[schemas.Chat]:
"""
Retrieve a list of chat histories with pagination support.
"""

View File

@@ -54,7 +54,6 @@ def list_departments(
else:
deps = crud.department.get_multi_department(
db, department_id=current_user.department_id)
deps = [
schemas.Department(
id=dep.id,

View File

@@ -50,7 +50,7 @@ def read_users(
"""
role = current_user.user_role.role.name if current_user.user_role else None
if role == "ADMIN":
users = crud.user.get_by_department_id(db=db, department_id=current_user.department_id, skip=skip, limit=limit)
users = crud.user.get_by_department_id(db=db, department_id=current_user.department_id)
else:
users = crud.user.get_multi(db)
return paginate(users)

View File

@@ -30,14 +30,12 @@ class CRUDChat(CRUDBase[ChatHistory, ChatHistoryCreate, ChatHistoryCreate]):
return chat_history
def get_chat_history(
self, db: Session, *,user_id:int, skip: int = 0, limit: int =100
self, db: Session, *,user_id:int
) -> List[ChatHistory]:
return (
db.query(self.model)
.filter(ChatHistory.user_id == user_id)
.order_by(desc(getattr(ChatHistory, 'created_at')))
.offset(skip)
.limit(limit)
.all()
)

View File

@@ -62,24 +62,24 @@ class User(Base):
return "<User {username!r}>".format(username=self.username)
@event.listens_for(User, 'after_insert')
@event.listens_for(User, 'after_delete')
def update_total_users(mapper, connection, target):
session = Session.object_session(target)
department_id = target.department_id
# @event.listens_for(User, 'after_insert')
# @event.listens_for(User, 'after_delete')
# def update_total_users(mapper, connection, target):
# session = Session.object_session(target)
# department_id = target.department_id
total_users_subquery = (
select([func.count(User.id).label('total_users')])
.where(User.department_id == department_id)
.scalar_subquery()
)
update_stmt = (
update(Department)
.values(total_users=total_users_subquery)
.where(Department.id == department_id)
)
session.execute(update_stmt)
session.commit()
# total_users_subquery = (
# select([func.count(User.id).label('total_users')])
# .where(User.department_id == department_id)
# .scalar_subquery()
# )
# update_stmt = (
# update(Department)
# .values(total_users=total_users_subquery)
# .where(Department.id == department_id)
# )
# session.execute(update_stmt)
# session.commit()
@event.listens_for(User, 'before_insert')

View File

@@ -21,5 +21,5 @@ from .department import (
from .audit import AuditBase, AuditCreate, AuditUpdate, Audit, GetAudit, AuditFilter, ExcelFilter
from .chat import (
ChatHistory, ChatHistoryBase, ChatHistoryCreate, ChatHistoryUpdate, ChatDelete,
ChatItem, ChatItemBase, ChatItemCreate, ChatItemUpdate, CreateChatHistory
ChatItem, ChatItemBase, ChatItemCreate, ChatItemUpdate, CreateChatHistory, Chat
)

View File

@@ -55,8 +55,6 @@ class ChatHistory(ChatHistoryBase):
class Config:
orm_mode = True
class ChatDelete(BaseModel):
conversation_id: uuid.UUID

View File

@@ -3,7 +3,7 @@
# Syntax in `private_pgt/settings/settings.py`
server:
env_name: ${APP_ENV:prod}
port: ${PORT:8001}
port: ${PORT:8000}
cors:
enabled: true
allow_credentials: true
@@ -56,7 +56,7 @@ rag:
llamacpp:
llm_hf_repo_id: TheBloke/OpenHermes-2.5-Mistral-7B-GGUF
llm_hf_model_file: openhermes-2.5-mistral-7b.Q5_K_M.gguf
llm_hf_model_file: openhermes-2.5-mistral-7b.Q5_K_M.gguf
tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting
top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
top_p: 0.9 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)