Compare commits

1 commit: v0.4.28 ... chore/Alex
Commit: aada4b2f2d

3  .github/CODEOWNERS (vendored)
@@ -9,5 +9,4 @@
# Unless a later match takes precedence, these owners will be requested for
# review when someone opens a pull request.

/.github/settings.yml @k8sgpt-ai/maintainers
* @k8sgpt-ai/maintainers @k8sgpt-ai/k8sgpt-maintainers @k8sgpt-ai/k8sgpt-approvers
* @k8sgpt-ai/maintainers
47  .github/settings.yml (vendored)
@@ -1,47 +0,0 @@
repository:
  name: "k8sgpt"
  description: "Giving Kubernetes SRE superpowers to everyone"
  homepage_url: "https://k8sgpt.ai"
  topics: kubernetes, devops, tooling, openai, sre

  default_branch: main
  allow_squash_merge: true
  allow_merge_commit: true
  allow_rebase_merge: true

  has_wiki: false

teams:
  - name: "maintainers"
    permission: "admin"
  - name: "k8sgpt-maintainers"
    permission: "maintain"
  - name: "k8sgpt-approvers"
    permission: "push"
  - name: "contributors"
    permission: "push"

branches:
  - name: main
    protection:
      required_pull_request_reviews:
        required_approving_review_count: 1
        dismiss_stale_reviews: true
        require_code_owner_reviews: true
        dismissal_restrictions: {}
        code_owner_approval: true
        required_conversation_resolution: true

      required_status_checks:
        strict: true
        contexts:
          - "DCO"

      enforce_admins: true

      required_linear_history: true

      restrictions:
        users: []
        apps: []
        teams: []
119  .github/workflows/build_container.yaml (vendored)
@@ -8,16 +8,13 @@ on:
  pull_request:
    branches:
      - 'main'
      - fix/build-branch
      - '[0-9]+.[1-9][0-9]*.x'
    paths-ignore:
      - "**.md"

env:
  GO_VERSION: "~1.24"
  GO_VERSION: "~1.20"
  IMAGE_NAME: "k8sgpt"
  REGISTRY_IMAGE: ghcr.io/k8sgpt-ai/k8sgpt

defaults:
  run:
    shell: bash
@@ -25,7 +22,7 @@ defaults:
jobs:
  prepare_ci_run:
    name: Prepare CI Run
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    outputs:
      GIT_SHA: ${{ steps.extract_branch.outputs.GIT_SHA }}
      BRANCH: ${{ steps.extract_branch.outputs.BRANCH }}
@@ -36,7 +33,7 @@ jobs:

    steps:
      - name: Check out code
        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5
        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3

      - name: Extract branch name
        id: extract_branch
@@ -54,61 +51,97 @@ jobs:
        id: get_run_type
        run: |
          NON_FORKED_AND_NON_ROBOT_RUN=${{ ( github.actor != 'renovate[bot]' && github.actor != 'dependabot[bot]' ) && ( github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository ) }}
          echo "github.actor != 'renovate[bot]' = ${{ github.actor != 'renovate[bot]' }}"
          echo "github.actor != 'dependabot[bot]' = ${{ github.actor != 'dependabot[bot]' }}"
          echo "github.event_name == 'push' = ${{ github.event_name == 'push' }}"
          echo "github.event.pull_request.head.repo.full_name == github.repository = ${{ github.event.pull_request.head.repo.full_name == github.repository }}"
          echo "NON_FORKED_AND_NON_ROBOT_RUN = $NON_FORKED_AND_NON_ROBOT_RUN"
          echo "NON_FORKED_AND_NON_ROBOT_RUN=$NON_FORKED_AND_NON_ROBOT_RUN" >> "$GITHUB_OUTPUT"

  build-and-push:
    name: Build and Push Multi-arch Image
  build_image:
    name: Build Container Image
    needs: prepare_ci_run
    runs-on: ubuntu-latest
    if: ${{ needs.prepare_ci_run.outputs.NON_FORKED_AND_NON_ROBOT_RUN == 'true' }}
    runs-on: ubuntu-22.04
    env:
      BRANCH: ${{ needs.prepare_ci_run.outputs.BRANCH }}
      DATETIME: ${{ needs.prepare_ci_run.outputs.DATETIME }}
      BUILD_TIME: ${{ needs.prepare_ci_run.outputs.BUILD_TIME }}
      GIT_SHA: ${{ needs.prepare_ci_run.outputs.GIT_SHA }}

      RELEASE_REGISTRY: "localhost:5000/k8sgpt"
    steps:
      - name: Check out code
        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=dev-${{ env.DATETIME }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.K8SGPT_BOT_SECRET }}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3
        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3
        id: buildx
        uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2

      - name: Build and push multi-arch image
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6
      - name: Build Docker Image
        uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4
        with:
          context: .
          platforms: linux/amd64
          file: ./container/Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          target: production
          tags: |
            ${{ env.RELEASE_REGISTRY }}/${{ env.IMAGE_NAME }}:dev-${{ env.DATETIME }}
          build-args: |
            GIT_HASH=${{ env.GIT_SHA }}
            RELEASE_VERSION=dev-${{ env.DATETIME }}
            BUILD_TIME=${{ env.BUILD_TIME }}
          builder: ${{ steps.buildx.outputs.name }}
          push: false
          cache-from: type=gha,scope=${{ github.ref_name }}-${{ env.IMAGE_NAME }}
          cache-to: type=gha,scope=${{ github.ref_name }}-${{ env.IMAGE_NAME }}
          outputs: type=docker,dest=/tmp/${{ env.IMAGE_NAME }}-image.tar

      - name: Upload image as artifact
        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3
        with:
          name: ${{ env.IMAGE_NAME }}-image.tar
          path: /tmp/${{ env.IMAGE_NAME }}-image.tar

  upload_images:
    name: Upload images to ghcr registry
    needs: [ prepare_ci_run, build_image ]
    if: github.event_name == 'push' && needs.prepare_ci_run.outputs.NON_FORKED_AND_NON_ROBOT_RUN == 'true' # only run on push to main/maintenance branches
    runs-on: ubuntu-22.04
    env:
      DATETIME: ${{ needs.prepare_ci_run.outputs.DATETIME }}
      BUILD_TIME: ${{ needs.prepare_ci_run.outputs.BUILD_TIME }}
      GIT_SHA: ${{ needs.prepare_ci_run.outputs.GIT_SHA }}
    permissions:
      packages: write # Needed for pushing images to the registry
      contents: read # Needed for checking out the repository
    steps:
      - name: Check out code
        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3

      - name: Login to GitHub Container Registry
        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2
        with:
          registry: "ghcr.io"
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2

      - name: Build Docker Image
        uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4
        with:
          context: .
          file: ./container/Dockerfile
          platforms: linux/amd64,linux/arm64
          target: production
          tags: |
            ${{ env.REGISTRY_IMAGE }}:${{ env.DATETIME }}
          labels: ${{ steps.meta.outputs.labels }}
          secrets: |
            GIT_AUTH_TOKEN=${{ secrets.K8SGPT_BOT_SECRET }}
            ghcr.io/k8sgpt-ai/${{ env.IMAGE_NAME }}:dev-${{ env.DATETIME }}
          build-args: |
            GIT_HASH=${{ env.GIT_SHA }}
            RELEASE_VERSION=dev-${{ env.DATETIME }}
            BUILD_TIME=${{ env.BUILD_TIME }}
          builder: ${{ steps.buildx.outputs.name }}
          push: true
          cache-from: type=gha,scope=${{ github.ref_name }}-${{ env.IMAGE_NAME }}
          cache-to: type=gha,scope=${{ github.ref_name }}-${{ env.IMAGE_NAME }}
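On push events, the `upload_images` job above publishes multi-arch images tagged `dev-<DATETIME>` to ghcr.io. A quick local sanity check might look like this (a sketch: the tag suffix is a placeholder set per run by `prepare_ci_run`, and the `version` subcommand assumes the image entrypoint is the k8sgpt binary):

```bash
# Pull a dev image pushed by upload_images (substitute a real tag from the registry)
docker pull ghcr.io/k8sgpt-ai/k8sgpt:dev-<DATETIME>
# Assuming the image entrypoint is the k8sgpt binary
docker run --rm ghcr.io/k8sgpt-ai/k8sgpt:dev-<DATETIME> version
```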
18  .github/workflows/golangci_lint.yaml (vendored)
@@ -1,18 +0,0 @@
name: Run golangci-lint

on:
  pull_request:
    branches: [main]

jobs:
  golangci-lint:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5

      - name: golangci-lint
        uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8
        with:
          version: v2.1.0
          only-new-issues: true
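The same lint run can be reproduced locally before opening a PR (a sketch; assumes golangci-lint v2.1.x is installed, matching the version pinned in the workflow above):

```bash
# Mirror the CI lint step locally
golangci-lint run ./...
```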
57  .github/workflows/release.yaml (vendored)
@@ -23,9 +23,9 @@ jobs:
    # Release-please creates a PR that tracks all changes
    steps:
      - name: Checkout
        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5

      - uses: google-github-actions/release-please-action@e4dc86ba9405554aeba3c6bb2d169500e7d3b4ee # v4.1.1
        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3

      - uses: google-github-actions/release-please-action@e0b9d1885d92e9a93d5ce8656de60e3b806e542c # v3
        id: release
        with:
          command: manifest
@@ -36,36 +36,21 @@ jobs:
    if: needs.release-please.outputs.releases_created == 'true'
    permissions:
      contents: write

    needs:
      - release-please
    runs-on: ubuntu-latest
    steps:
      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@main
        with:
          # this might remove tools that are actually needed,
          # if set to "true" but frees about 6 GB
          tool-cache: false
          # all of these default to true, but feel free to set to
          # "false" if necessary for your workflow
          android: false
          dotnet: false
          haskell: false
          large-packages: true
          docker-images: true
          swap-storage: true
      - name: Checkout
        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5
        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
        uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4
        with:
          go-version: '~1.24'
      - name: Download Syft
        uses: anchore/sbom-action/download-syft@55dc4ee22412511ee8c3142cbea40418e6cec693 # v0.17.8
          go-version: '1.20'
      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6
        uses: goreleaser/goreleaser-action@f82d6c1c344bcacabba2c841718984797f664a6b # v4
        with:
          # either 'goreleaser' (default) or 'goreleaser-pro'
          distribution: goreleaser
@@ -73,41 +58,37 @@ jobs:
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.K8SGPT_BOT_SECRET }}
          SLACK_TOKEN: ${{ secrets.SLACK_TOKEN }}
      # - name: Update new version in krew-index
      #   uses: rajatjindal/krew-release-bot@3d9faef30a82761d610544f62afddca00993eef9 # v0.0.47

  build-container:
    if: needs.release-please.outputs.releases_created == 'true'
    needs:
      - release-please
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    permissions:
      contents: write
      packages: write
      id-token: write
    env:
      IMAGE_TAG: ghcr.io/k8sgpt-ai/k8sgpt:${{ needs.release-please.outputs.tag_name }}
      IMAGE_NAME: k8sgpt
    steps:
      - name: Checkout
        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5
        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3
        with:
          submodules: recursive

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3
        uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3
        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2
        with:
          registry: "ghcr.io"
          username: ${{ github.actor }}
          password: ${{ secrets.K8SGPT_BOT_SECRET }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build Docker Image
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6
        uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4
        with:
          context: .
          file: ./container/Dockerfile
@@ -117,18 +98,18 @@ jobs:
            ${{ env.IMAGE_TAG }}
          builder: ${{ steps.buildx.outputs.name }}
          push: true
          cache-from: type=gha,scope=${{ github.ref_name }}-${{ env.IMAGE_TAG }}
          cache-to: type=gha,scope=${{ github.ref_name }}-${{ env.IMAGE_TAG }}
          cache-from: type=gha,scope=${{ github.ref_name }}-${{ env.IMAGE_NAME }}
          cache-to: type=gha,scope=${{ github.ref_name }}-${{ env.IMAGE_NAME }}

      - name: Generate SBOM
        uses: anchore/sbom-action@55dc4ee22412511ee8c3142cbea40418e6cec693 # v0.17.8
        uses: anchore/sbom-action@448520c4f19577ffce70a8317e619089054687e3 # v0.13.4
        with:
          image: ${{ env.IMAGE_TAG }}
          artifact-name: sbom-${{ env.IMAGE_NAME }}
          output-file: ./sbom-${{ env.IMAGE_NAME }}.spdx.json

      - name: Attach SBOM to release
        uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2
        uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1
        with:
          tag_name: ${{ needs.release-please.outputs.tag_name }}
          files: ./sbom-${{ env.IMAGE_NAME }}.spdx.json
          files: ./sbom-${{ env.IMAGE_NAME }}.spdx.json
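The release pipeline above attaches an SPDX SBOM (`sbom-k8sgpt.spdx.json`, per the `output-file` setting) to each GitHub release. A sketch of fetching and inspecting it, assuming the `gh` and `jq` CLIs are available:

```bash
# Download the SBOM uploaded by the "Attach SBOM to release" step
gh release download v0.4.28 --repo k8sgpt-ai/k8sgpt --pattern 'sbom-*.spdx.json'
# Count the packages recorded in the SPDX document
jq '.packages | length' sbom-k8sgpt.spdx.json
```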
4  .github/workflows/semantic_pr.yaml (vendored)
@@ -10,13 +10,13 @@ defaults:
    shell: bash
jobs:
  validate:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    permissions:
      contents: read # Needed for checking out the repository
      pull-requests: read # Needed for reading prs
    steps:
      - name: Validate Pull Request
        uses: amannn/action-semantic-pull-request@fdd4d3ddf614fbcd8c29e4b106d3bbe0cb2c605d # v6.0.1
        uses: amannn/action-semantic-pull-request@c3cd5d1ea3580753008872425915e343e351ab54 # v5.2.0
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
30  .github/workflows/test.yaml (vendored)
@@ -1,30 +0,0 @@
name: Run tests

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

env:
  GO_VERSION: "~1.24"

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5

      - name: Set up Go
        uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
        with:
          go-version: ${{ env.GO_VERSION }}

      - name: Run test
        run: go test ./... -coverprofile=coverage.txt
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
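The test job can be mirrored locally with the same invocation, plus an optional coverage summary in place of the Codecov upload:

```bash
# Same command as the workflow's "Run test" step
go test ./... -coverprofile=coverage.txt
# Local summary instead of uploading to Codecov
go tool cover -func=coverage.txt | tail -n 1
```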
7  .gitignore (vendored)
@@ -1,10 +1,3 @@
.idea
__debug*
.DS_Store
k8sgpt*
!charts/k8sgpt
*.vscode
dist/

bin/
pkg/server/example/example
.goreleaser.yaml
@@ -1,4 +1,3 @@
version: 2
# This is an example .goreleaser.yml file with some sensible defaults.
# Make sure to check the documentation at https://goreleaser.com
before:
@@ -15,32 +14,7 @@ builds:
      - windows
      - darwin
    ldflags:
      - -s -w
      - -X main.version={{.Version}}
      - -X main.commit={{.ShortCommit}}
      - -X main.Date={{.CommitDate}}

nfpms:
  - file_name_template: "{{ .ProjectName }}_{{ .Arch }}"
    maintainer: "K8sGPT Maintainers <contact@k8sgpt.ai>"
    homepage: https://k8sgpt.ai
    description: >-
      K8sGPT is a tool for scanning your kubernetes clusters, diagnosing and triaging issues in simple english. It has SRE experience codified into it’s analyzers and helps to pull out the most relevant information to enrich it with AI.
    license: "Apache-2.0"
    formats:
      - deb
      - rpm
      - apk
    bindir: /usr/bin
    section: utils
    contents:
      - src: ./LICENSE
        dst: /usr/share/doc/k8sgpt/copyright
        file_info:
          mode: 0644

sboms:
  - artifacts: archive
      - -s -w -X main.version={{.Version}}

archives:
  - format: tar.gz
@@ -54,44 +28,26 @@ archives:
      {{- if .Arm }}v{{ .Arm }}{{ end }}
    # use zip for windows archives
    format_overrides:
      - goos: windows
        format: zip
      - goos: windows
        format: zip

brews:
  - name: k8sgpt
    homepage: https://k8sgpt.ai
    repository:
    tap:
      owner: k8sgpt-ai
      name: homebrew-k8sgpt

checksum:
  name_template: "checksums.txt"
  name_template: 'checksums.txt'

snapshot:
  name_template: "{{ incpatch .Version }}-next"

announce:
  slack:
    # Whether its enabled or not.
    #
    # Templates: allowed (since v2.6).
    enabled: true

    # Message template to use while publishing.
    #
    # Default: '{{ .ProjectName }} {{ .Tag }} is out! Check it out at {{ .ReleaseURL }}'.
    # Templates: allowed.
    message_template: "{{ .ProjectName }} release {{.Tag}} is out!"

    # The name of the channel that the user selected as a destination for webhook messages.
    channel: "#general"

    # Set your Webhook's user name.
    username: "K8sGPT"

    # Emoji to use as the icon for this message. Overrides icon_url.
    icon_emoji: ""

    # URL to an image to use as the icon for this message.
    icon_url: ""
changelog:
  skip: true

# The lines beneath this are called `modelines`. See `:help modeline`
# Feel free to remove those if you don't want/use them.
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
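Note that the `-X main.version={{.Version}}` ldflags above only take effect if `package main` declares matching variables (the repository's Makefile injects the same flags for dev builds). A local sketch of the mechanism; the `version` subcommand is an assumption about the CLI:

```bash
# Inject a version string the way goreleaser does, then read it back
go build -ldflags "-s -w -X main.version=0.0.0-local" -o k8sgpt .
./k8sgpt version
```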
110  .krew.yaml
@@ -1,110 +0,0 @@
apiVersion: krew.googlecontainertools.github.com/v1alpha2
kind: Plugin
metadata:
  name: gpt
spec:
  version: {{ .TagName }}
  homepage: https://github.com/k8sgpt-ai/k8sgpt
  shortDescription: "Giving Kubernetes Superpowers to everyone"
  description: |
    A tool for scanning your Kubernetes clusters, diagnosing, and triaging issues in simple English.
  platforms:
    ##########
    # Darwin #
    ##########
    - selector:
        matchLabels:
          os: darwin
          arch: amd64
      {{addURIAndSha "https://github.com/k8sgpt-ai/k8sgpt/releases/download/{{ .TagName }}/k8sgpt_Darwin_x86_64.tar.gz" .TagName | indent 6 }}
      files:
        - from: "k8sgpt"
          to: "kubectl-gpt"
        - from: "LICENSE"
          to: "."
      bin: kubectl-gpt
    - selector:
        matchLabels:
          os: darwin
          arch: arm64
      {{addURIAndSha "https://github.com/k8sgpt-ai/k8sgpt/releases/download/{{ .TagName }}/k8sgpt_Darwin_arm64.tar.gz" .TagName | indent 6 }}
      files:
        - from: "k8sgpt"
          to: "kubectl-gpt"
        - from: "LICENSE"
          to: "."
      bin: kubectl-gpt

    #########
    # Linux #
    #########
    - selector:
        matchLabels:
          os: linux
          arch: amd64
      {{addURIAndSha "https://github.com/k8sgpt-ai/k8sgpt/releases/download/{{ .TagName }}/k8sgpt_Linux_x86_64.tar.gz" .TagName | indent 6 }}
      files:
        - from: "k8sgpt"
          to: "kubectl-gpt"
        - from: "LICENSE"
          to: "."
      bin: kubectl-gpt
    - selector:
        matchLabels:
          os: linux
          arch: arm64
      {{addURIAndSha "https://github.com/k8sgpt-ai/k8sgpt/releases/download/{{ .TagName }}/k8sgpt_Linux_arm64.tar.gz" .TagName | indent 6 }}
      files:
        - from: "k8sgpt"
          to: "kubectl-gpt"
        - from: "LICENSE"
          to: "."
      bin: kubectl-gpt
    - selector:
        matchLabels:
          os: linux
          arch: "386"
      {{addURIAndSha "https://github.com/k8sgpt-ai/k8sgpt/releases/download/{{ .TagName }}/k8sgpt_Linux_i386.tar.gz" .TagName | indent 6 }}
      files:
        - from: "k8sgpt"
          to: "kubectl-gpt"
        - from: "LICENSE"
          to: "."
      bin: kubectl-gpt

    ###########
    # Windows #
    ###########
    - selector:
        matchLabels:
          os: windows
          arch: amd64
      {{addURIAndSha "https://github.com/k8sgpt-ai/k8sgpt/releases/download/{{ .TagName }}/k8sgpt_Windows_x86_64.zip" .TagName | indent 6 }}
      files:
        - from: "k8sgpt"
          to: "kubectl-gpt"
        - from: "LICENSE"
          to: "."
      bin: kubectl-gpt
    - selector:
        matchLabels:
          os: windows
          arch: arm64
      {{addURIAndSha "https://github.com/k8sgpt-ai/k8sgpt/releases/download/{{ .TagName }}/k8sgpt_Windows_arm64.zip" .TagName | indent 6 }}
      files:
        - from: "k8sgpt"
          to: "kubectl-gpt"
        - from: "LICENSE"
          to: "."
      bin: kubectl-gpt
    - selector:
        matchLabels:
          os: windows
          arch: "386"
      {{addURIAndSha "https://github.com/k8sgpt-ai/k8sgpt/releases/download/{{ .TagName }}/k8sgpt_Windows_i386.zip" .TagName | indent 6 }}
      files:
        - from: "k8sgpt"
          to: "kubectl-gpt"
        - from: "LICENSE"
          to: "."
      bin: kubectl-gpt
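Once a manifest like the one above is published to the krew index, installation would look roughly like this (a sketch; assumes the plugin is accepted under the name `gpt`):

```bash
kubectl krew install gpt
kubectl gpt --help   # the binary is installed as kubectl-gpt
```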
.release-please-manifest.json
@@ -1 +1 @@
{".":"0.4.28"}
{".":"0.1.0"}
2109  CHANGELOG.md

CODE_OF_CONDUCT.md
@@ -1,128 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
contact@k8sgpt.ai.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
CONTRIBUTING.md
@@ -2,7 +2,7 @@
We're happy that you want to contribute to this project. Please read the sections to make the process as smooth as possible.

## Requirements
- Golang `1.23`
- Golang `1.20`
- An OpenAI API key
  * OpenAI API keys can be obtained from [OpenAI](https://platform.openai.com/account/api-keys)
  * You can set the API key for k8sgpt using `./k8sgpt auth key`
@@ -17,7 +17,7 @@ We're happy that you want to contribute to this project. Please read the section
- We are also happy to help you find something to work on. Just reach out to us.

**Getting in touch with the community**
* Join our [#k8sgpt slack channel](https://join.slack.com/t/k8sgpt/shared_invite/zt-1rwe5fpzq-VNtJK8DmYbbm~iWL1H34nw)
* Join our [#k8sgpt slack channel](https://slack.cloud-native.io/channels/k8sgpt)
* Introduce yourself on the slack channel or open an issue to let us know that you are interested in contributing

**Discuss issues**
@@ -30,7 +30,7 @@ We're happy that you want to contribute to this project. Please read the section
- Assign yourself to the issue, if you are working on it (if you are not a member of the organization, please leave a comment on the issue)
- Make your changes
- Keep pull requests small and focused, if you have multiple changes, please open multiple PRs
- Create a pull request back to the upstream repository and follow the [pull request template](.github/pull_request_template.md) guidelines.
- Create a pull request back to the upstream repository and follow follow the [pull request template](.github/pull_request_template.md) guidelines.
- Wait for a review and address any comments

**Opening PRs**
215  LICENSE
@@ -1,202 +1,21 @@
MIT License

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright (c) 2023 Alex Jones

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

1. Definitions.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2023 The k8sgpt Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
482  MCP.md
@@ -1,482 +0,0 @@
# K8sGPT Model Context Protocol (MCP) Server

K8sGPT provides a Model Context Protocol (MCP) server that exposes Kubernetes cluster operations as standardized tools, resources, and prompts for AI assistants like Claude, ChatGPT, and other MCP-compatible clients.

## Table of Contents

- [What is MCP?](#what-is-mcp)
- [Quick Start](#quick-start)
- [Server Modes](#server-modes)
- [Available Tools](#available-tools)
- [Available Resources](#available-resources)
- [Available Prompts](#available-prompts)
- [Usage Examples](#usage-examples)
- [Integration with AI Assistants](#integration-with-ai-assistants)
- [HTTP API Reference](#http-api-reference)

## What is MCP?

The Model Context Protocol (MCP) is an open standard that enables AI assistants to securely connect to external data sources and tools. K8sGPT's MCP server exposes Kubernetes operations through this standardized interface, allowing AI assistants to:

- Analyze cluster health and issues
- Query Kubernetes resources
- Access pod logs and events
- Get troubleshooting guidance
- Manage analyzer filters

## Quick Start

### Start the MCP Server

**Stdio mode (for local AI assistants):**
```bash
k8sgpt serve --mcp
```

**HTTP mode (for network access):**
```bash
k8sgpt serve --mcp --mcp-http --mcp-port 8089
```

### Test with curl

```bash
curl -X POST http://localhost:8089/mcp \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/list"
  }'
```

## Server Modes

### Stdio Mode (Default)

Used by local AI assistants like Claude Desktop:

```bash
k8sgpt serve --mcp
```

Configure in your MCP client (e.g., Claude Desktop's `claude_desktop_config.json`):

```json
{
  "mcpServers": {
    "k8sgpt": {
      "command": "k8sgpt",
      "args": ["serve", "--mcp"]
    }
  }
}
```

### HTTP Mode

Used for network access and webhooks:

```bash
k8sgpt serve --mcp --mcp-http --mcp-port 8089
```

The server runs in stateless mode, so no session management is required. Each request is independent.

## Available Tools

The MCP server exposes 12 tools for Kubernetes operations:

### Cluster Analysis

**analyze**
- Analyze Kubernetes resources for issues and problems
- Parameters:
  - `namespace` (optional): Namespace to analyze
  - `explain` (optional): Get AI explanations for issues
  - `filters` (optional): Comma-separated list of analyzers to use

**cluster-info**
- Get Kubernetes cluster information and version

### Resource Management

**list-resources**
- List Kubernetes resources of a specific type
- Parameters:
  - `resourceType` (required): Type of resource (pods, deployments, services, nodes, jobs, cronjobs, statefulsets, daemonsets, replicasets, configmaps, secrets, ingresses, pvcs, pvs)
  - `namespace` (optional): Namespace to query
  - `labelSelector` (optional): Label selector for filtering

**get-resource**
- Get detailed information about a specific Kubernetes resource
- Parameters:
  - `resourceType` (required): Type of resource
  - `name` (required): Resource name
  - `namespace` (optional): Namespace

**list-namespaces**
- List all namespaces in the cluster

### Debugging and Troubleshooting

**get-logs**
- Get logs from a pod container
- Parameters:
  - `podName` (required): Name of the pod
  - `namespace` (optional): Namespace
  - `container` (optional): Container name
  - `tail` (optional): Number of lines to show
  - `previous` (optional): Show logs from previous container instance
  - `sinceSeconds` (optional): Show logs from last N seconds

**list-events**
- List Kubernetes events for debugging
- Parameters:
  - `namespace` (optional): Namespace to query
  - `involvedObjectName` (optional): Filter by object name
  - `involvedObjectKind` (optional): Filter by object kind

### Analyzer Management

**list-filters**
- List all available and active analyzers/filters

**add-filters**
- Add filters to enable specific analyzers
- Parameters:
  - `filters` (required): Comma-separated list of analyzer names

**remove-filters**
- Remove filters to disable specific analyzers
- Parameters:
  - `filters` (required): Comma-separated list of analyzer names

### Integrations

**list-integrations**
- List available integrations (Prometheus, AWS, Keda, Kyverno, etc.)

### Configuration

**config**
- Configure K8sGPT settings including custom analyzers and cache

## Available Resources

Resources provide read-only access to cluster information:

**cluster-info**
- URI: `cluster-info`
- Get information about the Kubernetes cluster

**namespaces**
- URI: `namespaces`
- List all namespaces in the cluster

**active-filters**
- URI: `active-filters`
- Get currently active analyzers/filters

## Available Prompts

Prompts provide guided troubleshooting workflows:

**troubleshoot-pod**
- Interactive pod debugging workflow
- Arguments:
  - `podName` (required): Name of the pod to troubleshoot
  - `namespace` (required): Namespace of the pod

**troubleshoot-deployment**
- Interactive deployment debugging workflow
- Arguments:
  - `deploymentName` (required): Name of the deployment
  - `namespace` (required): Namespace of the deployment

**troubleshoot-cluster**
- General cluster troubleshooting workflow

## Usage Examples

### Example 1: Analyze a Namespace

```bash
curl -X POST http://localhost:8089/mcp \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/call",
    "params": {
      "name": "analyze",
      "arguments": {
        "namespace": "production",
        "explain": "true"
      }
    }
  }'
```

### Example 2: List Pods

```bash
curl -X POST http://localhost:8089/mcp \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc": "2.0",
    "id": 2,
    "method": "tools/call",
    "params": {
      "name": "list-resources",
      "arguments": {
        "resourceType": "pods",
        "namespace": "default"
      }
    }
  }'
```

### Example 3: Get Pod Logs

```bash
curl -X POST http://localhost:8089/mcp \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc": "2.0",
    "id": 3,
    "method": "tools/call",
    "params": {
      "name": "get-logs",
      "arguments": {
        "podName": "nginx-abc123",
        "namespace": "default",
        "tail": "100"
      }
    }
  }'
```

### Example 4: Access a Resource

```bash
curl -X POST http://localhost:8089/mcp \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc": "2.0",
    "id": 4,
    "method": "resources/read",
    "params": {
      "uri": "namespaces"
    }
  }'
```

### Example 5: Get a Troubleshooting Prompt

```bash
curl -X POST http://localhost:8089/mcp \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc": "2.0",
    "id": 5,
    "method": "prompts/get",
    "params": {
      "name": "troubleshoot-pod",
      "arguments": {
        "podName": "nginx-abc123",
        "namespace": "default"
      }
    }
  }'
```

## Integration with AI Assistants

### Claude Desktop

Add to `claude_desktop_config.json`:

```json
{
  "mcpServers": {
    "k8sgpt": {
      "command": "k8sgpt",
      "args": ["serve", "--mcp"]
    }
  }
}
```

Restart Claude Desktop and you'll see k8sgpt tools available in the tool selector.

### Custom MCP Clients

Any MCP-compatible client can connect to the k8sgpt server. For HTTP-based clients:

1. Start the server: `k8sgpt serve --mcp --mcp-http --mcp-port 8089`
2. Connect to: `http://localhost:8089/mcp`
3. Use standard MCP protocol methods: `tools/list`, `tools/call`, `resources/read`, `prompts/get`

## HTTP API Reference

### Endpoint

```
POST http://localhost:8089/mcp
Content-Type: application/json
```

### Request Format

All requests follow the JSON-RPC 2.0 format:

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "method_name",
  "params": {
    ...
  }
}
```

### Discovery Methods

**List Tools**
```json
{"jsonrpc": "2.0", "id": 1, "method": "tools/list"}
```

**List Resources**
```json
{"jsonrpc": "2.0", "id": 2, "method": "resources/list"}
```

**List Prompts**
```json
{"jsonrpc": "2.0", "id": 3, "method": "prompts/list"}
```

### Tool Invocation

```json
{
  "jsonrpc": "2.0",
  "id": 4,
  "method": "tools/call",
  "params": {
    "name": "tool_name",
    "arguments": {
      "arg1": "value1",
      "arg2": "value2"
    }
  }
}
```

### Resource Access

```json
{
  "jsonrpc": "2.0",
  "id": 5,
  "method": "resources/read",
  "params": {
    "uri": "resource_uri"
  }
}
```

### Prompt Access

```json
{
  "jsonrpc": "2.0",
  "id": 6,
  "method": "prompts/get",
  "params": {
    "name": "prompt_name",
    "arguments": {
      "arg1": "value1"
    }
  }
}
```

### Response Format

Successful responses:
```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "result": {
    ...
  }
}
```

Error responses:
```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "error": {
    "code": -32600,
    "message": "Error description"
  }
}
```

## Advanced Configuration

### Custom Port

```bash
k8sgpt serve --mcp --mcp-http --mcp-port 9000
```

### With Specific Backend

```bash
k8sgpt serve --mcp --backend openai
```

### With Kubeconfig

```bash
k8sgpt serve --mcp --kubeconfig ~/.kube/config
```

## Troubleshooting

### Connection Issues

Verify the server is running:
```bash
curl http://localhost:8089/mcp
```

### Permission Issues

Ensure your kubeconfig has appropriate cluster access:
```bash
kubectl cluster-info
```

### Tool Errors

List available tools to verify names:
```bash
curl -X POST http://localhost:8089/mcp \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "id": 1, "method": "tools/list"}'
```

## Learn More

- [MCP Specification](https://modelcontextprotocol.io/)
- [K8sGPT Documentation](https://docs.k8sgpt.ai/)
- [MCP Go Library](https://github.com/mark3labs/mcp-go)
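Building on the HTTP examples in MCP.md, a compact discovery one-liner (a sketch; assumes `jq` is installed and the standard MCP `tools/list` response shape with a `result.tools` array):

```bash
# Print just the tool names exposed by the MCP server
curl -s -X POST http://localhost:8089/mcp \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "id": 1, "method": "tools/list"}' \
  | jq -r '.result.tools[].name'
```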
163
Makefile
@@ -1,163 +0,0 @@
|
||||
# Copyright 2023 K8sgpt AI. All rights reserved.
# Use of this source code is governed by a MIT style
# license that can be found in the LICENSE file.

# ==============================================================================
# define the default goal
#
ROOT_PACKAGE=github.com/k8sgpt-ai/k8sgpt

SHELL := /bin/bash
DIRS=$(shell ls)
GO=go
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)

.DEFAULT_GOAL := help

# include the common makefile
COMMON_SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
# ROOT_DIR: root directory of the code base
ifeq ($(origin ROOT_DIR),undefined)
ROOT_DIR := $(abspath $(shell cd $(COMMON_SELF_DIR)/. && pwd -P))
endif
# OUTPUT_DIR: the directory where the build output is stored.
ifeq ($(origin OUTPUT_DIR),undefined)
OUTPUT_DIR := $(ROOT_DIR)/bin
$(shell mkdir -p $(OUTPUT_DIR))
endif

ifeq ($(origin VERSION), undefined)
VERSION := $(shell git describe --abbrev=0 --dirty --always --tags | sed 's/-/./g')
endif

# Check if the tree is dirty. Defaults to dirty (maybe you should commit?)
GIT_TREE_STATE:="dirty"
ifeq (, $(shell git status --porcelain 2>/dev/null))
GIT_TREE_STATE="clean"
endif
GIT_COMMIT:=$(shell git rev-parse HEAD)

IMG ?= ghcr.io/k8sgpt-ai/k8sgpt:latest

BUILDFILE = "./main.go"
BUILDAPP = "$(OUTPUT_DIR)/k8sgpt"

.PHONY: all
all: tidy add-copyright lint cover build

# ==============================================================================
# Targets

## build: Build binaries by default
.PHONY: build
build:
	@echo "$(shell go version)"
	@echo "===========> Building binary $(BUILDAPP) *[Git Info]: $(VERSION)-$(GIT_COMMIT)"
	@export CGO_ENABLED=0 && go build -o $(BUILDAPP) -ldflags "-s -w -X main.version=dev -X main.commit=$$(git rev-parse --short HEAD) -X main.date=$$(date +%FT%TZ)" $(BUILDFILE)

## tidy: tidy go.mod
.PHONY: tidy
tidy:
	@$(GO) mod tidy

## deploy: Deploy k8sgpt
.PHONY: deploy
deploy: helm
	@echo "===========> Deploying k8sgpt"
	$(HELM) install k8sgpt charts/k8sgpt -n k8sgpt --create-namespace

## update: Update k8sgpt
.PHONY: update
update: helm
	@echo "===========> Updating k8sgpt"
	$(HELM) upgrade k8sgpt charts/k8sgpt -n k8sgpt

## undeploy: Undeploy k8sgpt
.PHONY: undeploy
undeploy: helm
	@echo "===========> Undeploying k8sgpt"
	$(HELM) uninstall k8sgpt -n k8sgpt

## docker-build: Build docker image
.PHONY: docker-build
docker-build:
	@echo "===========> Building docker image"
	docker buildx build --build-arg=VERSION="$$(git describe --tags --abbrev=0)" --build-arg=COMMIT="$$(git rev-parse --short HEAD)" --build-arg DATE="$$(date +%FT%TZ)" --platform="linux/amd64,linux/arm64" -t ${IMG} -f container/Dockerfile . --push

## docker-build-local: Build docker image for local testing
.PHONY: docker-build-local
docker-build-local:
	@echo "===========> Building docker image for local testing"
	docker build --build-arg=VERSION="$$(git describe --tags --abbrev=0)" --build-arg=COMMIT="$$(git rev-parse --short HEAD)" --build-arg DATE="$$(date +%FT%TZ)" -t k8sgpt:local -f container/Dockerfile .

## fmt: Run go fmt against code.
.PHONY: fmt
fmt:
	@$(GO) fmt ./...

## vet: Run go vet against code.
.PHONY: vet
vet:
	@$(GO) vet ./...

## lint: Run golangci-lint against code.
.PHONY: lint
lint:
	@golangci-lint run -v --timeout=5m ./...

## style: Code style -> fmt,vet,lint
.PHONY: style
style: fmt vet lint

## test: Run unit tests
.PHONY: test
test:
	@echo "===========> Run unit test"
	@$(GO) test ./...

## cover: Run unit tests with coverage
.PHONY: cover
cover: test
	@$(GO) test -cover

## clean: Clean all builds
.PHONY: clean
clean:
	@echo "===========> Cleaning all builds OUTPUT_DIR($(OUTPUT_DIR))"
	@-rm -vrf $(OUTPUT_DIR)
	@echo "===========> End clean..."

## help: Show this help info.
.PHONY: help
help: Makefile
	@printf "\n\033[1mUsage: make <TARGETS> ...\033[0m\n\n\\033[1mTargets:\\033[0m\n\n"
	@sed -n 's/^##//p' $< | awk -F':' '{printf "\033[36m%-28s\033[0m %s\n", $$1, $$2}' | sed -e 's/^/ /'

## copyright.verify: Validate boilerplate headers for assigned files
.PHONY: copyright.verify
copyright.verify: tools.verify.addlicense
	@echo "===========> Validate boilerplate headers for assigned files starting in the $(ROOT_DIR) directory"
	# @addlicense -v -check -ignore **/test/** -f $(LICENSE_TEMPLATE) $(CODE_DIRS)
	@echo "===========> End of boilerplate headers check..."

## copyright.add: Add the boilerplate headers for all files
.PHONY: copyright.add
copyright.add: tools.verify.addlicense
	@echo "===========> Adding $(LICENSE_TEMPLATE) the boilerplate headers for all files"
	# @addlicense -y $(shell date +"%Y") -v -c "K8sgpt AI." -f $(LICENSE_TEMPLATE) $(CODE_DIRS)
	@echo "===========> End the copyright is added..."

# =====
# Tools

HELM_VERSION ?= v3.11.3

helm:
	if ! test -f $(OUTPUT_DIR)/helm-$(GOOS)-$(GOARCH); then \
		curl -L https://get.helm.sh/helm-$(HELM_VERSION)-$(GOOS)-$(GOARCH).tar.gz | tar xz; \
		mv $(GOOS)-$(GOARCH)/helm $(OUTPUT_DIR)/helm-$(GOOS)-$(GOARCH); \
		chmod +x $(OUTPUT_DIR)/helm-$(GOOS)-$(GOARCH); \
		rm -rf ./$(GOOS)-$(GOARCH)/; \
	fi
HELM=$(OUTPUT_DIR)/helm-$(GOOS)-$(GOARCH)
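
For reference, the most common invocations of the targets above look like this (assuming Go, Docker, and golangci-lint are installed locally):

```bash
make build               # compile the k8sgpt binary into ./bin
make test                # run the unit tests
make docker-build-local  # build a local container image tagged k8sgpt:local
```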
735
README.md
@@ -2,744 +2,101 @@
<source media="(prefers-color-scheme: dark)" srcset="./images/banner-white.png" width="600px;">
<img alt="Text changing depending on mode. Light: 'So light!' Dark: 'So dark!'" src="./images/banner-black.png" width="600px;">
</picture>
<br/>




[](https://bestpractices.coreinfrastructure.org/projects/7272)
[](https://docs.k8sgpt.ai/)
[](https://app.fossa.com/projects/git%2Bgithub.com%2Fk8sgpt-ai%2Fk8sgpt?ref=badge_shield)
[](https://opensource.org/licenses/Apache-2.0)
[](https://github.com/k8sgpt-ai/k8sgpt)
[](https://codecov.io/github/k8sgpt-ai/k8sgpt)


`k8sgpt` is a tool for scanning your Kubernetes clusters, diagnosing, and triaging issues in simple English.
It has SRE experience codified into its analyzers and helps to pull out the most relevant information to enrich it with AI.

K8sgpt is focused on triaging and diagnosing issues in your cluster. It is a tool for SRE, Platform & DevOps engineers to help them understand what is going on in their cluster, cutting through the noise of logs and multiple tools to find the root cause of an issue.

_Out of the box integration with OpenAI, Azure, Cohere, Amazon Bedrock, Google Gemini and local models._

<a href="https://www.producthunt.com/posts/k8sgpt?utm_source=badge-featured&utm_medium=badge&utm_souce=badge-k8sgpt" target="_blank"><img src="https://api.producthunt.com/widgets/embed-image/v1/featured.svg?post_id=389489&theme=light" alt="K8sGPT - K8sGPT gives Kubernetes Superpowers to everyone | Product Hunt" style="width: 250px; height: 54px;" width="250" height="54" /></a> <a href="https://hellogithub.com/repository/9dfe44c18dfb4d6fa0181baf8b2cf2e1" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=9dfe44c18dfb4d6fa0181baf8b2cf2e1&claim_uid=gqG4wmzkMrP0eFy" alt="Featured|HelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>

<img src="images/demo4.gif" width="650px">
# Table of Contents

- [Overview](#k8sgpt)
- [Installation](#cli-installation)
- [Quick Start](#quick-start)
- [Analyzers](#analyzers)
- [Examples](#examples)
- [LLM AI Backends](#llm-ai-backends)
- [Key Features](#key-features)
- [Model Context Protocol (MCP)](#model-context-protocol-mcp)
- [Documentation](#documentation)
- [Contributing](#contributing)
- [Community](#community)
- [License](#license)

# CLI Installation

### Linux/Mac via brew

```sh
brew install k8sgpt
```

or

```sh
brew tap k8sgpt-ai/k8sgpt
brew install k8sgpt
```

<details>
<summary>RPM-based installation (RedHat/CentOS/Fedora)</summary>

**32 bit:**

<!---x-release-please-start-version-->

```
sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.28/k8sgpt_386.rpm
```
<!---x-release-please-end-->

**64 bit:**

<!---x-release-please-start-version-->
```
sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.28/k8sgpt_amd64.rpm
```
<!---x-release-please-end-->
</details>

<details>
<summary>DEB-based installation (Ubuntu/Debian)</summary>

**32 bit:**

<!---x-release-please-start-version-->

```
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.28/k8sgpt_386.deb
sudo dpkg -i k8sgpt_386.deb
```

<!---x-release-please-end-->

**64 bit:**

<!---x-release-please-start-version-->

```
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.28/k8sgpt_amd64.deb
sudo dpkg -i k8sgpt_amd64.deb
```

<!---x-release-please-end-->
</details>

<details>

<summary>APK-based installation (Alpine)</summary>

**32 bit:**

<!---x-release-please-start-version-->
```
wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.28/k8sgpt_386.apk
apk add --allow-untrusted k8sgpt_386.apk
```
<!---x-release-please-end-->

**64 bit:**

<!---x-release-please-start-version-->
```
wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.28/k8sgpt_amd64.apk
apk add --allow-untrusted k8sgpt_amd64.apk
```
<!---x-release-please-end-->
</details>

<details>
<summary>Failing Installation on WSL or Linux (missing gcc)</summary>

When installing Homebrew on WSL or Linux, you may encounter the following error:

```
==> Installing k8sgpt from k8sgpt-ai/k8sgpt Error: The following formula cannot be installed from a bottle and must be
built from the source. k8sgpt Install Clang or run brew install gcc.
```

If you install gcc as suggested, the problem will persist. Therefore, you need to install the build-essential package.

```
sudo apt-get update
sudo apt-get install build-essential
```

</details>

### Windows

- Download the latest Windows binaries of **k8sgpt** from the [Release](https://github.com/k8sgpt-ai/k8sgpt/releases) tab based on your system architecture.
- Extract the downloaded package to your desired location and add the binary location to the system _PATH_ environment variable.

## Operator Installation

To install within a Kubernetes cluster please use our `k8sgpt-operator` with installation instructions available [here](https://github.com/k8sgpt-ai/k8sgpt-operator)

_This mode of operation is ideal for continuous monitoring of your cluster and can integrate with your existing monitoring such as Prometheus and Alertmanager._

## Quick Start

- Currently, the default AI provider is OpenAI; you will need to generate an API key from [OpenAI](https://openai.com)
  - You can do this by running `k8sgpt generate` to open a browser link to generate it
- Run `k8sgpt auth add` to set it in k8sgpt.
  - You can provide the password directly using the `--password` flag.
- Run `k8sgpt filters` to manage the active filters used by the analyzer. By default, all filters are executed during analysis.
- Run `k8sgpt analyze` to run a scan.
- Use `k8sgpt analyze --explain` to get a more detailed explanation of the issues.
- You can also run `k8sgpt analyze --with-doc` (with or without the explain flag) to get the official documentation from Kubernetes.
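
Putting those steps together, a typical first run looks like the following sketch; the `$OPENAI_API_KEY` value is a placeholder for your own key:

```bash
# Open a browser to create an OpenAI API key, then register it with k8sgpt.
k8sgpt generate
k8sgpt auth add --backend openai --password "$OPENAI_API_KEY"

# Scan the cluster and ask the backend to explain any findings.
k8sgpt analyze --explain
```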

# Using with Claude Desktop

K8sGPT can be integrated with Claude Desktop to provide AI-powered Kubernetes cluster analysis. This integration requires K8sGPT v0.4.14 or later.

## Prerequisites

1. Install K8sGPT v0.4.14 or later:
   ```sh
   brew install k8sgpt
   ```

2. Install Claude Desktop from the official website.

3. Configure K8sGPT with your preferred AI backend:
   ```sh
   k8sgpt auth
   ```

## Setup

1. Start the K8sGPT MCP server:
   ```sh
   k8sgpt serve --mcp
   ```

2. In Claude Desktop:
   - Open Settings
   - Navigate to the Integrations section
   - Add K8sGPT as a new integration
   - The MCP server will be automatically detected

3. Configure Claude Desktop with the following JSON:

```json
{
  "mcpServers": {
    "k8sgpt": {
      "command": "k8sgpt",
      "args": [
        "serve",
        "--mcp"
      ]
    }
  }
}
```

## Usage

Once connected, you can use Claude Desktop to:
- Analyze your Kubernetes cluster
- Get detailed insights about cluster health
- Receive recommendations for fixing issues
- Query cluster information

Example commands in Claude Desktop:
- "Analyze my Kubernetes cluster"
- "What's the health status of my cluster?"
- "Show me any issues in the default namespace"

## Troubleshooting

If you encounter connection issues:
1. Ensure K8sGPT is running with the MCP server enabled
2. Verify your Kubernetes cluster is accessible
3. Check that your AI backend is properly configured
4. Restart both K8sGPT and Claude Desktop

For more information, visit our [documentation](https://docs.k8sgpt.ai).

## Analyzers

K8sGPT uses analyzers to triage and diagnose issues in your cluster. It has a set of analyzers that are built in, but you will be able to write your own analyzers.

### Built in analyzers

#### Enabled by default

- [x] podAnalyzer
- [x] pvcAnalyzer
- [x] rsAnalyzer
- [x] serviceAnalyzer
- [x] eventAnalyzer
- [x] ingressAnalyzer
- [x] statefulSetAnalyzer
- [x] deploymentAnalyzer
- [x] jobAnalyzer
- [x] cronJobAnalyzer
- [x] nodeAnalyzer
- [x] mutatingWebhookAnalyzer
- [x] validatingWebhookAnalyzer
- [x] configMapAnalyzer

#### Optional

- [x] hpaAnalyzer
- [x] pdbAnalyzer
- [x] networkPolicyAnalyzer
- [x] gatewayClass
- [x] gateway
- [x] httproute
- [x] logAnalyzer
- [x] storageAnalyzer
- [x] securityAnalyzer
- [x] CatalogSource
- [x] ClusterCatalog
- [x] ClusterExtension
- [x] ClusterService
- [x] ClusterServiceVersion
- [x] OperatorGroup
- [x] InstallPlan
- [x] Subscription

## Usage

```
Usage:
  k8sgpt [command]

Available Commands:
  analyze     This command will find problems within your Kubernetes cluster
  auth        Authenticate with your chosen backend
  completion  Generate the autocompletion script for the specified shell
  generate    Generate Key for your chosen backend (opens browser)
  help        Help about any command
  version     Print the version number of k8sgpt

Flags:
      --config string       config file (default is $HOME/.k8sgpt.git.yaml)
  -h, --help                help for k8sgpt
      --kubeconfig string   Path to a kubeconfig. Only required if out-of-cluster.
      --master string       The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.
  -t, --toggle              Help message for toggle

Use "k8sgpt [command] --help" for more information about a command.
```

## Examples

_Run a scan with the default analyzers_

```
k8sgpt generate
k8sgpt auth add
k8sgpt analyze --explain
k8sgpt analyze --explain --with-doc
```

_Filter on resource_

```
k8sgpt analyze --explain --filter=Service
```

_Filter by namespace_

```
k8sgpt analyze --explain --filter=Pod --namespace=default
```

_Output to JSON_

```
k8sgpt analyze --explain --filter=Service --output=json
```

_Anonymize during explain_

```
k8sgpt analyze --explain --filter=Service --output=json --anonymize
```

<details>
<summary> Using filters </summary>

_List filters_

```
k8sgpt filters list
```

_Add default filters_

```
k8sgpt filters add [filter(s)]
```

### Examples:

- Simple filter: `k8sgpt filters add Service`
- Multiple filters: `k8sgpt filters add Ingress,Pod`

_Remove default filters_

```
k8sgpt filters remove [filter(s)]
```

### Examples:

- Simple filter: `k8sgpt filters remove Service`
- Multiple filters: `k8sgpt filters remove Ingress,Pod`

</details>

<details>

<summary> Additional commands </summary>

_List configured backends_

```
k8sgpt auth list
```

_Update configured backends_

```
k8sgpt auth update $MY_BACKEND1,$MY_BACKEND2..
```

_Remove configured backends_

```
k8sgpt auth remove -b $MY_BACKEND1,$MY_BACKEND2..
```

_List integrations_

```
k8sgpt integrations list
```

_Activate integrations_

```
k8sgpt integrations activate [integration(s)]
```

_Use integration_

```
k8sgpt analyze --filter=[integration(s)]
```

_Deactivate integrations_

```
k8sgpt integrations deactivate [integration(s)]
```

_Serve mode_

```
k8sgpt serve
```

_Serve mode with MCP (Model Context Protocol)_

```
# Enable MCP server on default port 8089
k8sgpt serve --mcp --mcp-http

# Enable MCP server on custom port
k8sgpt serve --mcp --mcp-http --mcp-port 8089

# Full serve mode with MCP
k8sgpt serve --mcp --mcp-http --port 8080 --metrics-port 8081 --mcp-port 8089
```

The MCP server enables integration with tools like Claude Desktop and other MCP-compatible clients. It runs on port 8089 by default and provides:
- Kubernetes cluster analysis via MCP protocol
- Resource information and health status
- AI-powered issue explanations and recommendations

For Helm chart deployment with MCP support, see the `charts/k8sgpt/values-mcp-example.yaml` file.

_Analysis with serve mode_

```
grpcurl -plaintext -d '{"namespace": "k8sgpt", "explain" : "true"}' localhost:8080 schema.v1.ServerAnalyzerService/Analyze
{
  "status": "OK"
}
```
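
Assuming the serve-mode gRPC endpoint exposes server reflection, `grpcurl` can also enumerate the available services before calling them; this is a sketch and may not apply to every build:

```bash
# List the gRPC services exposed on the serve-mode port (requires reflection).
grpcurl -plaintext localhost:8080 list
```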

_Analysis with custom headers_

```
k8sgpt analyze --explain --custom-headers CustomHeaderKey:CustomHeaderValue
```

_Print analysis stats_

```
k8sgpt analyze -s
```

The stats mode allows for debugging and understanding the time taken by an analysis by displaying the statistics of each analyzer.

```
- Analyzer Ingress took 47.125583ms
- Analyzer PersistentVolumeClaim took 53.009167ms
- Analyzer CronJob took 57.517792ms
- Analyzer Deployment took 156.6205ms
- Analyzer Node took 160.109833ms
- Analyzer ReplicaSet took 245.938333ms
- Analyzer StatefulSet took 448.0455ms
- Analyzer Pod took 5.662594708s
- Analyzer Service took 38.583359166s
```

_Diagnostic information_

To collect diagnostic information use the following command to create a `dump_<timestamp>_json` in your local directory.

```
k8sgpt dump
```

</details>

## LLM AI Backends

K8sGPT uses the chosen LLM (generative AI provider) when you want to explain the analysis results with the `--explain` flag, e.g. `k8sgpt analyze --explain`. You can use the `--backend` flag to specify a configured provider (it's `openai` by default).

You can list available providers using `k8sgpt auth list`:

```
Default:
> openai
Active:
Unused:
> openai
> localai
> ollama
> azureopenai
> cohere
> amazonbedrock
> amazonsagemaker
> google
> huggingface
> noopai
> googlevertexai
> watsonxai
> customrest
> ibmwatsonxai
```

For detailed documentation on how to configure and use each provider, see [here](https://docs.k8sgpt.ai/reference/providers/backend/).

_To set a new default provider_

```
k8sgpt auth default -p azureopenai
Default provider set to azureopenai
```

_Using Amazon Bedrock with inference profiles_

_System Inference Profile_

```
k8sgpt auth add --backend amazonbedrock --providerRegion us-east-1 --model arn:aws:bedrock:us-east-1:123456789012:inference-profile/my-inference-profile
```

_Application Inference Profile_

```
k8sgpt auth add --backend amazonbedrock --providerRegion us-east-1 --model arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/2uzp4s0w39t6
```
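
Since the backend is just a flag at analysis time, the configured default can also be overridden for a single run, for example:

```bash
# Use the Amazon Bedrock provider for this analysis only.
k8sgpt analyze --explain --backend amazonbedrock
```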

## Key Features

<details>
<summary> Anonymization </summary>

With this option, the data is anonymized before being sent to the AI backend. During the analysis execution, `k8sgpt` retrieves sensitive data (Kubernetes object names, labels, etc.). This data is masked when sent to the AI backend and replaced by a key that can be used to de-anonymize the data when the solution is returned to the user.

1. Error reported during analysis:

```bash
Error: HorizontalPodAutoscaler uses StatefulSet/fake-deployment as ScaleTargetRef which does not exist.
```

2. Payload sent to the AI backend:

```bash
Error: HorizontalPodAutoscaler uses StatefulSet/tGLcCRcHa1Ce5Rs as ScaleTargetRef which does not exist.
```

3. Payload returned by the AI:

```bash
The Kubernetes system is trying to scale a StatefulSet named tGLcCRcHa1Ce5Rs using the HorizontalPodAutoscaler, but it cannot find the StatefulSet. The solution is to verify that the StatefulSet name is spelled correctly and exists in the same namespace as the HorizontalPodAutoscaler.
```

4. Payload returned to the user:

```bash
The Kubernetes system is trying to scale a StatefulSet named fake-deployment using the HorizontalPodAutoscaler, but it cannot find the StatefulSet. The solution is to verify that the StatefulSet name is spelled correctly and exists in the same namespace as the HorizontalPodAutoscaler.
```

### Further Details

Note: **Anonymization does not currently apply to events.**

_In a few analyzers, such as Pod, we feed the event messages (which are not known beforehand) to the AI backend, so we are not masking them for the **time being**._

- The following is the list of analyzers in which data is **being masked**:

  - Statefulset
  - Service
  - PodDisruptionBudget
  - Node
  - NetworkPolicy
  - Ingress
  - HPA
  - Deployment
  - Cronjob

- The following is the list of analyzers in which data is **not being masked**:

  - ReplicaSet
  - PersistentVolumeClaim
  - Pod
  - Log
  - **_\*Events_**

**\*Note**:

- k8sgpt will not mask the above analyzers because they do not send any identifying information, except for the **Events** analyzer.
- Masking for the **Events** analyzer is scheduled in the near future, as tracked in this [issue](https://github.com/k8sgpt-ai/k8sgpt/issues/560). _Further research has to be made to understand the patterns and be able to mask the sensitive parts of an event, such as pod name, namespace, etc._

- The following is the list of fields which are **not being masked**:

  - Describe
  - ObjectStatus
  - Replicas
  - ContainerStatus
  - **_\*Event Message_**
  - ReplicaStatus
  - Count (Pod)

**\*Note**:

- It is quite possible the payload of the event message might contain something like "super-secret-project-pod-X crashed", which we don't currently redact _(scheduled in the near future, as tracked in this [issue](https://github.com/k8sgpt-ai/k8sgpt/issues/560))_.

### Proceed with care

- The K8sGPT team recommends using an entirely different backend **(a local model) in critical production environments**. By using a local model, you can rest assured that everything stays within your DMZ, and nothing is leaked.
- If there is any uncertainty about the possibility of sending data to a public LLM (OpenAI, Azure AI) and it poses a risk to business-critical operations, then the use of a public LLM should be avoided based on personal assessment and the jurisdiction of risks involved.

</details>

## Upcoming major milestones

- [ ] Multiple AI backend support
- [ ] Custom AI/ML model backend support
- [ ] Custom analyzers

<details>
<summary> Configuration management</summary>

`k8sgpt` stores config data in the `$XDG_CONFIG_HOME/k8sgpt/k8sgpt.yaml` file. The data is stored in plain text, including your OpenAI key.

Config file locations:

| OS      | Path                                             |
| ------- | ------------------------------------------------ |
| MacOS   | ~/Library/Application Support/k8sgpt/k8sgpt.yaml |
| Linux   | ~/.config/k8sgpt/k8sgpt.yaml                     |
| Windows | %LOCALAPPDATA%/k8sgpt/k8sgpt.yaml                |

### What about kubectl-ai?

The kubectl-ai [project](https://github.com/sozercan/kubectl-ai) uses AI to create manifests and apply them to the cluster. That is not what we are trying to do here; it focuses on writing YAML manifests.

</details>

<details>
<summary> Remote caching </summary>

There may be scenarios where caching remotely is preferred.
In these scenarios K8sGPT supports AWS S3, Azure Blob storage, or Google Cloud Storage integration.

<em>Note: You can configure and use only one remote cache at a time</em>

_Adding a remote cache_

- AWS S3
  - _As a prerequisite `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` are required as environmental variables._
  - Configuration, `k8sgpt cache add s3 --region <aws region> --bucket <name>`
  - Minio configuration with an HTTP endpoint `k8sgpt cache add s3 --bucket <name> --endpoint <http://localhost:9000>`
  - Minio configuration with an HTTPS endpoint, skipping TLS verification `k8sgpt cache add s3 --bucket <name> --endpoint <https://localhost:9000> --insecure`
  - K8sGPT will create the bucket if it does not exist
- Azure Storage
  - We support a number of [techniques](https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication?tabs=bash#2-authenticate-with-azure) to authenticate against Azure
  - Configuration, `k8sgpt cache add azure --storageacc <storage account name> --container <container name>`
  - K8sGPT assumes that the storage account already exists, and it will create the container if it does not exist
  - It is the **user's** responsibility to grant specific permissions to their identity in order to be able to upload blob files and create SA containers (e.g. Storage Blob Data Contributor)
- Google Cloud Storage
  - _As a prerequisite `GOOGLE_APPLICATION_CREDENTIALS` is required as an environmental variable._
  - Configuration, `k8sgpt cache add gcs --region <gcp region> --bucket <name> --projectid <project id>`
  - K8sGPT will create the bucket if it does not exist

_Listing cache items_

```
k8sgpt cache list
```

_Purging an object from the cache_
Note: purging an object using this command will delete upstream files, so it requires appropriate permissions.

```
k8sgpt cache purge $OBJECT_NAME
```

_Removing the remote cache_
Note: this will not delete the upstream S3 bucket or Azure storage container

```
k8sgpt cache remove
```

</details>

<details>
<summary> Custom Analyzers</summary>

There may be scenarios where you wish to write your own analyzer in a language of your choice.
K8sGPT now supports the ability to do so by abiding by the [schema](https://github.com/k8sgpt-ai/schemas/blob/main/protobuf/schema/v1/custom_analyzer.proto) and serving the analyzer for consumption.
To do so, define the analyzer within the K8sGPT configuration and it will add it into the scanning process.
In addition to this you will need to enable the following flag on analysis:

```
k8sgpt analyze --custom-analysis
```

Here is an example localhost analyzer in [Rust](https://github.com/k8sgpt-ai/host-analyzer).
When this is run on `localhost:8080`, the K8sGPT config can pick it up with the following additions:

```
custom_analyzers:
  - name: host-analyzer
    connection:
      url: localhost
      port: 8080
```

This now gives the ability to pass through hostOS information (from this analyzer example) to K8sGPT to use as context with normal analysis.

_See the docs on how to write a custom analyzer._

_Listing configured custom analyzers_

```
k8sgpt custom-analyzer list
```

_Adding a custom analyzer without install_

```
k8sgpt custom-analyzer add --name my-custom-analyzer --port 8085
```

_Removing a custom analyzer_

```
k8sgpt custom-analyzer remove --names "my-custom-analyzer,my-custom-analyzer-2"
```

</details>

## Model Context Protocol (MCP)

K8sGPT provides a Model Context Protocol server that exposes Kubernetes operations as standardized tools for AI assistants like Claude, ChatGPT, and other MCP-compatible clients.

**Start the MCP server:**

Stdio mode (for local AI assistants):
```bash
k8sgpt serve --mcp
```

HTTP mode (for network access):
```bash
k8sgpt serve --mcp --mcp-http --mcp-port 8089
```

**Features:**
- 12 tools for cluster analysis, resource management, and debugging
- 3 resources for cluster information access
- 3 interactive troubleshooting prompts
- Stateless HTTP mode for one-off invocations
- Full integration with Claude Desktop and other MCP clients

**Learn more:** See [MCP.md](MCP.md) for complete documentation, usage examples, and integration guides.

## Documentation

Find our official documentation [here](https://docs.k8sgpt.ai).

## Contributing

Please read our [contributing guide](./CONTRIBUTING.md).

## Community

Find us on [Slack](https://join.slack.com/t/k8sgpt/shared_invite/zt-332vhyaxv-bfjJwHZLXWVCB3QaXafEYQ)

<a href="https://github.com/k8sgpt-ai/k8sgpt/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=k8sgpt-ai/k8sgpt" />
</a>

## License

[](https://app.fossa.com/projects/git%2Bgithub.com%2Fk8sgpt-ai%2Fk8sgpt?ref=badge_large)

11
SECURITY.md
@@ -1,11 +0,0 @@
# Security Policy

## Supported Versions

We currently support the latest release for security patching and will deploy forward releases.

For example, if there is a vulnerability in release `0.1.0`, we will fix that release in version `0.1.1-fix` or `0.1.1`.

## Reporting a Vulnerability

If you are aware of a vulnerability, please disclose it responsibly to contact@k8sgpt.ai or to one of our maintainers in our Slack community.

@@ -1,83 +0,0 @@
# Supported AI Providers and Models in K8sGPT

K8sGPT supports a variety of AI/LLM providers (backends). Some providers have a fixed set of supported models, while others allow you to specify any model supported by the provider.

---

## Providers and Supported Models

### OpenAI
- **Model:** User-configurable (any model supported by OpenAI, e.g., `gpt-3.5-turbo`, `gpt-4`, etc.)

### Azure OpenAI
- **Model:** User-configurable (any model deployed in your Azure OpenAI resource)

### LocalAI
- **Model:** User-configurable (default: `llama3`)

### Ollama
- **Model:** User-configurable (default: `llama3`, others can be specified)

### NoOpAI
- **Model:** N/A (no real model, used for testing)

### Cohere
- **Model:** User-configurable (any model supported by Cohere)

### Amazon Bedrock
- **Supported Models:**
  - anthropic.claude-sonnet-4-20250514-v1:0
  - us.anthropic.claude-sonnet-4-20250514-v1:0
  - eu.anthropic.claude-sonnet-4-20250514-v1:0
  - apac.anthropic.claude-sonnet-4-20250514-v1:0
  - us.anthropic.claude-3-7-sonnet-20250219-v1:0
  - eu.anthropic.claude-3-7-sonnet-20250219-v1:0
  - apac.anthropic.claude-3-7-sonnet-20250219-v1:0
  - anthropic.claude-3-5-sonnet-20240620-v1:0
  - us.anthropic.claude-3-5-sonnet-20241022-v2:0
  - anthropic.claude-v2
  - anthropic.claude-v1
  - anthropic.claude-instant-v1
  - ai21.j2-ultra-v1
  - ai21.j2-jumbo-instruct
  - amazon.titan-text-express-v1
  - amazon.nova-pro-v1:0
  - eu.amazon.nova-pro-v1:0
  - us.amazon.nova-pro-v1:0
  - amazon.nova-lite-v1:0
  - eu.amazon.nova-lite-v1:0
  - us.amazon.nova-lite-v1:0
  - anthropic.claude-3-haiku-20240307-v1:0

> **Note:**
> If you use an AWS Bedrock inference profile ARN (e.g., `arn:aws:bedrock:us-east-1:<account>:application-inference-profile/<id>`) as the model, you must still provide a valid modelId (e.g., `anthropic.claude-3-sonnet-20240229-v1:0`). K8sGPT will automatically set the required `X-Amzn-Bedrock-Inference-Profile-ARN` header for you when making requests to Bedrock.

### Amazon SageMaker
- **Model:** User-configurable (any model deployed in your SageMaker endpoint)

### Google GenAI
- **Model:** User-configurable (any model supported by Google GenAI, e.g., `gemini-pro`)

### Huggingface
- **Model:** User-configurable (any model supported by the Huggingface Inference API)

### Google VertexAI
- **Supported Models:**
  - gemini-1.0-pro-001

### OCI GenAI
- **Model:** User-configurable (any model supported by OCI GenAI)

### Custom REST
- **Model:** User-configurable (any model your custom REST endpoint supports)

### IBM Watsonx
- **Supported Models:**
  - ibm/granite-13b-chat-v2

### Groq
- **Model:** User-configurable (any model supported by Groq, e.g., `llama-3.3-70b-versatile`, `mixtral-8x7b-32768`)

---

For more details on configuring each provider and model, refer to the official K8sGPT documentation and the provider's own documentation.

@@ -1,6 +0,0 @@
apiVersion: v2
appVersion: v0.4.23 #x-release-please-version
description: A Helm chart for K8SGPT
name: k8sgpt
type: application
version: 1.0.0

@@ -1,44 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "k8sgpt.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "k8sgpt.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "k8sgpt.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "k8sgpt.labels" -}}
helm.sh/chart: {{ include "k8sgpt.chart" . }}
app.kubernetes.io/name: {{ include "k8sgpt.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
{{- end }}
{{- end }}

@@ -1,67 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "k8sgpt.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  {{- if .Values.deployment.annotations }}
  annotations:
    {{- toYaml .Values.deployment.annotations | nindent 4 }}
  {{- end }}
  labels:
    {{- include "k8sgpt.labels" . | nindent 4 }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "k8sgpt.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "k8sgpt.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    spec:
      {{- if .Values.deployment.securityContext }}
      securityContext:
        {{- toYaml .Values.deployment.securityContext | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ template "k8sgpt.fullname" . }}
      containers:
        - name: k8sgpt-container
          imagePullPolicy: {{ .Values.deployment.imagePullPolicy }}
          image: {{ .Values.deployment.image.repository }}:{{ .Values.deployment.image.tag | default .Chart.AppVersion }}
          ports:
            - containerPort: 8080
            {{- if .Values.deployment.mcp.enabled }}
            - containerPort: {{ .Values.deployment.mcp.port | int }}
            {{- end }}
          args: ["serve"
          {{- if .Values.deployment.mcp.enabled }}, "--mcp", "-v", "--mcp-http", "--mcp-port", {{ .Values.deployment.mcp.port | quote }}
          {{- end }}
          ]
          {{- if .Values.deployment.resources }}
          resources:
            {{- toYaml .Values.deployment.resources | nindent 10 }}
          {{- end }}
          env:
            - name: K8SGPT_MODEL
              value: {{ .Values.deployment.env.model }}
            - name: K8SGPT_BACKEND
              value: {{ .Values.deployment.env.backend }}
            {{- if .Values.secret.secretKey }}
            - name: K8SGPT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: ai-backend-secret
                  key: secret-key
            {{- end }}
            - name: XDG_CONFIG_HOME
              value: /k8sgpt-config/
            - name: XDG_CACHE_HOME
              value: /k8sgpt-config/
          volumeMounts:
            - mountPath: /k8sgpt-config
              name: config
      volumes:
        - emptyDir: {}
          name: config

@@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ template "k8sgpt.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels:
    {{- include "k8sgpt.labels" . | nindent 4 }}
rules:
  - apiGroups:
      - '*'
    resources:
      - '*'
    verbs:
      - get
      - list
      - watch

@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "k8sgpt.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels:
    {{- include "k8sgpt.labels" . | nindent 4 }}
subjects:
  - kind: ServiceAccount
    name: {{ template "k8sgpt.fullname" . }}
    namespace: {{ .Release.Namespace | quote }}
roleRef:
  kind: ClusterRole
  name: {{ template "k8sgpt.fullname" . }}
  apiGroup: rbac.authorization.k8s.io

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "k8sgpt.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels:
    {{- include "k8sgpt.labels" . | nindent 4 }}

@@ -1,10 +0,0 @@
{{- if .Values.secret.secretKey }}
apiVersion: v1
data:
  secret-key: {{ .Values.secret.secretKey }}
kind: Secret
metadata:
  name: ai-backend-secret
  namespace: {{ .Release.Namespace | quote }}
type: Opaque
{{- end}}

@@ -1,27 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "k8sgpt.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels:
    {{- include "k8sgpt.labels" . | nindent 4 }}
  {{- if .Values.service.annotations }}
  annotations:
    {{- toYaml .Values.service.annotations | nindent 4 }}
  {{- end }}
spec:
  selector:
    app.kubernetes.io/name: {{ include "k8sgpt.name" . }}
  ports:
    - name: http
      port: 8080
      targetPort: 8080
    - name: metrics
      port: 8081
      targetPort: 8081
    {{- if .Values.deployment.mcp.enabled }}
    - name: mcp
      port: {{ .Values.deployment.mcp.port | int }}
      targetPort: {{ .Values.deployment.mcp.port | int }}
    {{- end }}
  type: {{ .Values.service.type }}

@@ -1,21 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "k8sgpt.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels:
    {{- include "k8sgpt.labels" . | nindent 4 }}
    {{- if .Values.serviceMonitor.additionalLabels }}
    {{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
    {{- end }}
spec:
  endpoints:
    - honorLabels: true
      path: /metrics
      port: metrics
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "k8sgpt.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

@@ -1,39 +0,0 @@
# Example values file to enable MCP (Model Context Protocol) service
# Copy this file and modify as needed, then use: helm install -f values-mcp-example.yaml

deployment:
  # Enable MCP server
  mcp:
    enabled: true
    port: "8089" # Port for MCP server (default: 8089)
    http: true # Enable HTTP mode for MCP server

  # Other deployment settings remain the same
  image:
    repository: ghcr.io/k8sgpt-ai/k8sgpt
    tag: "" # defaults to Chart.appVersion if unspecified
  imagePullPolicy: Always
  env:
    model: "gpt-3.5-turbo"
    backend: "openai"
  resources:
    limits:
      cpu: "1"
      memory: "512Mi"
    requests:
      cpu: "0.2"
      memory: "156Mi"

# Service configuration
service:
  type: ClusterIP
  annotations: {}

# Secret configuration for AI backend
secret:
  secretKey: "" # base64 encoded OpenAI token

# ServiceMonitor for Prometheus metrics
serviceMonitor:
  enabled: false
  additionalLabels: {}
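
A minimal sketch of deploying the chart with this example values file, mirroring the `deploy` target in the Makefile above:

```bash
helm install k8sgpt charts/k8sgpt -n k8sgpt --create-namespace \
  -f charts/k8sgpt/values-mcp-example.yaml
```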
@@ -1,35 +0,0 @@
deployment:
  image:
    repository: ghcr.io/k8sgpt-ai/k8sgpt
    tag: "" # defaults to Chart.appVersion if unspecified
  imagePullPolicy: Always
  annotations: {}
  env:
    model: "gpt-3.5-turbo"
    backend: "openai" # one of: [ openai | llama ]
  # MCP (Model Context Protocol) server configuration
  mcp:
    enabled: false # Enable MCP server
    port: "8089" # Port for MCP server
    http: true # Enable HTTP mode for MCP server
  resources:
    limits:
      cpu: "1"
      memory: "512Mi"
    requests:
      cpu: "0.2"
      memory: "156Mi"
  securityContext: {}
  # Set securityContext.runAsUser/runAsGroup if necessary. Values below were taken from https://github.com/k8sgpt-ai/k8sgpt/blob/main/container/Dockerfile
  # runAsUser: 65532
  # runAsGroup: 65532
secret:
  secretKey: "" # base64 encoded OpenAI token

service:
  type: ClusterIP
  annotations: {}

serviceMonitor:
  enabled: false
  additionalLabels: {}

@@ -1,47 +1,28 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package analyze

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"os/signal"
	"strings"
	"syscall"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai/interactive"
	"github.com/k8sgpt-ai/k8sgpt/pkg/analysis"
	"github.com/k8sgpt-ai/k8sgpt/pkg/analyzer"
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	"github.com/schollz/progressbar/v3"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var (
	explain         bool
	backend         string
	output          string
	filters         []string
	language        string
	nocache         bool
	namespace       string
	labelSelector   string
	anonymize       bool
	maxConcurrency  int
	withDoc         bool
	interactiveMode bool
	customAnalysis  bool
	customHeaders   []string
	withStats       bool
)

// AnalyzeCmd represents the problems command
@@ -52,129 +33,118 @@ var AnalyzeCmd = &cobra.Command{
	Long: `This command will find problems within your Kubernetes cluster and
provide you with a list of issues that need to be resolved`,
	Run: func(cmd *cobra.Command, args []string) {
		// Create analysis configuration first.
		config, err := analysis.NewAnalysis(
			backend,
			language,
			filters,
			namespace,
			labelSelector,
			nocache,
			explain,
			maxConcurrency,
			withDoc,
			interactiveMode,
			customHeaders,
			withStats,
		)

		verbose := viper.GetBool("verbose")
		if verbose {
			fmt.Println("Debug: Checking analysis configuration.")
		}
		if err != nil {
			color.Red("Error: %v", err)
		// get backend from file
		backendType := viper.GetString("backend_type")
		if backendType == "" {
			color.Red("No backend set. Please run k8sgpt auth")
			os.Exit(1)
		}
		if verbose {
			fmt.Println("Debug: Analysis initialized.")
		// override the default backend if a flag is provided
		if backend != "" {
			backendType = backend
		}
		defer config.Close()

		if customAnalysis {
			config.RunCustomAnalysis()
			if verbose {
				fmt.Println("Debug: All custom analyzers completed.")
			}
		}
		config.RunAnalysis()
		if verbose {
			fmt.Println("Debug: All core analyzers completed.")
		// get the token with viper
		token := viper.GetString(fmt.Sprintf("%s_key", backendType))
		// check if nil
		if token == "" {
			color.Red("No %s key set. Please run k8sgpt auth", backendType)
			os.Exit(1)
		}

		if explain {
			err := config.GetAIResults(output, anonymize)
			if verbose {
				fmt.Println("Debug: Checking AI results.")
			}
			if err != nil {
			var aiClient ai.IAI
			switch backendType {
			case "openai":
				aiClient = &ai.OpenAIClient{}
				if err := aiClient.Configure(token, language); err != nil {
					color.Red("Error: %v", err)
					os.Exit(1)
				}
			}
			// print results
			output_data, err := config.PrintOutput(output)
			if verbose {
				fmt.Println("Debug: Checking output.")
			}
			if err != nil {
				color.Red("Error: %v", err)
			default:
				color.Red("Backend not supported")
				os.Exit(1)
			}

			if withStats {
				statsData := config.PrintStats()
				fmt.Println(string(statsData))
			ctx := context.Background()
			// Get kubernetes client from viper
			client := viper.Get("kubernetesClient").(*kubernetes.Client)

			var analysisResults *[]analyzer.Analysis = &[]analyzer.Analysis{}
			if err := analyzer.RunAnalysis(ctx, client, aiClient, explain, analysisResults); err != nil {
				color.Red("Error: %v", err)
				os.Exit(1)
			}

			fmt.Println(string(output_data))

			if interactiveMode && explain {
				if output == "json" {
					color.Yellow("Caution: interactive mode using --json enabled may use additional tokens.")
				}
				sigs := make(chan os.Signal, 1)
				signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
				interactiveClient := interactive.NewInteractionRunner(config, output_data)

				go interactiveClient.StartInteraction()
				for {
					select {
					case res := <-sigs:
						switch res {
						default:
							os.Exit(0)
						}
					case res := <-interactiveClient.State:
						switch res {
						case interactive.E_EXITED:
							os.Exit(0)
			// Removed filtered results from slice
			if len(filters) > 0 {
				var filteredResults []analyzer.Analysis
				for _, analysis := range *analysisResults {
					for _, filter := range filters {
						if strings.Contains(analysis.Kind, filter) {
							filteredResults = append(filteredResults, analysis)
						}
					}
				}
				analysisResults = &filteredResults
			}

			var bar *progressbar.ProgressBar
			if len(*analysisResults) > 0 {
				bar = progressbar.Default(int64(len(*analysisResults)))
			} else {
				color.Green("{ \"status\": \"OK\" }")
				os.Exit(0)
			}

			// This variable is used to store the results that will be printed
			// It's necessary because the heap memory is lost when the function returns
			var printOutput []analyzer.Analysis

			for _, analysis := range *analysisResults {

				if explain {
					parsedText, err := analyzer.ParseViaAI(ctx, aiClient, analysis.Error, nocache)
					if err != nil {
						color.Red("Error: %v", err)
						continue
					}
					analysis.Details = parsedText
					bar.Add(1)
				}
				printOutput = append(printOutput, analysis)
			}

			// print results
			for n, analysis := range printOutput {

				switch output {
				case "json":
					analysis.Error = analysis.Error[0:]
					j, err := json.Marshal(analysis)
					if err != nil {
						color.Red("Error: %v", err)
						os.Exit(1)
					}
					fmt.Println(string(j))
				default:
					fmt.Printf("%s %s(%s): %s \n%s\n", color.CyanString("%d", n),
						color.YellowString(analysis.Name), color.CyanString(analysis.ParentObject),
						color.RedString(analysis.Error[0]), color.GreenString(analysis.Details))
				}
			}
	},
}

func init() {
|
||||
// namespace flag
|
||||
AnalyzeCmd.Flags().StringVarP(&namespace, "namespace", "n", "", "Namespace to analyze")
|
||||
// no cache flag
|
||||
AnalyzeCmd.Flags().BoolVarP(&nocache, "no-cache", "c", false, "Do not use cached data")
|
||||
// anonymize flag
|
||||
AnalyzeCmd.Flags().BoolVarP(&anonymize, "anonymize", "a", false, "Anonymize data before sending it to the AI backend. This flag masks sensitive data, such as Kubernetes object names and labels, by replacing it with a key. However, please note that this flag does not currently apply to events.")
|
||||
|
||||
AnalyzeCmd.Flags().BoolVarP(&nocache, "no-cache", "n", false, "Do not use cached data")
|
||||
// array of strings flag
|
||||
AnalyzeCmd.Flags().StringSliceVarP(&filters, "filter", "f", []string{}, "Filter for these analyzers (e.g. Pod, PersistentVolumeClaim, Service, ReplicaSet)")
|
||||
// explain flag
|
||||
AnalyzeCmd.Flags().StringSliceVarP(&filters, "filter", "f", []string{}, "Filter for these analzyers (e.g. Pod,PersistentVolumeClaim,Service,ReplicaSet)")
|
||||
|
||||
AnalyzeCmd.Flags().BoolVarP(&explain, "explain", "e", false, "Explain the problem to me")
|
||||
// add flag for backend
|
||||
AnalyzeCmd.Flags().StringVarP(&backend, "backend", "b", "", "Backend AI provider")
|
||||
AnalyzeCmd.Flags().StringVarP(&backend, "backend", "b", "openai", "Backend AI provider")
|
||||
// output as json
|
||||
AnalyzeCmd.Flags().StringVarP(&output, "output", "o", "text", "Output format (text, json)")
|
||||
// add language options for output
|
||||
AnalyzeCmd.Flags().StringVarP(&language, "language", "l", "english", "Languages to use for AI (e.g. 'English', 'Spanish', 'French', 'German', 'Italian', 'Portuguese', 'Dutch', 'Russian', 'Chinese', 'Japanese', 'Korean')")
|
||||
// add max concurrency
|
||||
AnalyzeCmd.Flags().IntVarP(&maxConcurrency, "max-concurrency", "m", 10, "Maximum number of concurrent requests to the Kubernetes API server")
|
||||
// kubernetes doc flag
|
||||
AnalyzeCmd.Flags().BoolVarP(&withDoc, "with-doc", "d", false, "Give me the official documentation of the involved field")
|
||||
// interactive mode flag
|
||||
AnalyzeCmd.Flags().BoolVarP(&interactiveMode, "interactive", "i", false, "Enable interactive mode that allows further conversation with LLM about the problem. Works only with --explain flag")
|
||||
// custom analysis flag
|
||||
AnalyzeCmd.Flags().BoolVarP(&customAnalysis, "custom-analysis", "z", false, "Enable custom analyzers")
|
||||
// add custom headers flag
|
||||
AnalyzeCmd.Flags().StringSliceVarP(&customHeaders, "custom-headers", "r", []string{}, "Custom Headers, <key>:<value> (e.g CustomHeaderKey:CustomHeaderValue AnotherHeader:AnotherValue)")
|
||||
// label selector flag
|
||||
AnalyzeCmd.Flags().StringVarP(&labelSelector, "selector", "L", "", "Label selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -L key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.")
|
||||
// print stats
|
||||
AnalyzeCmd.Flags().BoolVarP(&withStats, "with-stat", "s", false, "Print analysis stats. This option disables errors display.")
|
||||
}
|
||||
|
||||
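Worth noting about the filter loop above: it matches with strings.Contains rather than string equality, so a filter such as "Pod" also selects kinds like "PodDisruptionBudget". A minimal, self-contained sketch of that behavior (the kind names are illustrative, not taken from this diff):

package main

import (
	"fmt"
	"strings"
)

func main() {
	kinds := []string{"Pod", "Service", "PodDisruptionBudget"}
	filter := "Pod"
	for _, kind := range kinds {
		// Substring match, as in the analyze command above; an exact
		// match would compare with == instead.
		if strings.Contains(kind, filter) {
			fmt.Println("matched:", kind)
		}
	}
	// Prints: matched: Pod, then matched: PodDisruptionBudget
}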
188
cmd/auth/add.go
@@ -1,188 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"fmt"
	"os"
	"strings"
	"syscall"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"golang.org/x/term"
)

const (
	defaultBackend = "openai"
	defaultModel   = "gpt-4o"
)

var addCmd = &cobra.Command{
	Use:   "add",
	Short: "Add new provider",
	Long:  "The add command allows you to configure a new backend AI provider",
	PreRun: func(cmd *cobra.Command, args []string) {
		backend, _ := cmd.Flags().GetString("backend")
		if strings.ToLower(backend) == "azureopenai" {
			_ = cmd.MarkFlagRequired("engine")
			_ = cmd.MarkFlagRequired("baseurl")
		}
		if strings.ToLower(backend) == "amazonsagemaker" {
			_ = cmd.MarkFlagRequired("endpointname")
			_ = cmd.MarkFlagRequired("providerRegion")
		}
		if strings.ToLower(backend) == "amazonbedrock" {
			_ = cmd.MarkFlagRequired("providerRegion")
		}
		if strings.ToLower(backend) == "ibmwatsonxai" {
			_ = cmd.MarkFlagRequired("providerId")
		}
	},
	Run: func(cmd *cobra.Command, args []string) {

		validBackend := func(validBackends []string, backend string) bool {
			for _, b := range validBackends {
				if b == backend {
					return true
				}
			}
			return false
		}

		// check if backend is not empty and a valid value
		if backend == "" {
			color.Yellow(fmt.Sprintf("Warning: backend input is empty, will use the default value: %s", defaultBackend))
			backend = defaultBackend
		} else {
			if !validBackend(ai.Backends, backend) {
				color.Red("Error: Backend AI accepted values are '%v'", strings.Join(ai.Backends, ", "))
				os.Exit(1)
			}
		}

		// get ai configuration
		err := viper.UnmarshalKey("ai", &configAI)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}

		// search for provider with same name
		providerIndex := -1
		for i, provider := range configAI.Providers {
			if backend == provider.Name {
				providerIndex = i
				break
			}
		}

		if providerIndex != -1 {
			// provider with same name exists, update provider info
			color.Yellow("Provider with same name already exists.")
			os.Exit(1)
		}

		// check if model is not empty
		if model == "" {
			model = defaultModel
			color.Yellow(fmt.Sprintf("Warning: model input is empty, will use the default value: %s", defaultModel))
		}
		if temperature > 1.0 || temperature < 0.0 {
			color.Red("Error: temperature ranges from 0 to 1.")
			os.Exit(1)
		}
		if topP > 1.0 || topP < 0.0 {
			color.Red("Error: topP ranges from 0 to 1.")
			os.Exit(1)
		}
		if topK < 1 || topK > 100 {
			color.Red("Error: topK ranges from 1 to 100.")
			os.Exit(1)
		}

		if ai.NeedPassword(backend) && password == "" {
			fmt.Printf("Enter %s Key: ", backend)
			bytePassword, err := term.ReadPassword(int(syscall.Stdin))
			if err != nil {
				color.Red("Error reading %s Key from stdin: %s", backend,
					err.Error())
				os.Exit(1)
			}
			password = strings.TrimSpace(string(bytePassword))
		}

		// create new provider object
		newProvider := ai.AIProvider{
			Name:           backend,
			Model:          model,
			Password:       password,
			BaseURL:        baseURL,
			EndpointName:   endpointName,
			Engine:         engine,
			Temperature:    temperature,
			ProviderRegion: providerRegion,
			ProviderId:     providerId,
			CompartmentId:  compartmentId,
			TopP:           topP,
			TopK:           topK,
			MaxTokens:      maxTokens,
			OrganizationId: organizationId,
		}

		if providerIndex == -1 {
			// provider with same name does not exist, add new provider to list
			configAI.Providers = append(configAI.Providers, newProvider)
			viper.Set("ai", configAI)
			if err := viper.WriteConfig(); err != nil {
				color.Red("Error writing config file: %s", err.Error())
				os.Exit(1)
			}
			color.Green("%s added to the AI backend provider list", backend)
		}
	},
}

func init() {
	// add flag for backend
	addCmd.Flags().StringVarP(&backend, "backend", "b", defaultBackend, "Backend AI provider")
	// add flag for model
	addCmd.Flags().StringVarP(&model, "model", "m", defaultModel, "Backend AI model")
	// add flag for password
	addCmd.Flags().StringVarP(&password, "password", "p", "", "Backend AI password")
	// add flag for url
	addCmd.Flags().StringVarP(&baseURL, "baseurl", "u", "", "URL AI provider, (e.g. `http://localhost:8080/v1`)")
	// add flag for endpointName
	addCmd.Flags().StringVarP(&endpointName, "endpointname", "n", "", "Endpoint Name, e.g. `endpoint-xxxxxxxxxxxx` (only for amazonbedrock, amazonsagemaker backends)")
	// add flag for topP
	addCmd.Flags().Float32VarP(&topP, "topp", "", 0.5, "Probability Cutoff: Set a threshold (0.0-1.0) to limit word choices. Higher values add randomness, lower values increase predictability.")
	// add flag for topK
	addCmd.Flags().Int32VarP(&topK, "topk", "c", 50, "Sampling Cutoff: Set a threshold (1-100) to restrict the sampling process to the top K most probable words at each step. Higher values lead to greater variability, lower values increase predictability.")
	// max tokens
	addCmd.Flags().IntVarP(&maxTokens, "maxtokens", "l", 2048, "Specify a maximum output length. Adjust (1-...) to control text length. Higher values produce longer output, lower values limit length")
	// add flag for temperature
	addCmd.Flags().Float32VarP(&temperature, "temperature", "t", 0.7, "The sampling temperature, value ranges between 0 (output is more deterministic) and 1 (more random)")
	// add flag for azure open ai engine/deployment name
	addCmd.Flags().StringVarP(&engine, "engine", "e", "", "Azure AI deployment name (only for azureopenai backend)")
	// add flag for amazonbedrock region name
	addCmd.Flags().StringVarP(&providerRegion, "providerRegion", "r", "", "Provider Region name (only for amazonbedrock, googlevertexai backend)")
	// add flag for vertexAI/WatsonxAI Project ID
	addCmd.Flags().StringVarP(&providerId, "providerId", "i", "", "Provider specific ID for e.g. project (only for googlevertexai/ibmwatsonxai backend)")
	// add flag for OCI Compartment ID
	addCmd.Flags().StringVarP(&compartmentId, "compartmentId", "k", "", "Compartment ID for generative AI model (only for oci backend)")
	// add flag for openai organization
	addCmd.Flags().StringVarP(&organizationId, "organizationId", "o", "", "OpenAI or AzureOpenAI Organization ID (only for openai and azureopenai backend)")
}
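The add command above persists providers by unmarshalling the "ai" key into a struct, appending, and writing the config back. A minimal sketch of that viper round-trip, using a stand-in struct rather than the real ai.AIConfiguration (the YAML shape is an assumption for illustration):

package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

// aiConfig is a stand-in for ai.AIConfiguration; the real type has more fields.
type aiConfig struct {
	DefaultProvider string
	Providers       []struct {
		Name  string
		Model string
	}
}

func main() {
	yaml := []byte("ai:\n  defaultprovider: openai\n  providers:\n    - name: openai\n      model: gpt-4o\n")
	viper.SetConfigType("yaml")
	if err := viper.ReadConfig(bytes.NewBuffer(yaml)); err != nil {
		panic(err)
	}

	var cfg aiConfig
	// UnmarshalKey decodes just the "ai" subtree, as the commands above do.
	if err := viper.UnmarshalKey("ai", &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}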
@@ -1,64 +1,61 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
	"fmt"
	"os"
	"strings"
	"syscall"

	"github.com/fatih/color"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"golang.org/x/term"
)

var (
	backend        string
	password       string
	baseURL        string
	endpointName   string
	model          string
	engine         string
	temperature    float32
	providerRegion string
	providerId     string
	compartmentId  string
	topP           float32
	topK           int32
	maxTokens      int
	organizationId string
	backend        string
)

var configAI ai.AIConfiguration

// AuthCmd represents the auth command
var AuthCmd = &cobra.Command{
	Use:   "auth",
	Short: "Authenticate with your chosen backend",
	Long:  `Provide the necessary credentials to authenticate with your chosen backend.`,
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) == 0 {
			_ = cmd.Help()
			return
		}

		backendType := viper.GetString("backend_type")
		if backendType == "" {
			// Set the default backend
			viper.Set("backend_type", "openai")
			if err := viper.WriteConfig(); err != nil {
				color.Red("Error writing config file: %s", err.Error())
				os.Exit(1)
			}
		}
		// override the default backend if a flag is provided
		if backend != "" {
			backendType = backend
		}

		fmt.Printf("Enter %s Key: ", backendType)
		bytePassword, err := term.ReadPassword(int(syscall.Stdin))
		if err != nil {
			color.Red("Error reading %s Key from stdin: %s", backendType,
				err.Error())
			os.Exit(1)
		}
		password := strings.TrimSpace(string(bytePassword))

		viper.Set(fmt.Sprintf("%s_key", backendType), password)
		if err := viper.WriteConfig(); err != nil {
			color.Red("Error writing config file: %s", err.Error())
			os.Exit(1)
		}
		color.Green("key added")
	},
}

func init() {
	// add subcommand to list backends
	AuthCmd.AddCommand(listCmd)
	// add subcommand to create a new backend provider
	AuthCmd.AddCommand(addCmd)
	// add subcommand to remove a backend provider
	AuthCmd.AddCommand(removeCmd)
	// add subcommand to set default backend provider
	AuthCmd.AddCommand(defaultCmd)
	// add subcommand to update backend provider
	AuthCmd.AddCommand(updateCmd)
	// add flag for backend
	AuthCmd.Flags().StringVarP(&backend, "backend", "b", "openai", "Backend AI provider")
}
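The key prompt above relies on term.ReadPassword to read from stdin with terminal echo disabled. A minimal sketch of that pattern on its own (the prompt text is illustrative):

package main

import (
	"fmt"
	"strings"
	"syscall"

	"golang.org/x/term"
)

func main() {
	fmt.Print("Enter key: ")
	// ReadPassword disables echo while reading, so the key is not shown;
	// the int conversion of syscall.Stdin matches the usage above.
	bytePassword, err := term.ReadPassword(int(syscall.Stdin))
	if err != nil {
		panic(err)
	}
	fmt.Println()
	key := strings.TrimSpace(string(bytePassword))
	fmt.Printf("read a %d-character key\n", len(key))
}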
@@ -1,79 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"os"
	"strings"

	"github.com/fatih/color"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var (
	providerName string
)

var defaultCmd = &cobra.Command{
	Use:   "default",
	Short: "Set your default AI backend provider",
	Long:  "The command to set your new default AI backend provider (default is openai)",
	Run: func(cmd *cobra.Command, args []string) {
		err := viper.UnmarshalKey("ai", &configAI)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		if providerName == "" {
			if configAI.DefaultProvider != "" {
				color.Yellow("Your default provider is %s", configAI.DefaultProvider)
			} else {
				color.Yellow("Your default provider is openai")
			}
			os.Exit(0)
		}
		// lowercase the provider name
		providerName = strings.ToLower(providerName)

		// Check if the provider is in the provider list
		providerExists := false
		for _, provider := range configAI.Providers {
			if provider.Name == providerName {
				providerExists = true
			}
		}
		if !providerExists {
			color.Red("Error: Provider %s does not exist", providerName)
			os.Exit(1)
		}
		// Set the default provider
		configAI.DefaultProvider = providerName

		viper.Set("ai", configAI)
		// Viper write config
		err = viper.WriteConfig()
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		// Print acknowledgement
		color.Green("Default provider set to %s", providerName)
	},
}

func init() {
	// provider name flag
	defaultCmd.Flags().StringVarP(&providerName, "provider", "p", "", "The name of the provider to set as default")
}
@@ -1,98 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"fmt"
	"os"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var details bool

var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List configured providers",
	Long:  "The list command displays a list of configured providers",
	Run: func(cmd *cobra.Command, args []string) {

		// get ai configuration
		err := viper.UnmarshalKey("ai", &configAI)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}

		// Print the default if it is set
		fmt.Print(color.YellowString("Default: \n"))
		if configAI.DefaultProvider != "" {
			fmt.Printf("> %s\n", color.BlueString(configAI.DefaultProvider))
		} else {
			fmt.Printf("> %s\n", color.BlueString("openai"))
		}

		// Get the list of all AI backends and print the ones that are in the provider list
		fmt.Print(color.YellowString("Active: \n"))
		for _, aiBackend := range ai.Backends {
			providerExists := false
			for _, provider := range configAI.Providers {
				if provider.Name == aiBackend {
					providerExists = true
				}
			}
			if providerExists {
				fmt.Printf("> %s\n", color.GreenString(aiBackend))
				if details {
					for _, provider := range configAI.Providers {
						if provider.Name == aiBackend {
							printDetails(provider)
						}
					}
				}
			}
		}
		fmt.Print(color.YellowString("Unused: \n"))
		for _, aiBackend := range ai.Backends {
			providerExists := false
			for _, provider := range configAI.Providers {
				if provider.Name == aiBackend {
					providerExists = true
				}
			}
			if !providerExists {
				fmt.Printf("> %s\n", color.RedString(aiBackend))
			}
		}
	},
}

func init() {
	listCmd.Flags().BoolVar(&details, "details", false, "Print active provider configuration details")
}

func printDetails(provider ai.AIProvider) {
	if provider.Model != "" {
		fmt.Printf(" - Model: %s\n", provider.Model)
	}
	if provider.Engine != "" {
		fmt.Printf(" - Engine: %s\n", provider.Engine)
	}
	if provider.BaseURL != "" {
		fmt.Printf(" - BaseURL: %s\n", provider.BaseURL)
	}
}
@@ -1,77 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"os"
	"strings"

	"github.com/fatih/color"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var removeCmd = &cobra.Command{
	Use:   "remove",
	Short: "Remove provider(s)",
	Long:  "The command to remove AI backend provider(s)",
	PreRun: func(cmd *cobra.Command, args []string) {
		_ = cmd.MarkFlagRequired("backends")
	},
	Run: func(cmd *cobra.Command, args []string) {
		if backend == "" {
			color.Red("Error: backends must be set.")
			_ = cmd.Help()
			return
		}
		inputBackends := strings.Split(backend, ",")

		err := viper.UnmarshalKey("ai", &configAI)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}

		for _, b := range inputBackends {
			foundBackend := false
			for i, provider := range configAI.Providers {
				if b == provider.Name {
					foundBackend = true
					configAI.Providers = append(configAI.Providers[:i], configAI.Providers[i+1:]...)
					if configAI.DefaultProvider == b {
						configAI.DefaultProvider = "openai"
					}
					color.Green("%s deleted from the AI backend provider list", b)
					break
				}
			}
			if !foundBackend {
				color.Red("Error: %s does not exist in configuration file. Please use k8sgpt auth new.", b)
				os.Exit(1)
			}
		}

		viper.Set("ai", configAI)
		if err := viper.WriteConfig(); err != nil {
			color.Red("Error writing config file: %s", err.Error())
			os.Exit(1)
		}

	},
}

func init() {
	// add flag for backends
	removeCmd.Flags().StringVarP(&backend, "backends", "b", "", "Backend AI providers to remove (separated by a comma)")
}
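The provider deletion above uses the append(s[:i], s[i+1:]...) idiom, which removes the element at index i by shifting the tail left in place. Breaking out of the inner loop immediately afterwards, as the command does, avoids continuing to iterate over a slice that was just mutated. A standalone sketch (the provider names are illustrative):

package main

import "fmt"

func main() {
	providers := []string{"openai", "azureopenai", "cohere"}
	target := "azureopenai"
	for i, p := range providers {
		if p == target {
			// Delete index i by shifting the remaining elements left.
			providers = append(providers[:i], providers[i+1:]...)
			break
		}
	}
	fmt.Println(providers) // [openai cohere]
}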
@@ -1,120 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"os"
	"strings"

	"github.com/fatih/color"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var updateCmd = &cobra.Command{
	Use:   "update",
	Short: "Update a backend provider",
	Long:  "The command to update an AI backend provider",
	// Args: cobra.ExactArgs(1),
	PreRun: func(cmd *cobra.Command, args []string) {
		_ = cmd.MarkFlagRequired("backend")
		backend, _ := cmd.Flags().GetString("backend")
		if strings.ToLower(backend) == "azureopenai" {
			_ = cmd.MarkFlagRequired("engine")
			_ = cmd.MarkFlagRequired("baseurl")
		}
		organizationId, _ := cmd.Flags().GetString("organizationId")
		if strings.ToLower(backend) != "azureopenai" && strings.ToLower(backend) != "openai" {
			if organizationId != "" {
				color.Red("Error: organizationId must be empty for backends other than azureopenai or openai.")
				os.Exit(1)
			}
		}
	},
	Run: func(cmd *cobra.Command, args []string) {

		// get ai configuration
		err := viper.UnmarshalKey("ai", &configAI)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}

		backend, _ := cmd.Flags().GetString("backend")

		if temperature > 1.0 || temperature < 0.0 {
			color.Red("Error: temperature ranges from 0 to 1.")
			os.Exit(1)
		}

		foundBackend := false
		for i, provider := range configAI.Providers {
			if backend == provider.Name {
				foundBackend = true
				if backend != "" {
					configAI.Providers[i].Name = backend
					color.Blue("Backend name updated successfully")
				}
				if model != "" {
					configAI.Providers[i].Model = model
					color.Blue("Model updated successfully")
				}
				if password != "" {
					configAI.Providers[i].Password = password
					color.Blue("Password updated successfully")
				}
				if baseURL != "" {
					configAI.Providers[i].BaseURL = baseURL
					color.Blue("Base URL updated successfully")
				}
				if engine != "" {
					configAI.Providers[i].Engine = engine
				}
				if organizationId != "" {
					configAI.Providers[i].OrganizationId = organizationId
					color.Blue("Organization Id updated successfully")
				}
				configAI.Providers[i].Temperature = temperature
				color.Green("%s updated in the AI backend provider list", backend)
			}
		}
		if !foundBackend {
			color.Red("Error: %s does not exist in configuration file. Please use k8sgpt auth new.", backend)
			os.Exit(1)
		}

		viper.Set("ai", configAI)
		if err := viper.WriteConfig(); err != nil {
			color.Red("Error writing config file: %s", err.Error())
			os.Exit(1)
		}
	},
}

func init() {
	// update flag for backend
	updateCmd.Flags().StringVarP(&backend, "backend", "b", "", "Update backend AI provider")
	// update flag for model
	updateCmd.Flags().StringVarP(&model, "model", "m", "", "Update backend AI model")
	// update flag for password
	updateCmd.Flags().StringVarP(&password, "password", "p", "", "Update backend AI password")
	// update flag for url
	updateCmd.Flags().StringVarP(&baseURL, "baseurl", "u", "", "Update URL AI provider, (e.g. `http://localhost:8080/v1`)")
	// add flag for temperature
	updateCmd.Flags().Float32VarP(&temperature, "temperature", "t", 0.7, "The sampling temperature, value ranges between 0 (output is more deterministic) and 1 (more random)")
	// update flag for azure open ai engine/deployment name
	updateCmd.Flags().StringVarP(&engine, "engine", "e", "", "Update Azure AI deployment name")
	// update flag for organizationId
	updateCmd.Flags().StringVarP(&organizationId, "organizationId", "o", "", "Update OpenAI or Azure organization Id")
}
85
cmd/cache/add.go
vendored
@@ -1,85 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache

import (
	"fmt"
	"os"
	"strings"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
	"github.com/spf13/cobra"
)

var (
	region string
	//nolint:unused
	bucketName     string
	storageAccount string
	containerName  string
	projectId      string
	endpoint       string
	insecure       bool
)

// addCmd represents the add command
var addCmd = &cobra.Command{
	Use:   "add [cache type]",
	Short: "Add a remote cache",
	Long: `This command allows you to add a remote cache to store the results of an analysis.
	The supported cache types are:
	- Azure Blob storage (e.g., k8sgpt cache add azure)
	- Google Cloud storage (e.g., k8sgpt cache add gcs)
	- S3 (e.g., k8sgpt cache add s3)
	- Interplex (e.g., k8sgpt cache add interplex)`,
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) == 0 {
			color.Red("Error: Please provide a value for cache types. Run k8sgpt cache add --help")
			os.Exit(1)
		}
		fmt.Println(color.YellowString("Adding remote based cache"))
		cacheType := args[0]
		remoteCache, err := cache.NewCacheProvider(strings.ToLower(cacheType), bucketName, region, endpoint, storageAccount, containerName, projectId, insecure)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		err = cache.AddRemoteCache(remoteCache)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
	},
}

func init() {
	CacheCmd.AddCommand(addCmd)
	addCmd.Flags().StringVarP(&region, "region", "r", "us-east-1", "The region to use for the AWS S3 or GCS cache")
	addCmd.Flags().StringVarP(&endpoint, "endpoint", "e", "", "The S3 or minio endpoint")
	addCmd.Flags().BoolVarP(&insecure, "insecure", "i", false, "Skip TLS verification for S3/Minio custom endpoint")
	addCmd.Flags().StringVarP(&bucketName, "bucket", "b", "", "The name of the AWS S3 bucket to use for the cache")
	addCmd.Flags().StringVarP(&projectId, "projectid", "p", "", "The GCP project ID")
	addCmd.Flags().StringVarP(&storageAccount, "storageacc", "s", "", "The Azure storage account name of the container")
	addCmd.Flags().StringVarP(&containerName, "container", "c", "", "The Azure container name to use for the cache")
	addCmd.MarkFlagsRequiredTogether("storageacc", "container")
	// Tedious check to ensure we don't include arguments from different providers
	addCmd.MarkFlagsMutuallyExclusive("region", "storageacc")
	addCmd.MarkFlagsMutuallyExclusive("region", "container")
	addCmd.MarkFlagsMutuallyExclusive("bucket", "storageacc")
	addCmd.MarkFlagsMutuallyExclusive("bucket", "container")
	addCmd.MarkFlagsMutuallyExclusive("projectid", "storageacc")
	addCmd.MarkFlagsMutuallyExclusive("projectid", "container")
}
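The init above leans on cobra's flag-group helpers: MarkFlagsRequiredTogether makes flags valid only as a set, and MarkFlagsMutuallyExclusive rejects combinations from different cache providers. A minimal sketch of how those rules surface at parse time (the flag names are illustrative, not the real k8sgpt flags):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) { fmt.Println("ok") },
	}
	cmd.Flags().String("bucket", "", "S3 bucket")
	cmd.Flags().String("storageacc", "", "Azure storage account")
	cmd.Flags().String("container", "", "Azure container")
	cmd.MarkFlagsRequiredTogether("storageacc", "container")
	cmd.MarkFlagsMutuallyExclusive("bucket", "storageacc")

	// Setting flags from two different "providers" fails validation,
	// so Execute returns an error instead of running the command.
	cmd.SetArgs([]string{"--bucket", "b", "--storageacc", "a"})
	if err := cmd.Execute(); err != nil {
		fmt.Println("rejected:", err)
	}
}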
35
cmd/cache/cache.go
vendored
@@ -1,35 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache

import (
	"github.com/spf13/cobra"
)

// CacheCmd represents the cache command
var CacheCmd = &cobra.Command{
	Use:   "cache",
	Short: "For working with the cached results of an analysis",
	Long:  `Cache commands allow you to add a remote cache, list the contents of the cache, and remove items from the cache.`,
	Run: func(cmd *cobra.Command, args []string) {
		err := cmd.Help()
		if err != nil {
			panic(err)
		}
	},
}

func init() {
}
45
cmd/cache/get.go
vendored
@@ -1,45 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache

import (
	"fmt"
	"os"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
	"github.com/spf13/cobra"
)

// getCmd represents the get command
var getCmd = &cobra.Command{
	Use:   "get",
	Short: "Get the current cache",
	Long:  `Returns the current remote cache being used`,
	Run: func(cmd *cobra.Command, args []string) {

		// load remote cache if it is configured
		c, err := cache.GetCacheConfiguration()
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		fmt.Printf("Current remote cache is: %s", c.GetName())
	},
}

func init() {
	CacheCmd.AddCommand(getCmd)

}
67
cmd/cache/list.go
vendored
@@ -1,67 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache

import (
	"os"
	"reflect"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
	"github.com/olekukonko/tablewriter"
	"github.com/spf13/cobra"
)

// listCmd represents the list command
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List the contents of the cache",
	Long:  `This command allows you to list the contents of the cache.`,
	Run: func(cmd *cobra.Command, args []string) {

		// load remote cache if it is configured
		c, err := cache.GetCacheConfiguration()
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		names, err := c.List()
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}

		var headers []string
		obj := cache.CacheObjectDetails{}
		objType := reflect.TypeOf(obj)
		for i := 0; i < objType.NumField(); i++ {
			field := objType.Field(i)
			headers = append(headers, field.Name)
		}

		table := tablewriter.NewWriter(os.Stdout)
		table.SetHeader(headers)

		for _, v := range names {
			table.Append([]string{v.Name, v.UpdatedAt.String()})
		}
		table.Render()
	},
}

func init() {
	CacheCmd.AddCommand(listCmd)

}
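The list command above derives its table headers from the struct's field names via reflection, so the header row tracks cache.CacheObjectDetails automatically. A standalone sketch of the same idea, using a stand-in struct and the SetHeader/Append/Render API of the tablewriter version used above (newer tablewriter releases expose a different API):

package main

import (
	"os"
	"reflect"

	"github.com/olekukonko/tablewriter"
)

// objectDetails is a stand-in for cache.CacheObjectDetails.
type objectDetails struct {
	Name      string
	UpdatedAt string
}

func main() {
	// Collect the struct's field names as column headers.
	var headers []string
	t := reflect.TypeOf(objectDetails{})
	for i := 0; i < t.NumField(); i++ {
		headers = append(headers, t.Field(i).Name)
	}

	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader(headers)
	table.Append([]string{"analysis-123", "2023-04-01 10:30:00"})
	table.Render()
}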
83
cmd/cache/purge.go
vendored
@@ -1,83 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache

import (
	"fmt"
	"os"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
	"github.com/spf13/cobra"
)

var all bool

var purgeCmd = &cobra.Command{
	Use:   "purge [object name]",
	Short: "Purge a remote cache",
	Long:  "This command allows you to delete/purge one object from the cache or all objects with --all flag.",
	Run: func(cmd *cobra.Command, args []string) {
		c, err := cache.GetCacheConfiguration()
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}

		if all {
			fmt.Println(color.YellowString("Purging all objects from the remote cache."))
			names, err := c.List()
			if err != nil {
				color.Red("Error listing cache objects: %v", err)
				os.Exit(1)
			}
			if len(names) == 0 {
				fmt.Println(color.GreenString("No objects to delete."))
				return
			}
			var failed []string
			for _, obj := range names {
				err := c.Remove(obj.Name)
				if err != nil {
					failed = append(failed, obj.Name)
				}
			}
			if len(failed) > 0 {
				color.Red("Failed to delete: %v", failed)
				os.Exit(1)
			}
			fmt.Println(color.GreenString("All objects deleted."))
			return
		}

		if len(args) == 0 {
			color.Red("Error: Please provide a value for object name or use --all. Run k8sgpt cache purge --help")
			os.Exit(1)
		}
		objectKey := args[0]
		fmt.Println(color.YellowString("Purging a remote cache."))
		err = c.Remove(objectKey)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		fmt.Println(color.GreenString("Object deleted."))
	},
}

func init() {
	purgeCmd.Flags().BoolVar(&all, "all", false, "Purge all objects in the cache")
	CacheCmd.AddCommand(purgeCmd)
}
43
cmd/cache/remove.go
vendored
@@ -1,43 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache

import (
	"os"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
	"github.com/spf13/cobra"
)

// removeCmd represents the remove command
var removeCmd = &cobra.Command{
	Use:   "remove",
	Short: "Remove the remote cache",
	Long:  `This command allows you to remove the remote cache and use the default filecache.`,
	Run: func(cmd *cobra.Command, args []string) {

		err := cache.RemoveRemoteCache()
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		color.Green("Successfully removed the remote cache")
	},
}

func init() {
	CacheCmd.AddCommand(removeCmd)
}
@@ -1,73 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package customanalyzer

import (
	"os"

	"github.com/fatih/color"
	customAnalyzer "github.com/k8sgpt-ai/k8sgpt/pkg/custom_analyzer"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var (
	name string
	url  string
	port int
)

var addCmd = &cobra.Command{
	Use:     "add",
	Aliases: []string{"add"},
	Short:   "This command will add a custom analyzer from source",
	Long:    "This command allows you to add/remove/list an existing custom analyzer.",
	Run: func(cmd *cobra.Command, args []string) {
		err := viper.UnmarshalKey("custom_analyzers", &configCustomAnalyzer)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		analyzer := customAnalyzer.NewCustomAnalyzer()

		// Check if configuration is valid
		err = analyzer.Check(configCustomAnalyzer, name, url, port)
		if err != nil {
			color.Red("Error adding custom analyzer: %s", err.Error())
			os.Exit(1)
		}

		configCustomAnalyzer = append(configCustomAnalyzer, customAnalyzer.CustomAnalyzerConfiguration{
			Name: name,
			Connection: customAnalyzer.Connection{
				Url:  url,
				Port: port,
			},
		})

		viper.Set("custom_analyzers", configCustomAnalyzer)
		if err := viper.WriteConfig(); err != nil {
			color.Red("Error writing config file: %s", err.Error())
			os.Exit(1)
		}
		color.Green("%s added to the custom analyzers config list", name)

	},
}

func init() {
	addCmd.Flags().StringVarP(&name, "name", "n", "my-custom-analyzer", "Name of the custom analyzer.")
	addCmd.Flags().StringVarP(&url, "url", "u", "localhost", "URL for the custom analyzer connection.")
	addCmd.Flags().IntVarP(&port, "port", "r", 8085, "Port for the custom analyzer connection.")
}
@@ -1,43 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package customanalyzer

import (
	customAnalyzer "github.com/k8sgpt-ai/k8sgpt/pkg/custom_analyzer"
	"github.com/spf13/cobra"
)

var configCustomAnalyzer []customAnalyzer.CustomAnalyzerConfiguration

// CustomAnalyzerCmd represents the custom-analyzer command
var CustomAnalyzerCmd = &cobra.Command{
	Use:   "custom-analyzer",
	Short: "Manage a custom analyzer",
	Long:  `This command allows you to manage custom analyzers, including adding, removing, and listing them.`,
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) == 0 {
			_ = cmd.Help()
			return
		}
	},
}

func init() {
	// add subcommand to add a custom analyzer
	CustomAnalyzerCmd.AddCommand(addCmd)
	// remove subcommand to remove a custom analyzer
	CustomAnalyzerCmd.AddCommand(removeCmd)
	// list subcommand to list custom analyzers
	CustomAnalyzerCmd.AddCommand(listCmd)
}
@@ -1,60 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package customanalyzer

import (
	"fmt"
	"os"

	"github.com/fatih/color"
	customAnalyzer "github.com/k8sgpt-ai/k8sgpt/pkg/custom_analyzer"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var details bool

var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List configured custom analyzers",
	Long:  "The list command displays a list of configured custom analyzers",
	Run: func(cmd *cobra.Command, args []string) {

		// get custom_analyzers configuration
		err := viper.UnmarshalKey("custom_analyzers", &configCustomAnalyzer)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}

		// Get the list of all custom analyzers configured
		fmt.Print(color.YellowString("Active: \n"))
		for _, analyzer := range configCustomAnalyzer {
			fmt.Printf("> %s\n", color.GreenString(analyzer.Name))
			if details {
				printDetails(analyzer)
			}
		}
	},
}

func init() {
	listCmd.Flags().BoolVar(&details, "details", false, "Print custom analyzers configuration details")
}

func printDetails(analyzer customAnalyzer.CustomAnalyzerConfiguration) {
	fmt.Printf(" - Url: %s\n", analyzer.Connection.Url)
	fmt.Printf(" - Port: %d\n", analyzer.Connection.Port)

}
@@ -1,90 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package customanalyzer

import (
	"os"
	"strings"

	"github.com/fatih/color"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var (
	names string
)

var removeCmd = &cobra.Command{
	Use:   "remove",
	Short: "Remove custom analyzer(s)",
	Long:  "The command to remove custom analyzer(s)",
	PreRun: func(cmd *cobra.Command, args []string) {
		// Ensure that the "names" flag is provided before running the command
		_ = cmd.MarkFlagRequired("names")
	},
	Run: func(cmd *cobra.Command, args []string) {
		if names == "" {
			// Display an error message and show command help if "names" is not set
			color.Red("Error: names must be set.")
			_ = cmd.Help()
			return
		}
		// Split the provided names by comma
		inputCustomAnalyzers := strings.Split(names, ",")

		// Load the custom analyzers from the configuration file
		err := viper.UnmarshalKey("custom_analyzers", &configCustomAnalyzer)
		if err != nil {
			// Display an error message if the configuration cannot be loaded
			color.Red("Error: %v", err)
			os.Exit(1)
		}

		// Iterate over each input analyzer name
		for _, inputAnalyzer := range inputCustomAnalyzers {
			foundAnalyzer := false
			// Search for the analyzer in the current configuration
			for i, analyzer := range configCustomAnalyzer {
				if analyzer.Name == inputAnalyzer {
					foundAnalyzer = true

					// Remove the analyzer from the configuration list
					configCustomAnalyzer = append(configCustomAnalyzer[:i], configCustomAnalyzer[i+1:]...)
					color.Green("%s deleted from the custom analyzer list", analyzer.Name)
					break
				}
			}
			if !foundAnalyzer {
				// Display an error if the analyzer is not found in the configuration
				color.Red("Error: %s does not exist in configuration file. Please use k8sgpt custom-analyzer add.", inputAnalyzer)
				os.Exit(1)
			}
		}

		// Save the updated configuration back to the file
		viper.Set("custom_analyzers", configCustomAnalyzer)
		if err := viper.WriteConfig(); err != nil {
			// Display an error if the configuration cannot be written
			color.Red("Error writing config file: %s", err.Error())
			os.Exit(1)
		}

	},
}

func init() {
	// add flag for names
	removeCmd.Flags().StringVarP(&names, "names", "n", "", "Custom analyzers to remove (separated by a comma)")
}
113
cmd/dump/dump.go
@@ -1,113 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dump

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"time"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"k8s.io/apimachinery/pkg/version"
)

type K8sGPTInfo struct {
	Version string
	Commit  string
	Date    string
}
type DumpOut struct {
	AIConfiguration        ai.AIConfiguration
	ActiveFilters          []string
	KubenetesServerVersion *version.Info
	K8sGPTInfo             K8sGPTInfo
}

var DumpCmd = &cobra.Command{
	Use:   "dump",
	Short: "Creates a dumpfile for debugging issues with K8sGPT",
	Long:  `The dump command will create a dump.*.json which will contain K8sGPT non-sensitive configuration information.`,
	Run: func(cmd *cobra.Command, args []string) {

		// Fetch the configuration object(s)
		// get ai configuration
		var configAI ai.AIConfiguration
		err := viper.UnmarshalKey("ai", &configAI)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}

		var newProvider []ai.AIProvider
		for _, config := range configAI.Providers {
			// we blank out the custom headers for data protection reasons
			config.CustomHeaders = make([]http.Header, 0)
			// blank out the password
			if len(config.Password) > 4 {
				config.Password = config.Password[:4] + "***"
			} else {
				// If the password is shorter than 4 characters, mask it entirely
				config.Password = "***"
			}
			newProvider = append(newProvider, config)
		}
		configAI.Providers = newProvider
		activeFilters := viper.GetStringSlice("active_filters")
		kubecontext := viper.GetString("kubecontext")
		kubeconfig := viper.GetString("kubeconfig")
		client, err := kubernetes.NewClient(kubecontext, kubeconfig)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		v, err := client.Client.Discovery().ServerVersion()
		if err != nil {
			color.Yellow("Could not find kubernetes server version")
		}
		var dumpOut DumpOut = DumpOut{
			AIConfiguration:        configAI,
			ActiveFilters:          activeFilters,
			KubenetesServerVersion: v,
			K8sGPTInfo: K8sGPTInfo{
				Version: viper.GetString("Version"),
				Commit:  viper.GetString("Commit"),
				Date:    viper.GetString("Date"),
			},
		}
		// Serialize dumpOut to JSON
		jsonData, err := json.MarshalIndent(dumpOut, "", " ")
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		// Write JSON data to file
		f := fmt.Sprintf("dump_%s.json", time.Now().Format("20060102150405"))
		err = os.WriteFile(f, jsonData, 0644)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		color.Green("Dump created successfully: %s", f)
	},
}

func init() {

}
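One detail in the dump filename above: Go time layouts are written against the reference time Mon Jan 2 15:04:05 MST 2006, so the layout "20060102150405" yields a compact yyyymmddhhmmss timestamp. A one-line illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	// e.g. prints "20230401103000" for 2023-04-01 10:30:00
	fmt.Println(time.Now().Format("20060102150405"))
}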
@@ -1,92 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"os"
	"strings"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/analyzer"
	"github.com/k8sgpt-ai/k8sgpt/pkg/util"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var addCmd = &cobra.Command{
	Use:   "add [filter(s)]",
	Short: "Adds one or more new filters.",
	Long:  `The add command adds one or more new filters to the default set of filters used by the analyze command.`,
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		inputFilters := strings.Split(args[0], ",")
		coreFilters, additionalFilters, integrationFilters := analyzer.ListFilters()

		availableFilters := append(append(coreFilters, additionalFilters...), integrationFilters...)
		// Verify the filters exist
		invalidFilters := []string{}
		for _, f := range inputFilters {
			if f == "" {
				color.Red("Filter cannot be empty. Please use correct syntax.")
				os.Exit(1)
			}
			foundFilter := false
			for _, filter := range availableFilters {
				if filter == f {
					foundFilter = true

					// WARNING: This is to help users correctly understand the
					// implications of enabling logs
					if filter == "Log" {
						color.Yellow("Warning: by enabling logs, you will be sending potentially sensitive data to the AI backend.")
					}

					break
				}
			}
			if !foundFilter {
				invalidFilters = append(invalidFilters, f)
			}
		}

		if len(invalidFilters) != 0 {
			color.Red("Filter %s does not exist. Please use k8sgpt filters list", strings.Join(invalidFilters, ", "))
			os.Exit(1)
		}

		// Get defined active_filters
		activeFilters := viper.GetStringSlice("active_filters")
		if len(activeFilters) == 0 {
			activeFilters = coreFilters
		}

		mergedFilters := append(activeFilters, inputFilters...)

		uniqueFilters, dupplicatedFilters := util.RemoveDuplicates(mergedFilters)

		// Verify duplicates
		if len(dupplicatedFilters) != 0 {
			color.Red("Duplicate filters found: %s", strings.Join(dupplicatedFilters, ", "))
			os.Exit(1)
		}

		viper.Set("active_filters", uniqueFilters)

		if err := viper.WriteConfig(); err != nil {
			color.Red("Error writing config file: %s", err.Error())
			os.Exit(1)
		}
		color.Green("Filter %s added", strings.Join(inputFilters, ", "))
	},
}
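The add command above defers duplicate detection to util.RemoveDuplicates. A hedged stand-in for that helper, written only to illustrate the unique/duplicated split the command expects; the real helper's exact semantics may differ:

package main

import "fmt"

// removeDuplicates returns the values in first-seen order plus the values
// that occurred more than once (a stand-in for util.RemoveDuplicates).
func removeDuplicates(in []string) (unique, duplicated []string) {
	seen := make(map[string]bool)
	for _, v := range in {
		if seen[v] {
			duplicated = append(duplicated, v)
			continue
		}
		seen[v] = true
		unique = append(unique, v)
	}
	return unique, duplicated
}

func main() {
	u, d := removeDuplicates([]string{"Pod", "Service", "Pod"})
	fmt.Println(u, d) // [Pod Service] [Pod]
}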
@@ -1,38 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"github.com/spf13/cobra"
)

var FiltersCmd = &cobra.Command{
	Use:     "filters",
	Aliases: []string{"filter"},
	Short:   "Manage filters for analyzing Kubernetes resources",
	Long: `The filters command allows you to manage filters that are used to analyze Kubernetes resources.
	You can list available filters to analyze resources.`,
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) == 0 {
			_ = cmd.Help()
			return
		}
	},
}

func init() {
	FiltersCmd.AddCommand(listCmd)
	FiltersCmd.AddCommand(addCmd)
	FiltersCmd.AddCommand(removeCmd)
}
@@ -1,71 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"fmt"
	"slices"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/analyzer"
	"github.com/k8sgpt-ai/k8sgpt/pkg/integration"
	"github.com/k8sgpt-ai/k8sgpt/pkg/util"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List available filters",
	Long:  `The list command displays a list of available filters that can be used to analyze Kubernetes resources.`,
	Run: func(cmd *cobra.Command, args []string) {
		activeFilters := viper.GetStringSlice("active_filters")
		coreFilters, additionalFilters, integrationFilters := analyzer.ListFilters()
		integration := integration.NewIntegration()
		availableFilters := append(append(coreFilters, additionalFilters...), integrationFilters...)

		if len(activeFilters) == 0 {
			activeFilters = coreFilters
		}
		inactiveFilters := util.SliceDiff(availableFilters, activeFilters)
		fmt.Print(color.YellowString("Active: \n"))
		for _, filter := range activeFilters {
			// if the filter is an integration, mark this differently
			// but if the integration is inactive, remove
			if slices.Contains(integrationFilters, filter) {
				fmt.Printf("> %s\n", color.BlueString("%s (integration)", filter))
			} else {
				// Check the filter against every integration via
				// AnalyzerByIntegration; since it matched none of the
				// integrationFilters above, an error here means the filter
				// is not part of an active integration
				if _, err := integration.AnalyzerByIntegration(filter); err != nil {
					fmt.Printf("> %s\n", color.GreenString(filter))
				}
			}
		}

		// display inactive filters
		if len(inactiveFilters) != 0 {
			fmt.Print(color.YellowString("Unused: \n"))
			for _, filter := range inactiveFilters {
				// if the filter is an integration, mark this differently
				if slices.Contains(integrationFilters, filter) {
					fmt.Printf("> %s\n", color.BlueString("%s (integration)", filter))
				} else {
					fmt.Printf("> %s\n", color.RedString(filter))
				}
			}
		}
	},
}
@@ -1,87 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"os"
	"strings"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/analyzer"
	"github.com/k8sgpt-ai/k8sgpt/pkg/util"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var removeCmd = &cobra.Command{
	Use:   "remove [filter(s)]",
	Short: "Remove one or more filters.",
	Long:  `The remove command removes one or more filters from the default set of filters used by the analyzer.`,
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		inputFilters := strings.Split(args[0], ",")

		// Get defined active_filters
		activeFilters := viper.GetStringSlice("active_filters")
		coreFilters, _, _ := analyzer.ListFilters()

		if len(activeFilters) == 0 {
			activeFilters = coreFilters
		}

		// Check that the input filters are not empty
		for _, f := range inputFilters {
			if f == "" {
				color.Red("Filter cannot be empty. Please use correct syntax.")
				os.Exit(1)
			}
		}

		// Verify duplicate filters, for example: k8sgpt filters remove Pod Pod
		uniqueFilters, duplicatedFilters := util.RemoveDuplicates(inputFilters)
		if len(duplicatedFilters) != 0 {
			color.Red("Duplicate filters found: %s", strings.Join(duplicatedFilters, ", "))
			os.Exit(1)
		}

		// Verify each filter exists in the config file and update the default filters
		filterNotFound := []string{}
		for _, filter := range uniqueFilters {
			foundFilter := false
			for i, f := range activeFilters {
				if f == filter {
					foundFilter = true
					activeFilters = append(activeFilters[:i], activeFilters[i+1:]...)
					break
				}
			}
			if !foundFilter {
				filterNotFound = append(filterNotFound, filter)
			}
		}

		if len(filterNotFound) != 0 {
			color.Red("Filter(s) %s do not exist in the configuration file. Please use k8sgpt filters add.", strings.Join(filterNotFound, ", "))
			os.Exit(1)
		}

		viper.Set("active_filters", activeFilters)

		if err := viper.WriteConfig(); err != nil {
			color.Red("Error writing config file: %s", err.Error())
			os.Exit(1)
		}
		color.Green("Filter(s) %s removed", strings.Join(inputFilters, ", "))
	},
}
@@ -1,31 +1,17 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package generate

import (
	"fmt"
	"os/exec"
	"runtime"

	"github.com/fatih/color"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"os/exec"
	"runtime"
	"time"
)

var (
	backend     string
	backendType string
	backend     string
)

// generateCmd represents the auth command
@@ -35,7 +21,7 @@ var GenerateCmd = &cobra.Command{
	Long: `Opens your browser to generate a key for your chosen backend.`,
	Run: func(cmd *cobra.Command, args []string) {

		backendType = viper.GetString("backend_type")
		backendType := viper.GetString("backend_type")
		if backendType == "" {
			// Set the default backend
			backend = "openai"
@@ -45,7 +31,11 @@ var GenerateCmd = &cobra.Command{
			backendType = backend
		}
		fmt.Println("")
		openbrowser("https://platform.openai.com/api-keys")
		color.Green("Opening: https://beta.openai.com/account/api-keys to generate a key for %s", backendType)
		color.Green("Please copy the generated key and run `k8sgpt auth` to add it to your config file")
		fmt.Println("")
		time.Sleep(5 * time.Second)
		openbrowser("https://beta.openai.com/account/api-keys")
	},
}

@@ -56,15 +46,9 @@ func init() {

func openbrowser(url string) {
	var err error
	isGui := true
	switch runtime.GOOS {
	case "linux":
		_, err = exec.LookPath("xdg-open")
		if err != nil {
			isGui = false
		} else {
			err = exec.Command("xdg-open", url).Start()
		}
		err = exec.Command("xdg-open", url).Start()
	case "windows":
		err = exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
	case "darwin":
@@ -72,21 +56,7 @@ func openbrowser(url string) {
	default:
		err = fmt.Errorf("unsupported platform")
	}
	printInstructions(isGui, backend)
	if err != nil {
		fmt.Println(err)
	}
}

func printInstructions(isGui bool, backendType string) {
	fmt.Println("")
	if isGui {
		color.Green("Opening: https://platform.openai.com/api-keys to generate a key for %s", backendType)
		fmt.Println("")
	} else {
		color.Green("Please open: https://platform.openai.com/api-keys to generate a key for %s", backendType)
		fmt.Println("")
	}
	color.Green("Please copy the generated key and run `k8sgpt auth add` to add it to your config file")
	fmt.Println("")
}
@@ -1,57 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package integration

import (
	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/analyzer"
	"github.com/k8sgpt-ai/k8sgpt/pkg/integration"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var skipInstall bool

// activateCmd represents the activate command
var activateCmd = &cobra.Command{
	Use:   "activate [integration]",
	Short: "Activate an integration",
	Long:  ``,
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		integrationName := args[0]
		coreFilters, _, _ := analyzer.ListFilters()

		// Update filters
		activeFilters := viper.GetStringSlice("active_filters")
		if len(activeFilters) == 0 {
			activeFilters = coreFilters
		}

		integration := integration.NewIntegration()
		// Check if the integration exists
		err := integration.Activate(integrationName, namespace, activeFilters, skipInstall)
		if err != nil {
			color.Red("Error: %v", err)
			return
		}

		color.Green("Activated integration %s", integrationName)
	},
}

func init() {
	IntegrationCmd.AddCommand(activateCmd)
	activateCmd.Flags().BoolVarP(&skipInstall, "no-install", "s", false, "Only activate the integration filter without installing it (for example, if the filter plugin is already deployed in the cluster, there is no need to re-install it)")
}
@@ -1,45 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package integration

import (
	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/integration"
	"github.com/spf13/cobra"
)

// deactivateCmd represents the deactivate command
var deactivateCmd = &cobra.Command{
	Use:   "deactivate [integration]",
	Short: "Deactivate an integration",
	Args:  cobra.ExactArgs(1),
	Long:  `For example: k8sgpt integration deactivate prometheus`,
	Run: func(cmd *cobra.Command, args []string) {
		integrationName := args[0]

		integration := integration.NewIntegration()

		if err := integration.Deactivate(integrationName, namespace); err != nil {
			color.Red("Error: %v", err)
			return
		}

		color.Green("Deactivated integration %s", integrationName)

	},
}

func init() {
	IntegrationCmd.AddCommand(deactivateCmd)
}
@@ -1,41 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package integration

import (
	"github.com/spf13/cobra"
)

var (
	namespace string
)

// IntegrationCmd represents the integrate command
var IntegrationCmd = &cobra.Command{
	Use:     "integration",
	Aliases: []string{"integrations"},
	Short:   "Integrate another tool into K8sGPT",
	Long: `Integrate another tool into K8sGPT. For example:

k8sgpt integration activate prometheus

This would allow you to connect to prometheus running in your cluster.`,
	Run: func(cmd *cobra.Command, args []string) {
		_ = cmd.Help()
	},
}

func init() {
	IntegrationCmd.PersistentFlags().StringVarP(&namespace, "namespace", "n", "default", "The namespace to use for the integration")
}
@@ -1,63 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package integration

import (
	"fmt"
	"os"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/integration"
	"github.com/spf13/cobra"
)

// listCmd represents the list command
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "Lists built-in integrations",
	Long:  ``,
	Run: func(cmd *cobra.Command, args []string) {
		integrationProvider := integration.NewIntegration()
		integrations := integrationProvider.List()

		fmt.Println(color.YellowString("Active:"))
		for _, i := range integrations {
			b, err := integrationProvider.IsActivate(i)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			if b {
				fmt.Printf("> %s\n", color.GreenString(i))
			}
		}

		fmt.Println(color.YellowString("Unused: "))
		for _, i := range integrations {
			b, err := integrationProvider.IsActivate(i)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			if !b {
				fmt.Printf("> %s\n", color.GreenString(i))
			}
		}
	},
}

func init() {
	IntegrationCmd.AddCommand(listCmd)

}
127
cmd/root.go
@@ -1,46 +1,22 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cmd

import (
	"fmt"
	"github.com/k8sgpt-ai/k8sgpt/cmd/generate"
	"os"
	"path/filepath"

	"github.com/adrg/xdg"
	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/cmd/analyze"
	"github.com/k8sgpt-ai/k8sgpt/cmd/auth"
	"github.com/k8sgpt-ai/k8sgpt/cmd/cache"
	customanalyzer "github.com/k8sgpt-ai/k8sgpt/cmd/customAnalyzer"
	"github.com/k8sgpt-ai/k8sgpt/cmd/dump"
	"github.com/k8sgpt-ai/k8sgpt/cmd/filters"
	"github.com/k8sgpt-ai/k8sgpt/cmd/generate"
	"github.com/k8sgpt-ai/k8sgpt/cmd/integration"
	"github.com/k8sgpt-ai/k8sgpt/cmd/serve"
	"github.com/k8sgpt-ai/k8sgpt/pkg/util"
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var (
	cfgFile     string
	kubecontext string
	kubeconfig  string
	verbose     bool
	Version     string
	Commit      string
	Date        string
	cfgFile    string
	masterURL  string
	kubeconfig string
	version    string
)

// rootCmd represents the base command when called without any subcommands
@@ -55,13 +31,8 @@ var rootCmd = &cobra.Command{

// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute(v string, c string, d string) {
	Version = v
	Commit = c
	Date = d
	viper.Set("Version", Version)
	viper.Set("Commit", Commit)
	viper.Set("Date", Date)
func Execute(v string) {
	version = v
	err := rootCmd.Execute()
	if err != nil {
		os.Exit(1)
@@ -69,23 +40,29 @@ func Execute(v string, c string, d string) {
}

func init() {
	performConfigMigrationIfNeeded()

	cobra.OnInitialize(initConfig)

	// Here you will define your flags and configuration settings.
	// Cobra supports persistent flags, which, if defined here,
	// will be global for your application.
	rootCmd.AddCommand(auth.AuthCmd)
	rootCmd.AddCommand(analyze.AnalyzeCmd)
	rootCmd.AddCommand(dump.DumpCmd)
	rootCmd.AddCommand(filters.FiltersCmd)
	rootCmd.AddCommand(generate.GenerateCmd)
	rootCmd.AddCommand(integration.IntegrationCmd)
	rootCmd.AddCommand(serve.ServeCmd)
	rootCmd.AddCommand(cache.CacheCmd)
	rootCmd.AddCommand(customanalyzer.CustomAnalyzerCmd)
	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", fmt.Sprintf("Default config file (%s/k8sgpt/k8sgpt.yaml)", xdg.ConfigHome))
	rootCmd.PersistentFlags().StringVar(&kubecontext, "kubecontext", "", "Kubernetes context to use. Only required if out-of-cluster.")
	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.k8sgpt.git.yaml)")
	rootCmd.PersistentFlags().StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
	rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
	rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Show detailed tool actions (e.g., API calls, checks).")
	// Cobra also supports local flags, which will only run
	// when this action is called directly.
	rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")

	// Initialise the kubeconfig
	kubernetesClient, err := kubernetes.NewClient(masterURL, kubeconfig)
	if err != nil {
		color.Red("Error initialising kubernetes client: %v", err)
	}

	viper.Set("kubernetesClient", kubernetesClient)

}

// initConfig reads in config file and ENV variables if set.
@@ -94,60 +71,22 @@ func initConfig() {
		// Use config file from the flag.
		viper.SetConfigFile(cfgFile)
	} else {
		// the config will be located under ~/.config/k8sgpt/k8sgpt.yaml on Linux
		configDir := filepath.Join(xdg.ConfigHome, "k8sgpt")
		// Find home directory.
		home, err := os.UserHomeDir()
		cobra.CheckErr(err)

		viper.AddConfigPath(configDir)
		// Search config in home directory with name ".k8sgpt.git" (without extension).
		viper.AddConfigPath(home)
		viper.SetConfigType("yaml")
		viper.SetConfigName("k8sgpt")
		viper.SetConfigName(".k8sgpt")

		_ = viper.SafeWriteConfig()
		viper.SafeWriteConfig()
	}

	viper.Set("kubecontext", kubecontext)
	viper.Set("kubeconfig", kubeconfig)
	viper.Set("verbose", verbose)

	viper.SetEnvPrefix("K8SGPT")
	viper.AutomaticEnv() // read in environment variables that match

	// If a config file is found, read it in.
	if err := viper.ReadInConfig(); err == nil {
		_ = 1
		// fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed())
	}
}

func performConfigMigrationIfNeeded() {
	oldConfig, err := getLegacyConfigFilePath()
	cobra.CheckErr(err)
	oldConfigExists, err := util.FileExists(oldConfig)
	cobra.CheckErr(err)

	newConfig := getConfigFilePath()
	newConfigExists, err := util.FileExists(newConfig)
	cobra.CheckErr(err)

	configDir := filepath.Dir(newConfig)
	err = util.EnsureDirExists(configDir)
	cobra.CheckErr(err)

	if oldConfigExists && !newConfigExists {
		err = os.Rename(oldConfig, newConfig)
		cobra.CheckErr(err)
	}
}

func getConfigFilePath() string {
	return filepath.Join(xdg.ConfigHome, "k8sgpt", "k8sgpt.yaml")
}

func getLegacyConfigFilePath() (string, error) {
	home, err := os.UserHomeDir()

	if err != nil {
		return "", err
	}

	return filepath.Join(home, ".k8sgpt.yaml"), nil
}
@@ -1,30 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cmd

import (
	"testing"

	"github.com/spf13/viper"
)

// Test that verbose flag is correctly set in viper.
func TestInitConfig_VerboseFlag(t *testing.T) {
	verbose = true
	viper.Reset()
	initConfig()
	if !viper.GetBool("verbose") {
		t.Error("Expected verbose flag to be true")
	}
}
@@ -1,245 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package serve

import (
	"os"
	"strconv"

	k8sgptserver "github.com/k8sgpt-ai/k8sgpt/pkg/server"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"go.uber.org/zap"
)

const (
	defaultTemperature float32 = 0.7
	defaultTopP        float32 = 1.0
	defaultTopK        int32   = 50
	defaultMaxTokens   int     = 2048
)

var (
	port        string
	metricsPort string
	backend     string
	enableHttp  bool
	enableMCP   bool
	mcpPort     string
	mcpHTTP     bool
	// filters can be injected into the server (repeatable flag)
	filters []string
)

var ServeCmd = &cobra.Command{
	Use:   "serve",
	Short: "Runs k8sgpt as a server",
	Long:  `Runs k8sgpt as a server to allow for easy integration with other applications.`,
	Run: func(cmd *cobra.Command, args []string) {

		var configAI ai.AIConfiguration
		err := viper.UnmarshalKey("ai", &configAI)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		var aiProvider *ai.AIProvider
		if len(configAI.Providers) == 0 {
			// we validate and set the temperature for our backend
			temperature := func() float32 {
				env := os.Getenv("K8SGPT_TEMPERATURE")
				if env == "" {
					return defaultTemperature
				}
				temperature, err := strconv.ParseFloat(env, 32)
				if err != nil {
					color.Red("Unable to convert Temperature value: %v", err)
					os.Exit(1)
				}
				if temperature > 1.0 || temperature < 0.0 {
					color.Red("Error: temperature ranges from 0 to 1.")
					os.Exit(1)
				}
				return float32(temperature)
			}
			topP := func() float32 {
				env := os.Getenv("K8SGPT_TOP_P")
				if env == "" {
					return defaultTopP
				}
				topP, err := strconv.ParseFloat(env, 32)
				if err != nil {
					color.Red("Unable to convert topP value: %v", err)
					os.Exit(1)
				}
				if topP > 1.0 || topP < 0.0 {
					color.Red("Error: topP ranges from 0 to 1.")
					os.Exit(1)
				}
				return float32(topP)
			}
			topK := func() int32 {
				env := os.Getenv("K8SGPT_TOP_K")
				if env == "" {
					return defaultTopK
				}
				topK, err := strconv.ParseFloat(env, 32)
				if err != nil {
					color.Red("Unable to convert topK value: %v", err)
					os.Exit(1)
				}
				if topK < 10 || topK > 100 {
					color.Red("Error: topK ranges from 10 to 100.")
					os.Exit(1)
				}
				return int32(topK)
			}
			maxTokens := func() int {
				env := os.Getenv("K8SGPT_MAX_TOKENS")
				if env == "" {
					return defaultMaxTokens
				}
				maxTokens, err := strconv.ParseInt(env, 10, 32)
				if err != nil {
					color.Red("Unable to convert maxTokens value: %v", err)
					os.Exit(1)
				}
				return int(maxTokens)
			}
			// Check for env injection
			backend = os.Getenv("K8SGPT_BACKEND")
			password := os.Getenv("K8SGPT_PASSWORD")
			model := os.Getenv("K8SGPT_MODEL")
			baseURL := os.Getenv("K8SGPT_BASEURL")
			engine := os.Getenv("K8SGPT_ENGINE")
			proxyEndpoint := os.Getenv("K8SGPT_PROXY_ENDPOINT")
			providerId := os.Getenv("K8SGPT_PROVIDER_ID")
			// If the envs are set, allocate them in place to the aiProvider,
			// else exit with an error
			envIsSet := backend != "" || password != "" || model != ""
			if envIsSet {
				aiProvider = &ai.AIProvider{
					Name:          backend,
					Password:      password,
					Model:         model,
					BaseURL:       baseURL,
					Engine:        engine,
					ProxyEndpoint: proxyEndpoint,
					ProviderId:    providerId,
					Temperature:   temperature(),
					TopP:          topP(),
					TopK:          topK(),
					MaxTokens:     maxTokens(),
				}

				configAI.Providers = append(configAI.Providers, *aiProvider)

				viper.Set("ai", configAI)
				if err := viper.WriteConfig(); err != nil {
					color.Red("Error writing config file: %s", err.Error())
					os.Exit(1)
				}
			} else {
				color.Red("Error: AI provider not specified in configuration. Please run k8sgpt auth")
				os.Exit(1)
			}
		}
		if aiProvider == nil {
			for _, provider := range configAI.Providers {
				if backend == provider.Name {
					// the pointer to the range variable is not really an issue here, as there
					// is a break right after, but to prevent potential future issues, a temp
					// variable is assigned
					p := provider
					aiProvider = &p
					break
				}
			}
		}

		if aiProvider == nil || aiProvider.Name == "" {
			color.Red("Error: AI provider %s not specified in configuration. Please run k8sgpt auth", backend)
			os.Exit(1)
		}

		logger, err := zap.NewProduction()
		if err != nil {
			color.Red("failed to create logger: %v", err)
			os.Exit(1)
		}
		defer func() {
			if err := logger.Sync(); err != nil {
				color.Red("failed to sync logger: %v", err)
				os.Exit(1)
			}
		}()

		if enableMCP {
			// Create and start the MCP server
			mcpServer, err := k8sgptserver.NewMCPServer(mcpPort, aiProvider, mcpHTTP, logger)
			if err != nil {
				color.Red("Error creating MCP server: %v", err)
				os.Exit(1)
			}
			go func() {
				if err := mcpServer.Start(); err != nil {
					color.Red("Error starting MCP server: %v", err)
					os.Exit(1)
				}
			}()
		}

		server := k8sgptserver.Config{
			Backend:     aiProvider.Name,
			Port:        port,
			MetricsPort: metricsPort,
			EnableHttp:  enableHttp,
			Token:       aiProvider.Password,
			Logger:      logger,
			Filters:     filters,
		}
		go func() {
			if err := server.ServeMetrics(); err != nil {
				color.Red("Error: %v", err)
				os.Exit(1)
			}
		}()

		go func() {
			if err := server.Serve(); err != nil {
				color.Red("Error: %v", err)
				os.Exit(1)
			}
		}()

		// Wait for both servers to exit
		select {}
	},
}

func init() {
	// add flag for backend
	ServeCmd.Flags().StringVarP(&port, "port", "p", "8080", "Port to run the server on")
	ServeCmd.Flags().StringVarP(&metricsPort, "metrics-port", "", "8081", "Port to run the metrics-server on")
	ServeCmd.Flags().StringVarP(&backend, "backend", "b", "openai", "Backend AI provider")
	ServeCmd.Flags().BoolVarP(&enableHttp, "http", "", false, "Enable REST/http using grpc-gateway")
	ServeCmd.Flags().BoolVarP(&enableMCP, "mcp", "", false, "Enable Model Context Protocol server")
	ServeCmd.Flags().StringVarP(&mcpPort, "mcp-port", "", "8089", "Port to run the MCP server on")
	ServeCmd.Flags().BoolVarP(&mcpHTTP, "mcp-http", "", false, "Enable HTTP mode for MCP server")
	// allow injecting filters into the running server (repeatable)
	ServeCmd.Flags().StringSliceVar(&filters, "filter", []string{}, "Filter to apply (can be specified multiple times)")
}
@@ -1,22 +1,6 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cmd

import (
	"fmt"
	"runtime/debug"

	"github.com/spf13/cobra"
)

@@ -26,21 +10,7 @@ var versionCmd = &cobra.Command{
	Short: "Print the version number of k8sgpt",
	Long:  `All software has versions. This is k8sgpt's`,
	Run: func(cmd *cobra.Command, args []string) {
		if Version == "dev" {
			details, ok := debug.ReadBuildInfo()
			if ok && details.Main.Version != "" && details.Main.Version != "(devel)" {
				Version = details.Main.Version
				for _, i := range details.Settings {
					if i.Key == "vcs.time" {
						Date = i.Value
					}
					if i.Key == "vcs.revision" {
						Commit = i.Value
					}
				}
			}
		}
		fmt.Printf("k8sgpt: %s (%s), built at: %s\n", Version, Commit, Date)
		cmd.Printf("k8sgpt version %s", version)
	},
}
@@ -1,20 +1,7 @@
# Copyright 2023 The K8sGPT Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM golang:1.24-alpine3.23 AS builder
FROM golang:1.20.2-alpine3.16 AS builder

ENV CGO_ENABLED=0
ARG VERSION
ARG COMMIT
ARG DATE

WORKDIR /workspace

COPY go.mod go.sum ./
@@ -22,18 +9,18 @@ RUN go mod download

COPY ./ ./

RUN go build -o /workspace/k8sgpt -ldflags "-X main.version=${VERSION} -X main.commit=${COMMIT} -X main.date=${DATE}" ./
RUN go build -o /workspace/k8sgpt ./

FROM gcr.io/distroless/static AS production

LABEL org.opencontainers.image.source="https://github.com/k8sgpt-ai/k8sgpt" \
    org.opencontainers.image.url="https://k8sgpt.ai" \
    org.opencontainers.image.title="k8sgpt" \
    org.opencontainers.image.vendor='The K8sGPT Authors' \
    org.opencontainers.image.licenses='Apache-2.0'
    org.opencontainers.image.vendor="the k8sgpt-ai maintainers" \
    org.opencontainers.image.licenses="MIT"

WORKDIR /
COPY --from=builder /workspace/k8sgpt .
USER 65532:65532

ENTRYPOINT ["/k8sgpt"]
ENTRYPOINT ["/k8sgpt"]
@@ -1,463 +0,0 @@
{
  "annotations": {
    "list": [
      {
        "builtIn": 1,
        "datasource": {
          "type": "grafana",
          "uid": "-- Grafana --"
        },
        "enable": true,
        "hide": true,
        "iconColor": "rgba(0, 211, 255, 1)",
        "name": "Annotations & Alerts",
        "target": {
          "limit": 100,
          "matchAny": false,
          "tags": [],
          "type": "dashboard"
        },
        "type": "dashboard"
      }
    ]
  },
  "description": "",
  "editable": true,
  "fiscalYearStartMonth": 0,
  "graphTooltip": 0,
  "id": 27,
  "links": [],
  "liveNow": false,
  "panels": [
    {
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      },
      "gridPos": {
        "h": 7,
        "w": 2,
        "x": 0,
        "y": 0
      },
      "id": 8,
      "options": {
        "code": {
          "language": "plaintext",
          "showLineNumbers": false,
          "showMiniMap": false
        },
        "content": "",
        "mode": "markdown"
      },
      "pluginVersion": "9.4.7",
      "transparent": true,
      "type": "text"
    },
    {
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      },
      "gridPos": {
        "h": 4,
        "w": 3,
        "x": 2,
        "y": 0
      },
      "id": 10,
      "options": {
        "code": {
          "language": "plaintext",
          "showLineNumbers": false,
          "showMiniMap": false
        },
        "content": "Useful links\n- [Website](https://k8sgpt.ai/)\n- [Documentation](https://docs.k8sgpt.ai/)\n- [GitHub](https://github.com/k8sgpt-ai)",
        "mode": "markdown"
      },
      "pluginVersion": "9.4.7",
      "transparent": true,
      "type": "text"
    },
    {
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      },
      "description": "Total number of errors",
      "fieldConfig": {
        "defaults": {
          "color": {
            "mode": "thresholds"
          },
          "mappings": [],
          "min": 0,
          "thresholds": {
            "mode": "absolute",
            "steps": [
              {
                "color": "green",
                "value": null
              },
              {
                "color": "red",
                "value": 80
              }
            ]
          },
          "unit": "none"
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 7,
        "x": 5,
        "y": 0
      },
      "id": 2,
      "options": {
        "orientation": "auto",
        "reduceOptions": {
          "calcs": [
            "lastNotNull"
          ],
          "fields": "",
          "values": false
        },
        "showThresholdLabels": true,
        "showThresholdMarkers": true
      },
      "pluginVersion": "9.4.7",
      "targets": [
        {
          "datasource": {
            "type": "prometheus",
            "uid": "prometheus"
          },
          "editorMode": "code",
          "expr": "sum(analyzer_errors{namespace=~\"$namespace\"})",
          "legendFormat": "__auto",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Errors",
      "type": "gauge"
    },
    {
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      },
      "description": "Total number of errors per analyzer",
      "fieldConfig": {
        "defaults": {
          "color": {
            "mode": "palette-classic"
          },
          "displayName": "${__field.labels.analyzer_name}",
          "mappings": [],
          "min": 0,
          "thresholds": {
            "mode": "absolute",
            "steps": [
              {
                "color": "green",
                "value": null
              },
              {
                "color": "red",
                "value": 80
              }
            ]
          },
          "unit": "none"
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 12,
        "y": 0
      },
      "id": 5,
      "options": {
        "displayMode": "gradient",
        "minVizHeight": 10,
        "minVizWidth": 0,
        "orientation": "horizontal",
        "reduceOptions": {
          "calcs": [
            "lastNotNull"
          ],
          "fields": "",
          "values": false
        },
        "showUnfilled": true
      },
      "pluginVersion": "9.4.7",
      "targets": [
        {
          "datasource": {
            "type": "prometheus",
            "uid": "prometheus"
          },
          "editorMode": "code",
          "expr": "sum by (analyzer_name) (analyzer_errors{namespace=~\"$namespace\"})",
          "legendFormat": "__auto",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Errors per analyzer",
      "type": "bargauge"
    },
    {
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      },
      "description": "Total number of errors time series",
      "fieldConfig": {
        "defaults": {
          "color": {
            "mode": "palette-classic"
          },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 10,
            "gradientMode": "none",
            "hideFrom": {
              "legend": false,
              "tooltip": false,
              "viz": false
            },
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": {
              "type": "linear"
            },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": {
              "group": "A",
              "mode": "none"
            },
            "thresholdsStyle": {
              "mode": "off"
            }
          },
          "mappings": [],
          "min": 0,
          "thresholds": {
            "mode": "absolute",
            "steps": [
              {
                "color": "green",
                "value": null
              },
              {
                "color": "red",
                "value": 80
              }
            ]
          },
          "unit": "none"
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 0,
        "y": 8
      },
      "id": 4,
      "options": {
        "legend": {
          "calcs": [],
          "displayMode": "list",
          "placement": "bottom",
          "showLegend": false
        },
        "tooltip": {
          "mode": "single",
          "sort": "none"
        }
      },
      "pluginVersion": "9.4.7",
      "targets": [
        {
          "datasource": {
            "type": "prometheus",
            "uid": "prometheus"
          },
          "editorMode": "code",
          "expr": "sum (analyzer_errors{namespace=~\"$namespace\"})",
          "legendFormat": "__auto",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Errors",
      "type": "timeseries"
    },
    {
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      },
      "description": "Total number of errors per analyzer time series",
      "fieldConfig": {
        "defaults": {
          "color": {
            "mode": "palette-classic"
          },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 10,
            "gradientMode": "none",
            "hideFrom": {
              "legend": false,
              "tooltip": false,
              "viz": false
            },
            "lineInterpolation": "linear",
            "lineStyle": {
              "fill": "solid"
            },
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": {
              "type": "linear"
            },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": {
              "group": "A",
              "mode": "none"
            },
            "thresholdsStyle": {
              "mode": "off"
            }
          },
          "mappings": [],
          "min": 0,
          "thresholds": {
            "mode": "absolute",
            "steps": [
              {
                "color": "green",
                "value": null
              },
              {
                "color": "red",
                "value": 80
              }
            ]
          },
          "unit": "none"
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 12,
        "y": 8
      },
      "id": 6,
      "options": {
        "legend": {
          "calcs": [],
          "displayMode": "list",
          "placement": "right",
          "showLegend": true
        },
        "tooltip": {
          "mode": "single",
          "sort": "none"
        }
      },
      "pluginVersion": "9.4.7",
      "targets": [
        {
          "datasource": {
            "type": "prometheus",
            "uid": "prometheus"
          },
          "editorMode": "code",
          "expr": "sum by (analyzer_name) (analyzer_errors{namespace=~\"$namespace\"})",
          "legendFormat": "__auto",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Errors per analyzer",
      "type": "timeseries"
    }
  ],
  "refresh": "",
  "revision": 1,
  "schemaVersion": 38,
  "style": "dark",
  "tags": [],
  "templating": {
    "list": [
      {
        "current": {
          "selected": true,
          "text": [
            "All"
          ],
          "value": [
            "$__all"
          ]
        },
        "datasource": {
          "type": "prometheus",
          "uid": "prometheus"
        },
        "definition": "label_values(analyzer_errors, namespace)",
        "hide": 0,
        "includeAll": true,
        "label": "Namespace",
        "multi": true,
        "name": "namespace",
        "options": [],
        "query": {
          "query": "label_values(analyzer_errors, namespace)",
          "refId": "StandardVariableQuery"
        },
        "refresh": 2,
        "regex": "",
        "skipUrlSync": false,
        "sort": 1,
        "type": "query"
      }
    ]
  },
  "time": {
    "from": "now-5m",
    "to": "now"
  },
  "timepicker": {},
  "timezone": "",
  "title": "K8sGPT",
  "uid": "JfxtNBP4z",
  "version": 1,
  "weekStart": ""
}
326
go.mod
@@ -1,299 +1,69 @@
module github.com/k8sgpt-ai/k8sgpt

go 1.24.1

toolchain go1.24.11
go 1.20

require (
	github.com/fatih/color v1.18.0
	github.com/kedacore/keda/v2 v2.16.0
	github.com/magiconair/properties v1.8.9
	github.com/mittwald/go-helm-client v0.12.14
	github.com/ollama/ollama v0.13.4
	github.com/sashabaranov/go-openai v1.36.0
	github.com/schollz/progressbar/v3 v3.17.1
	github.com/spf13/cobra v1.8.1
	github.com/spf13/viper v1.19.0
	github.com/stretchr/testify v1.10.0
	golang.org/x/term v0.33.0
	helm.sh/helm/v3 v3.17.4
	k8s.io/api v0.32.3
	k8s.io/apimachinery v0.32.3
	k8s.io/client-go v0.32.3
	k8s.io/kubectl v0.32.2 // indirect

)

require github.com/adrg/xdg v0.5.3

require (
	buf.build/gen/go/interplex-ai/schemas/grpc/go v1.5.1-20241117203254-a91193b62179.1
	buf.build/gen/go/interplex-ai/schemas/protocolbuffers/go v1.35.2-20241117203254-a91193b62179.1
	buf.build/gen/go/k8sgpt-ai/k8sgpt/grpc-ecosystem/gateway/v2 v2.24.0-20241118152629-1379a5a1889d.1
	buf.build/gen/go/k8sgpt-ai/k8sgpt/grpc/go v1.5.1-20241118152629-1379a5a1889d.1
	buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go v1.35.2-20241118152629-1379a5a1889d.1
	cloud.google.com/go/storage v1.50.0
	cloud.google.com/go/vertexai v0.13.2
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0
	github.com/IBM/watsonx-go v1.0.1
	github.com/agiledragon/gomonkey/v2 v2.13.0
	github.com/aws/aws-sdk-go v1.55.7
	github.com/aws/aws-sdk-go-v2 v1.36.3
	github.com/aws/aws-sdk-go-v2/config v1.29.14
	github.com/aws/aws-sdk-go-v2/service/bedrock v1.33.0
	github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.30.0
	github.com/aws/smithy-go v1.22.2
	github.com/cohere-ai/cohere-go/v2 v2.12.2
	github.com/go-logr/zapr v1.3.0
	github.com/google/generative-ai-go v0.19.0
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
	github.com/hupe1980/go-huggingface v0.0.15
	github.com/kyverno/policy-reporter-kyverno-plugin v1.6.4
	github.com/mark3labs/mcp-go v0.36.0
	github.com/olekukonko/tablewriter v0.0.5
	github.com/oracle/oci-go-sdk/v65 v65.79.0
	github.com/prometheus/prometheus v0.306.0
	github.com/pterm/pterm v0.12.80
	google.golang.org/api v0.239.0
	gopkg.in/yaml.v2 v2.4.0
	sigs.k8s.io/controller-runtime v0.19.3
	sigs.k8s.io/gateway-api v1.2.1
	github.com/fatih/color v1.15.0
	github.com/sashabaranov/go-openai v1.5.7
	github.com/schollz/progressbar/v3 v3.13.1
	github.com/spf13/cobra v1.6.1
	github.com/spf13/viper v1.15.0
	golang.org/x/term v0.6.0
	k8s.io/api v0.26.3
	k8s.io/apimachinery v0.26.3
	k8s.io/client-go v0.26.3
)

require (
	atomicgo.dev/cursor v0.2.0 // indirect
	atomicgo.dev/keyboard v0.2.9 // indirect
	atomicgo.dev/schedule v0.1.0 // indirect
	cel.dev/expr v0.23.0 // indirect
	cloud.google.com/go v0.120.0 // indirect
	cloud.google.com/go/ai v0.8.0 // indirect
	cloud.google.com/go/aiplatform v1.85.0 // indirect
	cloud.google.com/go/auth v0.16.2 // indirect
	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
	cloud.google.com/go/compute/metadata v0.7.0 // indirect
	cloud.google.com/go/iam v1.5.2 // indirect
	cloud.google.com/go/longrunning v0.6.7 // indirect
	cloud.google.com/go/monitoring v1.24.2 // indirect
	dario.cat/mergo v1.0.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect
	github.com/Microsoft/hcsshim v0.12.4 // indirect
	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
	github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
	github.com/bahlo/generic-list-go v0.2.0 // indirect
	github.com/blang/semver/v4 v4.0.0 // indirect
	github.com/buger/jsonparser v1.1.1 // indirect
	github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
	github.com/containerd/console v1.0.4 // indirect
	github.com/containerd/errdefs v1.0.0 // indirect
	github.com/containerd/log v0.1.0 // indirect
	github.com/containerd/platforms v0.2.1 // indirect
	github.com/creack/pty v1.1.21 // indirect
	github.com/distribution/reference v0.6.0 // indirect
	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
	github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
	github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
	github.com/expr-lang/expr v1.17.7 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
	github.com/gofrs/flock v0.12.1 // indirect
	github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
	github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
	github.com/google/s2a-go v0.1.9 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
	github.com/googleapis/gax-go/v2 v2.14.2 // indirect
	github.com/gookit/color v1.5.4 // indirect
	github.com/gorilla/websocket v1.5.1 // indirect
	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
	github.com/invopop/jsonschema v0.13.0 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect
	github.com/jpillora/backoff v1.0.0 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/lithammer/fuzzysearch v1.1.8 // indirect
	github.com/moby/sys/mountinfo v0.7.1 // indirect
	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
	github.com/prometheus/sigv4 v0.2.0 // indirect
	github.com/sagikazarmark/locafero v0.6.0 // indirect
	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
	github.com/segmentio/fasthash v1.0.3 // indirect
	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
	github.com/sony/gobreaker v0.5.0 // indirect
	github.com/sourcegraph/conc v0.3.0 // indirect
	github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
	github.com/stretchr/objx v0.5.2 // indirect
	github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
	github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
	github.com/zeebo/errs v1.4.0 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
	go.opentelemetry.io/otel/metric v1.36.0 // indirect
	go.opentelemetry.io/otel/sdk v1.36.0 // indirect
	go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
	google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	knative.dev/pkg v0.0.0-20241026180704-25f6002b00f3 // indirect
)

require (
	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
	github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
	github.com/MakeNowJust/heredoc v1.0.0 // indirect
	github.com/Masterminds/goutils v1.1.1 // indirect
	github.com/Masterminds/semver/v3 v3.3.0 // indirect
	github.com/Masterminds/sprig/v3 v3.3.0 // indirect
	github.com/Masterminds/squirrel v1.5.4 // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/chai2010/gettext-go v1.0.3 // indirect
	github.com/containerd/containerd v1.7.29 // indirect
	github.com/cyphar/filepath-securejoin v0.3.6 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/docker/cli v26.1.4+incompatible // indirect
	github.com/docker/distribution v2.8.3+incompatible // indirect
	github.com/docker/docker v28.3.0+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.8.2 // indirect
	github.com/docker/go-connections v0.5.0 // indirect
	github.com/docker/go-metrics v0.0.1 // indirect
	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
	github.com/evanphx/json-patch v5.9.0+incompatible // indirect
	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
	github.com/fsnotify/fsnotify v1.8.0 // indirect
	github.com/go-errors/errors v1.5.1 // indirect
	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-openapi/jsonpointer v0.21.0 // indirect
	github.com/go-openapi/jsonreference v0.21.0 // indirect
	github.com/go-openapi/swag v0.23.0 // indirect
	github.com/gobwas/glob v0.2.3 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/go-logr/logr v1.2.3 // indirect
	github.com/go-openapi/jsonpointer v0.19.5 // indirect
	github.com/go-openapi/jsonreference v0.20.0 // indirect
	github.com/go-openapi/swag v0.19.14 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/google/btree v1.1.2 // indirect
	github.com/google/gnostic v0.7.0
	github.com/google/go-cmp v0.7.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gorilla/mux v1.8.1 // indirect
	github.com/gosuri/uitable v0.0.4 // indirect
	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/google/gnostic v0.5.7-v3refs // indirect
	github.com/google/go-cmp v0.5.9 // indirect
	github.com/google/gofuzz v1.1.0 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/huandu/xstrings v1.5.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jmoiron/sqlx v1.4.0 // indirect
	github.com/imdario/mergo v0.3.6 // indirect
	github.com/inconshreveable/mousetrap v1.0.1 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
	github.com/lib/pq v1.10.9 // indirect
	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/magiconair/properties v1.8.7 // indirect
	github.com/mailru/easyjson v0.7.6 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/mattn/go-isatty v0.0.17 // indirect
	github.com/mattn/go-runewidth v0.0.14 // indirect
	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
	github.com/mitchellh/copystructure v1.2.0 // indirect
	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/mitchellh/reflectwalk v1.0.2 // indirect
	github.com/moby/locker v1.0.1 // indirect
	github.com/moby/spdystream v0.5.0 // indirect
	github.com/moby/term v0.5.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.23.0-rc.1
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/rubenv/sql-migrate v1.7.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/spf13/afero v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
go.opentelemetry.io/otel v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/crypto v0.40.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa // indirect
|
||||
golang.org/x/net v0.42.0
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/text v0.27.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
github.com/subosito/gotenv v1.4.2 // indirect
|
||||
golang.org/x/net v0.8.0 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect
|
||||
golang.org/x/sys v0.6.0 // indirect
|
||||
golang.org/x/text v0.8.0 // indirect
|
||||
golang.org/x/time v0.1.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.32.2
|
||||
k8s.io/apiserver v0.32.2 // indirect
|
||||
k8s.io/cli-runtime v0.32.2 // indirect
|
||||
k8s.io/component-base v0.32.2 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
|
||||
oras.land/oras-go v1.2.5 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.18.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/klog/v2 v2.80.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
|
||||
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
// v1.2.0 is taken from github.com/open-policy-agent/opa v0.42.0
|
||||
// v1.2.0 incompatible with github.com/docker/docker v23.0.0-rc.1+incompatible
|
||||
//replace oras.land/oras-go => oras.land/oras-go v1.2.4
|
||||
replace github.com/docker/docker => github.com/docker/docker v28.0.4+incompatible
|
||||
|
||||
replace dario.cat/mergo => github.com/imdario/mergo v1.0.1
|
||||
|
||||
|
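The two active replace directives above force every transitive requirement of github.com/docker/docker onto v28.0.4+incompatible and redirect dario.cat/mergo to its former import path, github.com/imdario/mergo. The general go.mod form (standard Go module syntax, not specific to this repository) is:

replace original/module/path => replacement/module/path v1.2.3

Note that replace directives only take effect in the main module's go.mod; modules that depend on k8sgpt as a library ignore them.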
Binary image changes (sizes from the diff viewer; the first three file names were not captured):
modified image: Before 76 KiB, After 104 KiB
modified image: Before 78 KiB, After 104 KiB
deleted image: Before 22 KiB
BIN images/demo5.gif (deleted; Before 1.4 MiB)
BIN images/logo-black.png (Normal file; After 236 KiB)
BIN images/logo-white.png (Normal file; After 234 KiB)
BIN images/nodes.gif (deleted; Before 120 KiB)
20
main.go
@@ -1,26 +1,12 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright © 2023 NAME HERE <EMAIL ADDRESS>
*/

package main

import "github.com/k8sgpt-ai/k8sgpt/cmd"

var (
    version = "dev"
    commit  = "HEAD"
    date    = "unknown"
)
var version = "dev"

func main() {
    cmd.Execute(version, commit, date)
    cmd.Execute(version)
}
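The old side of this diff threads version, commit, and date into cmd.Execute. Package-level variables like these are conventionally stamped at build time with the Go linker's -X flag; the exact release tooling is not part of this diff, so the command below is illustrative only:

go build -ldflags "-X main.version=v0.4.28 -X main.commit=$(git rev-parse HEAD) -X main.date=2023-01-01" .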
48
pkg/ai/ai.go
Normal file
@@ -0,0 +1,48 @@
package ai

import (
    "context"
    "errors"
    "fmt"

    "github.com/sashabaranov/go-openai"
)

const (
    default_prompt = "Simplify the following Kubernetes error message and provide a solution in %s: %s"
    prompt_a       = "Read the following input %s and provide possible scenarios for remediation in %s"
    prompt_b       = "Considering the following input from the Kubernetes resource %s and the error message %s, provide possible scenarios for remediation in %s"
    prompt_c       = "Reading the following %s error message and it's accompanying log message %s, how would you simplify this message?"
)

type OpenAIClient struct {
    client   *openai.Client
    language string
}

func (c *OpenAIClient) Configure(token string, language string) error {
    client := openai.NewClient(token)
    if client == nil {
        return errors.New("error creating OpenAI client")
    }
    c.language = language
    c.client = client
    return nil
}

func (c *OpenAIClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
    // Create a completion request
    resp, err := c.client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
        Model: openai.GPT3Dot5Turbo,
        Messages: []openai.ChatCompletionMessage{
            {
                Role:    "user",
                Content: fmt.Sprintf(default_prompt, c.language, prompt),
            },
        },
    })
    if err != nil {
        return "", err
    }
    return resp.Choices[0].Message.Content, nil
}
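A minimal usage sketch for the new client (hypothetical caller; the pkg/ai import path follows the module paths used elsewhere in this diff):

package main

import (
    "context"
    "fmt"
    "log"
    "os"

    "github.com/k8sgpt-ai/k8sgpt/pkg/ai"
)

func main() {
    client := &ai.OpenAIClient{}
    // Configure wires up the go-openai client and remembers the output language.
    if err := client.Configure(os.Getenv("OPENAI_API_KEY"), "english"); err != nil {
        log.Fatal(err)
    }
    // GetCompletion wraps the raw error text in default_prompt and sends it to GPT-3.5 Turbo.
    out, err := client.GetCompletion(context.Background(), "CrashLoopBackOff: back-off restarting failed container")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out)
}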
@@ -1,626 +0,0 @@
package ai

import (
    "context"
    "errors"
    "fmt"
    "os"
    "regexp"
    "strings"

    "github.com/k8sgpt-ai/k8sgpt/pkg/ai/bedrock_support"

    "github.com/aws/aws-sdk-go-v2/aws"
    awsconfig "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/bedrock"
    "github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
    "github.com/aws/smithy-go/middleware"
    smithyhttp "github.com/aws/smithy-go/transport/http"
)

const amazonbedrockAIClientName = "amazonbedrock"

// AmazonBedRockClient represents the client for interacting with the Amazon Bedrock service.
type AmazonBedRockClient struct {
    nopCloser

    client      BedrockRuntimeAPI
    mgmtClient  BedrockManagementAPI
    model       *bedrock_support.BedrockModel
    temperature float32
    topP        float32
    maxTokens   int
    models      []bedrock_support.BedrockModel
}

// AmazonCompletion BedRock support region list US East (N. Virginia),US West (Oregon),Asia Pacific (Singapore),Asia Pacific (Tokyo),Europe (Frankfurt)
// https://docs.aws.amazon.com/bedrock/latest/userguide/what-is-bedrock.html#bedrock-regions
const BEDROCK_DEFAULT_REGION = "us-east-1" // default use us-east-1 region

const (
    US_East_1      = "us-east-1"
    US_West_2      = "us-west-2"
    AP_Southeast_1 = "ap-southeast-1"
    AP_Northeast_1 = "ap-northeast-1"
    EU_Central_1   = "eu-central-1"
    AP_South_1     = "ap-south-1"
    US_Gov_West_1  = "us-gov-west-1"
    US_Gov_East_1  = "us-gov-east-1"
)

var BEDROCKER_SUPPORTED_REGION = []string{
    US_East_1,
    US_West_2,
    AP_Southeast_1,
    AP_Northeast_1,
    EU_Central_1,
    AP_South_1,
    US_Gov_West_1,
    US_Gov_East_1,
}

var defaultModels = []bedrock_support.BedrockModel{
    {
        Name:       "anthropic.claude-sonnet-4-20250514-v1:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "anthropic.claude-sonnet-4-20250514-v1:0",
        },
    },
    {
        Name:       "us.anthropic.claude-sonnet-4-20250514-v1:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "us.anthropic.claude-sonnet-4-20250514-v1:0",
        },
    },
    {
        Name:       "eu.anthropic.claude-sonnet-4-20250514-v1:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "eu.anthropic.claude-sonnet-4-20250514-v1:0",
        },
    },
    {
        Name:       "apac.anthropic.claude-sonnet-4-20250514-v1:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "apac.anthropic.claude-sonnet-4-20250514-v1:0",
        },
    },
    {
        Name:       "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
        },
    },
    {
        Name:       "eu.anthropic.claude-3-7-sonnet-20250219-v1:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "eu.anthropic.claude-3-7-sonnet-20250219-v1:0",
        },
    },
    {
        Name:       "apac.anthropic.claude-3-7-sonnet-20250219-v1:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "apac.anthropic.claude-3-7-sonnet-20250219-v1:0",
        },
    },
    {
        Name:       "anthropic.claude-3-5-sonnet-20240620-v1:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "anthropic.claude-3-5-sonnet-20240620-v1:0",
        },
    },
    {
        Name:       "us.anthropic.claude-3-5-sonnet-20241022-v2:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "us.anthropic.claude-3-5-sonnet-20241022-v2:0",
        },
    },
    {
        Name:       "anthropic.claude-v2",
        Completion: &bedrock_support.CohereCompletion{},
        Response:   &bedrock_support.CohereResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "anthropic.claude-v2",
        },
    },
    {
        Name:       "anthropic.claude-v1",
        Completion: &bedrock_support.CohereCompletion{},
        Response:   &bedrock_support.CohereResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "anthropic.claude-v1",
        },
    },
    {
        Name:       "anthropic.claude-instant-v1",
        Completion: &bedrock_support.CohereCompletion{},
        Response:   &bedrock_support.CohereResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "anthropic.claude-instant-v1",
        },
    },
    {
        Name:       "ai21.j2-ultra-v1",
        Completion: &bedrock_support.AI21{},
        Response:   &bedrock_support.AI21Response{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "ai21.j2-ultra-v1",
        },
    },
    {
        Name:       "ai21.j2-jumbo-instruct",
        Completion: &bedrock_support.AI21{},
        Response:   &bedrock_support.AI21Response{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "ai21.j2-jumbo-instruct",
        },
    },
    {
        Name:       "amazon.titan-text-express-v1",
        Completion: &bedrock_support.AmazonCompletion{},
        Response:   &bedrock_support.AmazonResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "amazon.titan-text-express-v1",
        },
    },
    {
        Name:       "amazon.nova-pro-v1:0",
        Completion: &bedrock_support.AmazonCompletion{},
        Response:   &bedrock_support.NovaResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            // https://docs.aws.amazon.com/nova/latest/userguide/getting-started-api.html
            MaxTokens:   100, // max of 300k tokens
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "amazon.nova-pro-v1:0",
        },
    },
    {
        Name:       "eu.amazon.nova-pro-v1:0",
        Completion: &bedrock_support.AmazonCompletion{},
        Response:   &bedrock_support.NovaResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            // https://docs.aws.amazon.com/nova/latest/userguide/getting-started-api.html
            MaxTokens:   100, // max of 300k tokens
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "eu.amazon.nova-pro-v1:0",
        },
    },
    {
        Name:       "us.amazon.nova-pro-v1:0",
        Completion: &bedrock_support.AmazonCompletion{},
        Response:   &bedrock_support.NovaResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            // https://docs.aws.amazon.com/nova/latest/userguide/getting-started-api.html
            MaxTokens:   100, // max of 300k tokens
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "us.amazon.nova-pro-v1:0",
        },
    },
    {
        Name:       "amazon.nova-lite-v1:0",
        Completion: &bedrock_support.AmazonCompletion{},
        Response:   &bedrock_support.NovaResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100, // max of 300k tokens
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "amazon.nova-lite-v1:0",
        },
    },
    {
        Name:       "eu.amazon.nova-lite-v1:0",
        Completion: &bedrock_support.AmazonCompletion{},
        Response:   &bedrock_support.NovaResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100, // max of 300k tokens
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "eu.amazon.nova-lite-v1:0",
        },
    },
    {
        Name:       "us.amazon.nova-lite-v1:0",
        Completion: &bedrock_support.AmazonCompletion{},
        Response:   &bedrock_support.NovaResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100, // max of 300k tokens
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "us.amazon.nova-lite-v1:0",
        },
    },
    {
        Name:       "anthropic.claude-3-haiku-20240307-v1:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            // sensible defaults
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "anthropic.claude-3-haiku-20240307-v1:0",
        },
    },
}

// NewAmazonBedRockClient creates a new AmazonBedRockClient with the given models
func NewAmazonBedRockClient(models []bedrock_support.BedrockModel) *AmazonBedRockClient {
    if models == nil {
        models = defaultModels // Use default models if none provided
    }
    return &AmazonBedRockClient{
        models: models,
    }
}

// GetModelOrDefault check config region
func GetRegionOrDefault(region string) string {
    if os.Getenv("AWS_DEFAULT_REGION") != "" {
        region = os.Getenv("AWS_DEFAULT_REGION")
    }
    // Check if the provided model is in the list
    for _, m := range BEDROCKER_SUPPORTED_REGION {
        if m == region {
            return region // Return the provided model
        }
    }

    // Return the default model if the provided model is not in the list
    return BEDROCK_DEFAULT_REGION
}

func validateModelArn(model string) bool {
    var re = regexp.MustCompile(`(?m)^arn:(?P<Partition>[^:\n]*):bedrock:(?P<Region>[^:\n]*):(?P<AccountID>[^:\n]*):(?P<Ignore>(?P<ResourceType>[^:\/\n]*)[:\/])?(?P<Resource>.*)$`)
    return re.MatchString(model)
}

func validateInferenceProfileArn(inferenceProfile string) bool {
    // Support both inference-profile and application-inference-profile formats
    var re = regexp.MustCompile(`(?m)^arn:(?P<Partition>[^:\n]*):bedrock:(?P<Region>[^:\n]*):(?P<AccountID>[^:\n]*):(?:inference-profile|application-inference-profile)\/(?P<ProfileName>.+)$`)
    return re.MatchString(inferenceProfile)
}

// Get model from string
func (a *AmazonBedRockClient) getModelFromString(model string) (*bedrock_support.BedrockModel, error) {
    if model == "" {
        return nil, errors.New("model name cannot be empty")
    }

    // Trim spaces from the model name
    model = strings.TrimSpace(model)

    // Try to find an exact match first
    for i := range a.models {
        if strings.EqualFold(model, a.models[i].Name) || strings.EqualFold(model, a.models[i].Config.ModelName) {
            // Create a copy to avoid returning a pointer to a loop variable
            modelCopy := a.models[i]
            return &modelCopy, nil
        }
    }

    supportedModels := make([]string, len(a.models))
    for i, m := range a.models {
        supportedModels[i] = m.Name
    }

    supportedRegions := BEDROCKER_SUPPORTED_REGION

    // Pretty-print supported models and regions
    modelList := ""
    for _, m := range supportedModels {
        modelList += " - " + m + "\n"
    }
    regionList := ""
    for _, r := range supportedRegions {
        regionList += " - " + r + "\n"
    }

    return nil, fmt.Errorf(
        "model '%s' not found in supported models.\n\nSupported models:\n%sSupported regions:\n%s",
        model, modelList, regionList,
    )
}

// Configure configures the AmazonBedRockClient with the provided configuration.
func (a *AmazonBedRockClient) Configure(config IAIConfig) error {
    // Initialize models if not already initialized
    if a.models == nil {
        a.models = defaultModels
    }

    // Get the model input
    modelInput := config.GetModel()

    // Determine the appropriate region to use
    var region string

    // Check if the model input is actually an inference profile ARN
    if validateInferenceProfileArn(modelInput) {
        // Extract the region from the inference profile ARN
        arnParts := strings.Split(modelInput, ":")
        if len(arnParts) >= 4 {
            region = arnParts[3]
        } else {
            return fmt.Errorf("could not extract region from inference profile ARN: %s", modelInput)
        }
    } else {
        // Use the provided region or default
        region = GetRegionOrDefault(config.GetProviderRegion())
    }

    // Only create AWS clients if they haven't been injected (for testing)
    if a.client == nil || a.mgmtClient == nil {
        // Create a new AWS config with the determined region
        cfg, err := awsconfig.LoadDefaultConfig(context.Background(),
            awsconfig.WithRegion(region),
        )
        if err != nil {
            if strings.Contains(err.Error(), "InvalidAccessKeyId") || strings.Contains(err.Error(), "SignatureDoesNotMatch") || strings.Contains(err.Error(), "NoCredentialProviders") {
                return fmt.Errorf("AWS credentials are invalid or missing. Please check your AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables or AWS config. Details: %v", err)
            }
            return fmt.Errorf("failed to load AWS config for region %s: %w", region, err)
        }

        // Create clients with the config
        a.client = bedrockruntime.NewFromConfig(cfg)
        a.mgmtClient = bedrock.NewFromConfig(cfg)
    }

    // Handle model selection based on input type
    if validateInferenceProfileArn(modelInput) {
        // Get the inference profile details
        profile, err := a.getInferenceProfile(context.Background(), modelInput)
        if err != nil {
            return fmt.Errorf("failed to get inference profile: %v", err)
        }
        // Extract the model ID from the inference profile
        modelID, err := a.extractModelFromInferenceProfile(profile)
        if err != nil {
            return fmt.Errorf("failed to extract model ID from inference profile: %v", err)
        }
        // Find the model configuration for the extracted model ID
        foundModel, err := a.getModelFromString(modelID)
        if err != nil {
            // Instead of failing, use a generic config for completion/response
            // But still warn user
            return fmt.Errorf("failed to find model configuration for %s: %v", modelID, err)
        }
        // Use the found model config for completion/response, but set ModelName to the profile ARN
        a.model = foundModel
        a.model.Config.ModelName = modelInput
        // Mark that we're using an inference profile
        // (could add a field if needed)
    } else {
        // Regular model ID provided
        foundModel, err := a.getModelFromString(modelInput)
        if err != nil {
            return fmt.Errorf("model '%s' is not supported: %v", modelInput, err)
        }
        a.model = foundModel
        a.model.Config.ModelName = foundModel.Config.ModelName
    }

    // Set common configuration parameters
    a.temperature = config.GetTemperature()
    a.topP = config.GetTopP()
    a.maxTokens = config.GetMaxTokens()

    return nil
}

// getInferenceProfile retrieves the inference profile details from Amazon Bedrock
func (a *AmazonBedRockClient) getInferenceProfile(ctx context.Context, inferenceProfileARN string) (*bedrock.GetInferenceProfileOutput, error) {
    // Extract the profile ID from the ARN
    // ARN format: arn:aws:bedrock:region:account-id:inference-profile/profile-id
    // or arn:aws:bedrock:region:account-id:application-inference-profile/profile-id
    parts := strings.Split(inferenceProfileARN, "/")
    if len(parts) != 2 {
        return nil, fmt.Errorf("invalid inference profile ARN format: %s", inferenceProfileARN)
    }

    profileID := parts[1]

    // Create the input for the GetInferenceProfile API call
    input := &bedrock.GetInferenceProfileInput{
        InferenceProfileIdentifier: aws.String(profileID),
    }

    // Call the GetInferenceProfile API
    output, err := a.mgmtClient.GetInferenceProfile(ctx, input)
    if err != nil {
        return nil, fmt.Errorf("failed to get inference profile: %w", err)
    }

    return output, nil
}

// extractModelFromInferenceProfile extracts the model ID from the inference profile
func (a *AmazonBedRockClient) extractModelFromInferenceProfile(profile *bedrock.GetInferenceProfileOutput) (string, error) {
    if profile == nil || len(profile.Models) == 0 {
        return "", fmt.Errorf("inference profile does not contain any models")
    }

    // Check if the first model has a non-nil ModelArn
    if profile.Models[0].ModelArn == nil {
        return "", fmt.Errorf("model information is missing in inference profile")
    }

    // Get the first model ARN from the profile
    modelARN := aws.ToString(profile.Models[0].ModelArn)
    if modelARN == "" {
        return "", fmt.Errorf("model ARN is empty in inference profile")
    }

    // Extract the model ID from the ARN
    // ARN format: arn:aws:bedrock:region::foundation-model/model-id
    parts := strings.Split(modelARN, "/")
    if len(parts) != 2 {
        return "", fmt.Errorf("invalid model ARN format: %s", modelARN)
    }

    modelID := parts[1]
    return modelID, nil
}

// GetCompletion sends a request to the model for generating completion based on the provided prompt.
func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
    // override config defaults
    a.model.Config.MaxTokens = a.maxTokens
    a.model.Config.Temperature = a.temperature
    a.model.Config.TopP = a.topP

    supportedModels := make([]string, len(a.models))
    for i, m := range a.models {
        supportedModels[i] = m.Name
    }

    // Allow valid inference profile ARNs as supported models
    if !bedrock_support.IsModelSupported(a.model.Config.ModelName, supportedModels) && !validateInferenceProfileArn(a.model.Config.ModelName) {
        return "", fmt.Errorf("model '%s' is not supported.\nSupported models:\n%s", a.model.Config.ModelName, func() string {
            s := ""
            for _, m := range supportedModels {
                s += " - " + m + "\n"
            }
            return s
        }())
    }

    body, err := a.model.Completion.GetCompletion(ctx, prompt, a.model.Config)
    if err != nil {
        return "", err
    }

    // Build the parameters for the model invocation
    params := &bedrockruntime.InvokeModelInput{
        Body:        body,
        ModelId:     aws.String(a.model.Config.ModelName),
        ContentType: aws.String("application/json"),
        Accept:      aws.String("application/json"),
    }

    // Detect if the model name is an inference profile ARN and set the header if so
    var optFns []func(*bedrockruntime.Options)
    if validateInferenceProfileArn(a.model.Config.ModelName) {
        inferenceProfileArn := a.model.Config.ModelName
        optFns = append(optFns, func(options *bedrockruntime.Options) {
            options.APIOptions = append(options.APIOptions, func(stack *middleware.Stack) error {
                return stack.Initialize.Add(middleware.InitializeMiddlewareFunc("InferenceProfileHeader", func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) {
                    req, ok := in.Parameters.(*smithyhttp.Request)
                    if ok {
                        req.Header.Set("X-Amzn-Bedrock-Inference-Profile-ARN", inferenceProfileArn)
                    }
                    return next.HandleInitialize(ctx, in)
                }), middleware.Before)
            })
        })
    }

    // Invoke the model
    var resp *bedrockruntime.InvokeModelOutput
    if len(optFns) > 0 {
        resp, err = a.client.InvokeModel(ctx, params, optFns...)
    } else {
        resp, err = a.client.InvokeModel(ctx, params)
    }
    if err != nil {
        if strings.Contains(err.Error(), "InvalidAccessKeyId") || strings.Contains(err.Error(), "SignatureDoesNotMatch") || strings.Contains(err.Error(), "NoCredentialProviders") {
            return "", fmt.Errorf("AWS credentials are invalid or missing. Please check your AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables or AWS config. Details: %v", err)
        }
        return "", err
    }

    // Parse the response
    return a.model.Response.ParseResponse(resp.Body)
}

// GetName returns the name of the AmazonBedRockClient.
func (a *AmazonBedRockClient) GetName() string {
    return amazonbedrockAIClientName
}
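Configure above branches on whether the configured model is an inference-profile ARN and, if so, takes the region from the fourth colon-separated ARN field. A standalone sketch of just that parsing (regex simplified from validateInferenceProfileArn; the ARN value is illustrative):

package main

import (
    "fmt"
    "regexp"
    "strings"
)

// profileRe accepts both inference-profile and application-inference-profile ARNs.
var profileRe = regexp.MustCompile(`^arn:([^:\n]*):bedrock:([^:\n]*):([^:\n]*):(?:inference-profile|application-inference-profile)/(.+)$`)

func main() {
    arn := "arn:aws:bedrock:us-east-1:123456789012:inference-profile/my-profile"
    if profileRe.MatchString(arn) {
        parts := strings.Split(arn, ":")
        fmt.Println("region:", parts[3]) // region: us-east-1
        // Everything after the first "/" is the profile ID passed to GetInferenceProfile.
        fmt.Println("profile:", strings.SplitN(arn, "/", 2)[1]) // profile: my-profile
    }
}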
@@ -1,103 +0,0 @@
package ai

import (
    "context"
    "testing"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/bedrock"
    "github.com/aws/aws-sdk-go-v2/service/bedrock/types"
    "github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
    "github.com/k8sgpt-ai/k8sgpt/pkg/ai/bedrock_support"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
)

// Mock for Bedrock Management Client
type MockBedrockClient struct {
    mock.Mock
}

func (m *MockBedrockClient) GetInferenceProfile(ctx context.Context, params *bedrock.GetInferenceProfileInput, optFns ...func(*bedrock.Options)) (*bedrock.GetInferenceProfileOutput, error) {
    args := m.Called(ctx, params)

    if args.Get(0) == nil {
        return nil, args.Error(1)
    }

    return args.Get(0).(*bedrock.GetInferenceProfileOutput), args.Error(1)
}

// Mock for Bedrock Runtime Client
type MockBedrockRuntimeClient struct {
    mock.Mock
}

func (m *MockBedrockRuntimeClient) InvokeModel(ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (*bedrockruntime.InvokeModelOutput, error) {
    args := m.Called(ctx, params)

    if args.Get(0) == nil {
        return nil, args.Error(1)
    }

    return args.Get(0).(*bedrockruntime.InvokeModelOutput), args.Error(1)
}

// TestBedrockInferenceProfileARNWithMocks tests the inference profile ARN validation with mocks
func TestBedrockInferenceProfileARNWithMocks(t *testing.T) {
    // Create test models
    testModels := []bedrock_support.BedrockModel{
        {
            Name:       "anthropic.claude-3-5-sonnet-20240620-v1:0",
            Completion: &bedrock_support.CohereMessagesCompletion{},
            Response:   &bedrock_support.CohereMessagesResponse{},
            Config: bedrock_support.BedrockModelConfig{
                MaxTokens:   100,
                Temperature: 0.5,
                TopP:        0.9,
                ModelName:   "anthropic.claude-3-5-sonnet-20240620-v1:0",
            },
        },
    }

    // Create a client with test models
    client := &AmazonBedRockClient{models: testModels}

    // Create mock clients
    mockMgmtClient := new(MockBedrockClient)
    mockRuntimeClient := new(MockBedrockRuntimeClient)

    // Inject mock clients into the AmazonBedRockClient
    client.mgmtClient = mockMgmtClient
    client.client = mockRuntimeClient

    // Test with a valid inference profile ARN
    inferenceProfileARN := "arn:aws:bedrock:us-east-1:123456789012:inference-profile/my-profile"

    // Setup mock response for GetInferenceProfile
    mockMgmtClient.On("GetInferenceProfile", mock.Anything, &bedrock.GetInferenceProfileInput{
        InferenceProfileIdentifier: aws.String("my-profile"),
    }).Return(&bedrock.GetInferenceProfileOutput{
        Models: []types.InferenceProfileModel{
            {
                ModelArn: aws.String("arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-5-sonnet-20240620-v1:0"),
            },
        },
    }, nil)

    // Configure the client with the inference profile ARN
    config := AIProvider{
        Model:          inferenceProfileARN,
        ProviderRegion: "us-east-1",
    }

    // Test the Configure method with the inference profile ARN
    err := client.Configure(&config)

    // Verify that the configuration was successful
    assert.NoError(t, err)
    assert.Equal(t, inferenceProfileARN, client.model.Config.ModelName)

    // Verify that the mock was called
    mockMgmtClient.AssertExpectations(t)
}
@@ -1,262 +0,0 @@
package ai

import (
    "testing"

    "github.com/k8sgpt-ai/k8sgpt/pkg/ai/bedrock_support"
    "github.com/stretchr/testify/assert"
)

// Test models for unit testing
var testModels = []bedrock_support.BedrockModel{
    {
        Name:       "anthropic.claude-3-5-sonnet-20240620-v1:0",
        Completion: &bedrock_support.CohereMessagesCompletion{},
        Response:   &bedrock_support.CohereMessagesResponse{},
        Config: bedrock_support.BedrockModelConfig{
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "anthropic.claude-3-5-sonnet-20240620-v1:0",
        },
    },
    {
        Name:       "anthropic.claude-3-5-sonnet-20241022-v2:0",
        Completion: &bedrock_support.CohereCompletion{},
        Response:   &bedrock_support.CohereResponse{},
        Config: bedrock_support.BedrockModelConfig{
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "anthropic.claude-3-5-sonnet-20241022-v2:0",
        },
    },
    {
        Name:       "anthropic.claude-3-7-sonnet-20250219-v1:0",
        Completion: &bedrock_support.CohereCompletion{},
        Response:   &bedrock_support.CohereResponse{},
        Config: bedrock_support.BedrockModelConfig{
            MaxTokens:   100,
            Temperature: 0.5,
            TopP:        0.9,
            ModelName:   "anthropic.claude-3-7-sonnet-20250219-v1:0",
        },
    },
}

func TestBedrockModelConfig(t *testing.T) {
    client := &AmazonBedRockClient{models: testModels}

    // Should return error for ARN input (no exact match)
    _, err := client.getModelFromString("arn:aws:bedrock:us-east-1:*:inference-policy/anthropic.claude-3-5-sonnet-20240620-v1:0")
    assert.NotNil(t, err, "Should return error for ARN input")
}

func TestBedrockInvalidModel(t *testing.T) {
    client := &AmazonBedRockClient{models: testModels}

    // Should return error for invalid model name
    _, err := client.getModelFromString("arn:aws:s3:us-east-1:*:inference-policy/anthropic.claude-3-5-sonnet-20240620-v1:0")
    assert.NotNil(t, err, "Should return error for invalid model name")
}

func TestBedrockInferenceProfileARN(t *testing.T) {
    // Create a mock client with test models
    client := &AmazonBedRockClient{models: testModels}

    // Test with a valid inference profile ARN
    inferenceProfileARN := "arn:aws:bedrock:us-east-1:123456789012:inference-profile/my-profile"
    config := AIProvider{
        Model:          inferenceProfileARN,
        ProviderRegion: "us-east-1",
    }

    // This will fail in a real environment without mocks, but we're just testing the validation logic
    err := client.Configure(&config)
    // We expect an error since we can't actually call AWS in tests
    assert.NotNil(t, err, "Error should not be nil without AWS mocks")

    // Test with a valid application inference profile ARN
    appInferenceProfileARN := "arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/my-profile"
    config = AIProvider{
        Model:          appInferenceProfileARN,
        ProviderRegion: "us-east-1",
    }

    // This will fail in a real environment without mocks, but we're just testing the validation logic
    err = client.Configure(&config)
    // We expect an error since we can't actually call AWS in tests
    assert.NotNil(t, err, "Error should not be nil without AWS mocks")

    // Test with an invalid inference profile ARN format
    invalidARN := "arn:aws:bedrock:us-east-1:123456789012:invalid-resource/my-profile"
    config = AIProvider{
        Model:          invalidARN,
        ProviderRegion: "us-east-1",
    }

    err = client.Configure(&config)
    assert.NotNil(t, err, "Error should not be nil for invalid inference profile ARN format")
}

func TestBedrockGetCompletionInferenceProfile(t *testing.T) {
    modelName := "arn:aws:bedrock:us-east-1:*:inference-policy/anthropic.claude-3-5-sonnet-20240620-v1:0"
    var inferenceModelModels = []bedrock_support.BedrockModel{
        {
            Name:       "anthropic.claude-3-5-sonnet-20240620-v1:0",
            Completion: &bedrock_support.CohereMessagesCompletion{},
            Response:   &bedrock_support.CohereMessagesResponse{},
            Config: bedrock_support.BedrockModelConfig{
                MaxTokens:   100,
                Temperature: 0.5,
                TopP:        0.9,
                ModelName:   modelName,
            },
        },
    }
    client := &AmazonBedRockClient{models: inferenceModelModels}

    config := AIProvider{
        Model: modelName,
    }
    err := client.Configure(&config)
    assert.Nil(t, err, "Error should be nil")
    assert.Equal(t, modelName, client.model.Config.ModelName, "Model name should match")
}

func TestGetModelFromString(t *testing.T) {
    client := &AmazonBedRockClient{models: testModels}

    tests := []struct {
        name      string
        model     string
        wantModel string
        wantErr   bool
    }{
        {
            name:      "exact model name match",
            model:     "anthropic.claude-3-5-sonnet-20240620-v1:0",
            wantModel: "anthropic.claude-3-5-sonnet-20240620-v1:0",
            wantErr:   false,
        },
        {
            name:      "partial model name match",
            model:     "claude-3-5-sonnet",
            wantModel: "anthropic.claude-3-5-sonnet-20240620-v1:0",
            wantErr:   true,
        },
        {
            name:      "model name with different version",
            model:     "anthropic.claude-3-5-sonnet-20241022-v2:0",
            wantModel: "anthropic.claude-3-5-sonnet-20241022-v2:0",
            wantErr:   false,
        },
        {
            name:      "non-existent model",
            model:     "non-existent-model",
            wantModel: "",
            wantErr:   true,
        },
        {
            name:      "empty model name",
            model:     "",
            wantModel: "",
            wantErr:   true,
        },
        {
            name:      "model name with extra spaces",
            model:     " anthropic.claude-3-5-sonnet-20240620-v1:0 ",
            wantModel: "anthropic.claude-3-5-sonnet-20240620-v1:0",
            wantErr:   false,
        },
        {
            name:      "case insensitive match",
            model:     "ANTHROPIC.CLAUDE-3-5-SONNET-20240620-V1:0",
            wantModel: "anthropic.claude-3-5-sonnet-20240620-v1:0",
            wantErr:   false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            gotModel, err := client.getModelFromString(tt.model)
            if (err != nil) != tt.wantErr {
                t.Errorf("getModelFromString() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !tt.wantErr && gotModel.Name != tt.wantModel {
                t.Errorf("getModelFromString() = %v, want %v", gotModel.Name, tt.wantModel)
            }
        })
    }
}

// TestDefaultModels tests that the client works with default models
func TestDefaultModels(t *testing.T) {
    client := &AmazonBedRockClient{}

    // Configure should initialize default models
    err := client.Configure(&AIProvider{
        Model: "anthropic.claude-v2",
    })

    assert.NoError(t, err, "Configure should not return an error")
    assert.NotNil(t, client.models, "Models should be initialized")
    assert.NotEmpty(t, client.models, "Models should not be empty")

    // Test finding a default model
    model, err := client.getModelFromString("anthropic.claude-v2")
    assert.NoError(t, err, "Should find the model")
    assert.Equal(t, "anthropic.claude-v2", model.Name, "Should find the correct model")
}

func TestValidateInferenceProfileArn(t *testing.T) {
    tests := []struct {
        name  string
        arn   string
        valid bool
    }{
        {
            name:  "valid inference profile ARN",
            arn:   "arn:aws:bedrock:us-east-1:123456789012:inference-profile/my-profile",
            valid: true,
        },
        {
            name:  "valid application inference profile ARN",
            arn:   "arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/my-profile",
            valid: true,
        },
        {
            name:  "invalid service in ARN",
            arn:   "arn:aws:s3:us-east-1:123456789012:inference-profile/my-profile",
            valid: false,
        },
        {
            name:  "invalid resource type in ARN",
            arn:   "arn:aws:bedrock:us-east-1:123456789012:model/my-profile",
            valid: false,
        },
        {
            name:  "malformed ARN",
            arn:   "arn:aws:bedrock:us-east-1:inference-profile/my-profile",
            valid: false,
        },
        {
            name:  "not an ARN",
            arn:   "not-an-arn",
            valid: false,
        },
        {
            name:  "empty string",
            arn:   "",
            valid: false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            result := validateInferenceProfileArn(tt.arn)
            assert.Equal(t, tt.valid, result, "validateInferenceProfileArn() result should match expected")
        })
    }
}
@@ -1,141 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ai

import (
    "context"
    "encoding/json"
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sagemakerruntime"
)

const amazonsagemakerAIClientName = "amazonsagemaker"

type SageMakerAIClient struct {
    nopCloser

    client      *sagemakerruntime.SageMakerRuntime
    model       string
    temperature float32
    endpoint    string
    topP        float32
    topK        int32
    maxTokens   int
}

type Generations []struct {
    Generation struct {
        Role    string `json:"role"`
        Content string `json:"content"`
    } `json:"generation"`
}

type Request struct {
    Inputs     [][]Message `json:"inputs"`
    Parameters Parameters  `json:"parameters"`
}

type Message struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

type Parameters struct {
    MaxNewTokens int     `json:"max_new_tokens"`
    TopP         float64 `json:"top_p"`
    TopK         float64 `json:"top_k"`
    Temperature  float64 `json:"temperature"`
}

func (c *SageMakerAIClient) Configure(config IAIConfig) error {

    // Create a new AWS session
    sess := session.Must(session.NewSessionWithOptions(session.Options{
        Config:            aws.Config{Region: aws.String(config.GetProviderRegion())},
        SharedConfigState: session.SharedConfigEnable,
    }))

    // Create a new SageMaker runtime client
    c.client = sagemakerruntime.New(sess)
    c.model = config.GetModel()
    c.endpoint = config.GetEndpointName()
    c.temperature = config.GetTemperature()
    c.maxTokens = config.GetMaxTokens()
    c.topP = config.GetTopP()
    c.topK = config.GetTopK()
    return nil
}

func (c *SageMakerAIClient) GetCompletion(_ context.Context, prompt string) (string, error) {
    // Create a completion request
    request := Request{
        Inputs: [][]Message{
            {
                {Role: "system", Content: "DEFAULT_PROMPT"},
                {Role: "user", Content: prompt},
            },
        },

        Parameters: Parameters{
            MaxNewTokens: int(c.maxTokens),
            TopP:         float64(c.topP),
            TopK:         float64(c.topK),
            Temperature:  float64(c.temperature),
        },
    }

    // Convert request to []byte
    bytesData, err := json.Marshal(request)
    if err != nil {
        return "", err
    }

    // Create an input object
    input := &sagemakerruntime.InvokeEndpointInput{
        Body:             bytesData,
        EndpointName:     aws.String(c.endpoint),
        ContentType:      aws.String("application/json"), // Set the content type as per your model's requirements
        Accept:           aws.String("application/json"), // Set the accept type as per your model's requirements
        CustomAttributes: aws.String("accept_eula=true"),
    }

    // Call the InvokeEndpoint function
    result, err := c.client.InvokeEndpoint(input)
    if err != nil {
        return "", err
    }

    // // Define a slice of Generations
    var generations Generations

    err = json.Unmarshal([]byte(string(result.Body)), &generations)
    if err != nil {
        return "", err
    }
    // Check for length of generations
    if len(generations) != 1 {
        return "", fmt.Errorf("Expected exactly one generation, but got %d", len(generations))
    }

    // Access the content
    content := generations[0].Generation.Content
    return content, nil
}

func (c *SageMakerAIClient) GetName() string {
    return amazonsagemakerAIClientName
}
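For reference, the Request type above marshals into a messages-plus-parameters JSON payload (the accept_eula=true custom attribute suggests a Llama 2 chat style endpoint, though the diff does not say which model family is deployed). A standalone sketch that prints the wire format, with illustrative values; the lowercase types are local copies of the structs above, kept only so the sketch compiles on its own:

package main

import (
    "encoding/json"
    "fmt"
)

type message struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

type parameters struct {
    MaxNewTokens int     `json:"max_new_tokens"`
    TopP         float64 `json:"top_p"`
    TopK         float64 `json:"top_k"`
    Temperature  float64 `json:"temperature"`
}

type request struct {
    Inputs     [][]message `json:"inputs"`
    Parameters parameters  `json:"parameters"`
}

func main() {
    r := request{
        Inputs: [][]message{{
            {Role: "system", Content: "DEFAULT_PROMPT"},
            {Role: "user", Content: "Why is my pod in CrashLoopBackOff?"},
        }},
        Parameters: parameters{MaxNewTokens: 2048, TopP: 0.5, TopK: 50, Temperature: 0.7},
    }
    // Print the exact JSON body that would be sent to InvokeEndpoint.
    b, _ := json.MarshalIndent(r, "", "  ")
    fmt.Println(string(b))
}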
@@ -1,87 +0,0 @@
package ai

import (
    "context"
    "errors"
    "net/http"
    "net/url"

    "github.com/sashabaranov/go-openai"
)

const azureAIClientName = "azureopenai"

type AzureAIClient struct {
    nopCloser

    client      *openai.Client
    model       string
    temperature float32
    // organizationId string
}

func (c *AzureAIClient) Configure(config IAIConfig) error {
    token := config.GetPassword()
    baseURL := config.GetBaseURL()
    engine := config.GetEngine()
    proxyEndpoint := config.GetProxyEndpoint()
    defaultConfig := openai.DefaultAzureConfig(token, baseURL)
    orgId := config.GetOrganizationId()

    defaultConfig.AzureModelMapperFunc = func(model string) string {
        // If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
        azureModelMapping := map[string]string{
            model: engine,
        }
        return azureModelMapping[model]
    }

    if proxyEndpoint != "" {
        proxyUrl, err := url.Parse(proxyEndpoint)
        if err != nil {
            return err
        }
        transport := &http.Transport{
            Proxy: http.ProxyURL(proxyUrl),
        }

        defaultConfig.HTTPClient = &http.Client{
            Transport: transport,
        }
    }
    if orgId != "" {
        defaultConfig.OrgID = orgId
    }

    client := openai.NewClientWithConfig(defaultConfig)
    if client == nil {
        return errors.New("error creating Azure OpenAI client")
    }
    c.client = client
    c.model = config.GetModel()
    c.temperature = config.GetTemperature()
    return nil
}

func (c *AzureAIClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
    // Create a completion request
    resp, err := c.client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
        Model: c.model,
        Messages: []openai.ChatCompletionMessage{
            {
                Role:    openai.ChatMessageRoleUser,
                Content: prompt,
            },
        },
        Temperature: c.temperature,
    })
    if err != nil {
        return "", err
    }
    return resp.Choices[0].Message.Content, nil
}

func (c *AzureAIClient) GetName() string {
    return azureAIClientName
}
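A subtlety in the mapper above: because the closure builds a map keyed on whatever model name it receives, each call effectively maps every model to the single configured engine (deployment) name. A trimmed sketch of the lookup behavior as it would act with a fixed key instead (hypothetical helper, not part of the diff):

package main

import "fmt"

// azureModelMapper mirrors the shape of the AzureModelMapperFunc closure:
// one model name maps to its Azure deployment ("engine"); any other
// lookup falls through to the map's zero value, the empty string.
func azureModelMapper(model, configuredModel, engine string) string {
    mapping := map[string]string{configuredModel: engine}
    return mapping[model]
}

func main() {
    fmt.Println(azureModelMapper("gpt-3.5-turbo", "gpt-3.5-turbo", "my-deployment")) // my-deployment
    fmt.Println(azureModelMapper("gpt-4", "gpt-3.5-turbo", "my-deployment"))         // "" (unmapped)
}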
@@ -1,18 +0,0 @@
package ai

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/service/bedrock"
    "github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
)

// BedrockManagementAPI defines the interface for Bedrock management operations
type BedrockManagementAPI interface {
    GetInferenceProfile(ctx context.Context, params *bedrock.GetInferenceProfileInput, optFns ...func(*bedrock.Options)) (*bedrock.GetInferenceProfileOutput, error)
}

// BedrockRuntimeAPI defines the interface for Bedrock runtime operations
type BedrockRuntimeAPI interface {
    InvokeModel(ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (*bedrockruntime.InvokeModelOutput, error)
}
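These interfaces exist so the concrete AWS SDK clients can be swapped for the testify mocks shown earlier. A common companion (a sketch; these lines are not in the diff) is a pair of compile-time assertions in the same package, so a signature drift in the SDK breaks the build before any test runs:

// Compile-time checks that the real SDK clients satisfy the interfaces.
var _ BedrockManagementAPI = (*bedrock.Client)(nil)
var _ BedrockRuntimeAPI = (*bedrockruntime.Client)(nil)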
@@ -1,140 +0,0 @@
package bedrock_support

import (
    "context"
    "encoding/json"
    "fmt"
    "strings"
)

type ICompletion interface {
    GetCompletion(ctx context.Context, prompt string, modelConfig BedrockModelConfig) ([]byte, error)
}

type CohereCompletion struct {
    completion ICompletion
}

func (a *CohereCompletion) GetCompletion(ctx context.Context, prompt string, modelConfig BedrockModelConfig) ([]byte, error) {
    request := map[string]interface{}{
        "prompt":               fmt.Sprintf("\n\nHuman: %s \n\nAssistant:", prompt),
        "max_tokens_to_sample": modelConfig.MaxTokens,
        "temperature":          modelConfig.Temperature,
        "top_p":                modelConfig.TopP,
    }
    body, err := json.Marshal(request)
    if err != nil {
        return []byte{}, err
    }
    return body, nil
}

type CohereMessagesCompletion struct {
    completion ICompletion
}

func (a *CohereMessagesCompletion) GetCompletion(ctx context.Context, prompt string, modelConfig BedrockModelConfig) ([]byte, error) {
    request := map[string]interface{}{
        "max_tokens":        modelConfig.MaxTokens,
        "temperature":       modelConfig.Temperature,
        "top_p":             modelConfig.TopP,
        "anthropic_version": "bedrock-2023-05-31", // Or another valid version
        "messages": []map[string]interface{}{
            {
                "role":    "user",
                "content": prompt,
            },
        },
    }

    body, err := json.Marshal(request)
    if err != nil {
        return []byte{}, err
    }
    return body, nil
}

type AI21 struct {
    completion ICompletion
}

func (a *AI21) GetCompletion(ctx context.Context, prompt string, modelConfig BedrockModelConfig) ([]byte, error) {
    request := map[string]interface{}{
        "prompt":      prompt,
        "maxTokens":   modelConfig.MaxTokens,
        "temperature": modelConfig.Temperature,
        "topP":        modelConfig.TopP,
    }
    body, err := json.Marshal(request)
    if err != nil {
        return []byte{}, err
    }
    return body, nil
}

type AmazonCompletion struct {
    completion ICompletion
}

// Accepts a list of supported model names
func IsModelSupported(modelName string, supportedModels []string) bool {
    for _, supportedModel := range supportedModels {
        if strings.EqualFold(modelName, supportedModel) {
            return true
        }
    }
    return false
}

// Note: The caller should check model support before calling GetCompletion.
func (a *AmazonCompletion) GetCompletion(ctx context.Context, prompt string, modelConfig BedrockModelConfig) ([]byte, error) {
    if a == nil || modelConfig.ModelName == "" {
        return nil, fmt.Errorf("no model name provided to Bedrock completion")
    }
    if strings.Contains(modelConfig.ModelName, "nova") {
        return a.GetNovaCompletion(ctx, prompt, modelConfig)
    } else {
        return a.GetDefaultCompletion(ctx, prompt, modelConfig)
    }
}

func (a *AmazonCompletion) GetDefaultCompletion(ctx context.Context, prompt string, modelConfig BedrockModelConfig) ([]byte, error) {
    request := map[string]interface{}{
        "inputText": fmt.Sprintf("\n\nUser: %s", prompt),
        "textGenerationConfig": map[string]interface{}{
            "maxTokenCount": modelConfig.MaxTokens,
            "temperature":   modelConfig.Temperature,
            "topP":          modelConfig.TopP,
        },
    }
    body, err := json.Marshal(request)
    if err != nil {
        return []byte{}, err
    }
    return body, nil
}

func (a *AmazonCompletion) GetNovaCompletion(ctx context.Context, prompt string, modelConfig BedrockModelConfig) ([]byte, error) {
    request := map[string]interface{}{
        "inferenceConfig": map[string]interface{}{
            "max_new_tokens": modelConfig.MaxTokens,
            "temperature":    modelConfig.Temperature,
            "topP":           modelConfig.TopP,
        },
        "messages": []map[string]interface{}{
            {
                "role": "user",
                "content": []map[string]interface{}{
                    {
                        "text": prompt,
                    },
                },
            },
        },
    }
    body, err := json.Marshal(request)
    if err != nil {
        return []byte{}, err
    }
    return body, nil
}
@@ -1,182 +0,0 @@
package bedrock_support

import (
	"context"
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCohereCompletion_GetCompletion(t *testing.T) {
	completion := &CohereCompletion{}
	modelConfig := BedrockModelConfig{
		MaxTokens:   100,
		Temperature: 0.7,
		TopP:        0.9,
	}
	prompt := "Test prompt"

	body, err := completion.GetCompletion(context.Background(), prompt, modelConfig)
	assert.NoError(t, err)

	var request map[string]interface{}
	err = json.Unmarshal(body, &request)
	assert.NoError(t, err)

	assert.Equal(t, "\n\nHuman: Test prompt \n\nAssistant:", request["prompt"])
	assert.Equal(t, 100, int(request["max_tokens_to_sample"].(float64)))
	assert.Equal(t, 0.7, request["temperature"])
	assert.Equal(t, 0.9, request["top_p"])
}

func TestAI21_GetCompletion(t *testing.T) {
	completion := &AI21{}
	modelConfig := BedrockModelConfig{
		MaxTokens:   150,
		Temperature: 0.6,
		TopP:        0.8,
	}
	prompt := "Another test prompt"

	body, err := completion.GetCompletion(context.Background(), prompt, modelConfig)
	assert.NoError(t, err)

	var request map[string]interface{}
	err = json.Unmarshal(body, &request)
	assert.NoError(t, err)

	assert.Equal(t, "Another test prompt", request["prompt"])
	assert.Equal(t, 150, int(request["maxTokens"].(float64)))
	assert.Equal(t, 0.6, request["temperature"])
	assert.Equal(t, 0.8, request["topP"])
}

func TestAmazonCompletion_GetDefaultCompletion(t *testing.T) {
	completion := &AmazonCompletion{}
	modelConfig := BedrockModelConfig{
		MaxTokens:   200,
		Temperature: 0.5,
		TopP:        0.7,
		ModelName:   "amazon.titan-text-express-v1",
	}
	prompt := "Default test prompt"

	body, err := completion.GetDefaultCompletion(context.Background(), prompt, modelConfig)
	assert.NoError(t, err)

	var request map[string]interface{}
	err = json.Unmarshal(body, &request)
	assert.NoError(t, err)

	assert.Equal(t, "\n\nUser: Default test prompt", request["inputText"])
	textConfig := request["textGenerationConfig"].(map[string]interface{})
	assert.Equal(t, 200, int(textConfig["maxTokenCount"].(float64)))
	assert.Equal(t, 0.5, textConfig["temperature"])
	assert.Equal(t, 0.7, textConfig["topP"])
}

func TestAmazonCompletion_GetNovaCompletion(t *testing.T) {
	completion := &AmazonCompletion{}
	modelConfig := BedrockModelConfig{
		MaxTokens:   250,
		Temperature: 0.4,
		TopP:        0.6,
		ModelName:   "amazon.nova-pro-v1:0",
	}
	prompt := "Nova test prompt"

	body, err := completion.GetNovaCompletion(context.Background(), prompt, modelConfig)
	assert.NoError(t, err)

	var request map[string]interface{}
	err = json.Unmarshal(body, &request)
	assert.NoError(t, err)

	inferenceConfig := request["inferenceConfig"].(map[string]interface{})
	assert.Equal(t, 250, int(inferenceConfig["max_new_tokens"].(float64)))
	assert.Equal(t, 0.4, inferenceConfig["temperature"])
	assert.Equal(t, 0.6, inferenceConfig["topP"])

	messages := request["messages"].([]interface{})
	message := messages[0].(map[string]interface{})
	content := message["content"].([]interface{})
	contentMap := content[0].(map[string]interface{})
	assert.Equal(t, "Nova test prompt", contentMap["text"])
}

func TestAmazonCompletion_GetCompletion_Nova(t *testing.T) {
	completion := &AmazonCompletion{}
	modelConfig := BedrockModelConfig{
		MaxTokens:   250,
		Temperature: 0.4,
		TopP:        0.6,
		ModelName:   "amazon.nova-pro-v1:0",
	}
	prompt := "Nova test prompt"

	body, err := completion.GetCompletion(context.Background(), prompt, modelConfig)
	assert.NoError(t, err)

	var request map[string]interface{}
	err = json.Unmarshal(body, &request)
	assert.NoError(t, err)

	inferenceConfig := request["inferenceConfig"].(map[string]interface{})
	assert.Equal(t, 250, int(inferenceConfig["max_new_tokens"].(float64)))
	assert.Equal(t, 0.4, inferenceConfig["temperature"])
	assert.Equal(t, 0.6, inferenceConfig["topP"])

	messages := request["messages"].([]interface{})
	message := messages[0].(map[string]interface{})
	content := message["content"].([]interface{})
	contentMap := content[0].(map[string]interface{})
	assert.Equal(t, "Nova test prompt", contentMap["text"])
}

func TestAmazonCompletion_GetCompletion_Default(t *testing.T) {
	completion := &AmazonCompletion{}
	modelConfig := BedrockModelConfig{
		MaxTokens:   200,
		Temperature: 0.5,
		TopP:        0.7,
		ModelName:   "amazon.titan-text-express-v1",
	}
	prompt := "Default test prompt"

	body, err := completion.GetCompletion(context.Background(), prompt, modelConfig)
	assert.NoError(t, err)

	var request map[string]interface{}
	err = json.Unmarshal(body, &request)
	assert.NoError(t, err)

	assert.Equal(t, "\n\nUser: Default test prompt", request["inputText"])
	textConfig := request["textGenerationConfig"].(map[string]interface{})
	assert.Equal(t, 200, int(textConfig["maxTokenCount"].(float64)))
	assert.Equal(t, 0.5, textConfig["temperature"])
	assert.Equal(t, 0.7, textConfig["topP"])
}

func TestAmazonCompletion_GetCompletion_Inference_Profile(t *testing.T) {
	completion := &AmazonCompletion{}
	modelConfig := BedrockModelConfig{
		MaxTokens:   200,
		Temperature: 0.5,
		TopP:        0.7,
		ModelName:   "arn:aws:bedrock:us-east-1:*:inference-policy/anthropic.claude-3-5-sonnet-20240620-v1:0",
	}
	prompt := "Test prompt"

	_, err := completion.GetCompletion(context.Background(), prompt, modelConfig)
	assert.NoError(t, err)
}

func TestIsModelSupported(t *testing.T) {
	supported := []string{
		"anthropic.claude-v2",
		"anthropic.claude-v1",
	}
	assert.True(t, IsModelSupported("anthropic.claude-v2", supported))
	assert.False(t, IsModelSupported("unsupported-model", supported))
}
@@ -1,14 +0,0 @@
package bedrock_support

type BedrockModelConfig struct {
	MaxTokens   int
	Temperature float32
	TopP        float32
	ModelName   string
}

type BedrockModel struct {
	Name       string
	Completion ICompletion
	Response   IResponse
	Config     BedrockModelConfig
}
@@ -1,59 +0,0 @@
package bedrock_support

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestBedrockModelConfig(t *testing.T) {
	config := BedrockModelConfig{
		MaxTokens:   100,
		Temperature: 0.7,
		TopP:        0.9,
		ModelName:   "test-model",
	}

	assert.Equal(t, 100, config.MaxTokens)
	assert.Equal(t, float32(0.7), config.Temperature)
	assert.Equal(t, float32(0.9), config.TopP)
	assert.Equal(t, "test-model", config.ModelName)
}

func TestBedrockModel(t *testing.T) {
	completion := &MockCompletion{}
	response := &MockResponse{}
	config := BedrockModelConfig{
		MaxTokens:   100,
		Temperature: 0.7,
		TopP:        0.9,
		ModelName:   "test-model",
	}

	model := BedrockModel{
		Name:       "Test Model",
		Completion: completion,
		Response:   response,
		Config:     config,
	}

	assert.Equal(t, "Test Model", model.Name)
	assert.Equal(t, completion, model.Completion)
	assert.Equal(t, response, model.Response)
	assert.Equal(t, config, model.Config)
}

// MockCompletion is a mock implementation of the ICompletion interface
type MockCompletion struct{}

func (m *MockCompletion) GetCompletion(ctx context.Context, prompt string, config BedrockModelConfig) ([]byte, error) {
	return []byte(`{"prompt": "mock prompt"}`), nil
}

// MockResponse is a mock implementation of the IResponse interface
type MockResponse struct{}

func (m *MockResponse) ParseResponse(body []byte) (string, error) {
	return "mock response", nil
}
@@ -1,155 +0,0 @@
package bedrock_support

import (
	"encoding/json"
)

type IResponse interface {
	ParseResponse(rawResponse []byte) (string, error)
}

type CohereMessagesResponse struct {
	response IResponse
}

func (a *CohereMessagesResponse) ParseResponse(rawResponse []byte) (string, error) {
	type InvokeModelResponseBody struct {
		ID      string `json:"id"`
		Type    string `json:"type"`
		Role    string `json:"role"`
		Model   string `json:"model"`
		Content []struct {
			Type string `json:"type"`
			Text string `json:"text"`
		} `json:"content"`
		StopReason   string      `json:"stop_reason"`
		StopSequence interface{} `json:"stop_sequence"` // Could be null
		Usage        struct {
			InputTokens  int `json:"input_tokens"`
			OutputTokens int `json:"output_tokens"`
		} `json:"usage"`
	}

	output := &InvokeModelResponseBody{}
	err := json.Unmarshal(rawResponse, output)
	if err != nil {
		return "", err
	}

	// Extract the text content from the Content array
	var resultText string
	for _, content := range output.Content {
		if content.Type == "text" {
			resultText += content.Text
		}
	}

	return resultText, nil
}

type CohereResponse struct {
	response IResponse
}

func (a *CohereResponse) ParseResponse(rawResponse []byte) (string, error) {
	type InvokeModelResponseBody struct {
		Completion  string `json:"completion"`
		Stop_reason string `json:"stop_reason"`
	}
	output := &InvokeModelResponseBody{}
	err := json.Unmarshal(rawResponse, output)
	if err != nil {
		return "", err
	}
	return output.Completion, nil
}

type AI21Response struct {
	response IResponse
}

func (a *AI21Response) ParseResponse(rawResponse []byte) (string, error) {
	type Data struct {
		Text string `json:"text"`
	}
	type Completion struct {
		Data Data `json:"data"`
	}
	type InvokeModelResponseBody struct {
		Completions []Completion `json:"completions"`
	}
	output := &InvokeModelResponseBody{}
	err := json.Unmarshal(rawResponse, output)
	if err != nil {
		return "", err
	}
	return output.Completions[0].Data.Text, nil
}

type AmazonResponse struct {
	response IResponse
}

type NovaResponse struct {
	response NResponse
}

type NResponse interface {
	ParseResponse(rawResponse []byte) (string, error)
}

func (a *AmazonResponse) ParseResponse(rawResponse []byte) (string, error) {
	type Result struct {
		TokenCount       int    `json:"tokenCount"`
		OutputText       string `json:"outputText"`
		CompletionReason string `json:"completionReason"`
	}
	type InvokeModelResponseBody struct {
		InputTextTokenCount int      `json:"inputTextTokenCount"`
		Results             []Result `json:"results"`
	}
	output := &InvokeModelResponseBody{}
	err := json.Unmarshal(rawResponse, output)
	if err != nil {
		return "", err
	}
	return output.Results[0].OutputText, nil
}

func (a *NovaResponse) ParseResponse(rawResponse []byte) (string, error) {
	type Content struct {
		Text string `json:"text"`
	}

	type Message struct {
		Role    string    `json:"role"`
		Content []Content `json:"content"`
	}

	type UsageDetails struct {
		InputTokens               int `json:"inputTokens"`
		OutputTokens              int `json:"outputTokens"`
		TotalTokens               int `json:"totalTokens"`
		CacheReadInputTokenCount  int `json:"cacheReadInputTokenCount"`
		CacheWriteInputTokenCount int `json:"cacheWriteInputTokenCount,omitempty"`
	}

	type AmazonNovaResponse struct {
		Output struct {
			Message Message `json:"message"`
		} `json:"output"`
		StopReason string       `json:"stopReason"`
		Usage      UsageDetails `json:"usage"`
	}

	response := &AmazonNovaResponse{}
	err := json.Unmarshal(rawResponse, response)
	if err != nil {
		return "", err
	}

	if len(response.Output.Message.Content) > 0 {
		return response.Output.Message.Content[0].Text, nil
	}

	return "", nil
}
@@ -1,65 +0,0 @@
package bedrock_support

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCohereResponse_ParseResponse(t *testing.T) {
	response := &CohereResponse{}
	rawResponse := []byte(`{"completion": "Test completion", "stop_reason": "max_tokens"}`)

	result, err := response.ParseResponse(rawResponse)
	assert.NoError(t, err)
	assert.Equal(t, "Test completion", result)

	invalidResponse := []byte(`{"completion": "Test completion", "invalid_json":]`)
	_, err = response.ParseResponse(invalidResponse)
	assert.Error(t, err)
}

func TestAI21Response_ParseResponse(t *testing.T) {
	response := &AI21Response{}
	rawResponse := []byte(`{"completions": [{"data": {"text": "AI21 test"}}], "id": "123"}`)

	result, err := response.ParseResponse(rawResponse)
	assert.NoError(t, err)
	assert.Equal(t, "AI21 test", result)

	invalidResponse := []byte(`{"completions": [{"data": {"text": "AI21 test"}}, "invalid_json":]`)
	_, err = response.ParseResponse(invalidResponse)
	assert.Error(t, err)
}

func TestAmazonResponse_ParseResponse(t *testing.T) {
	response := &AmazonResponse{}
	rawResponse := []byte(`{"inputTextTokenCount": 10, "results": [{"tokenCount": 20, "outputText": "Amazon test", "completionReason": "stop"}]}`)

	result, err := response.ParseResponse(rawResponse)
	assert.NoError(t, err)
	assert.Equal(t, "Amazon test", result)

	invalidResponse := []byte(`{"inputTextTokenCount": 10, "results": [{"tokenCount": 20, "outputText": "Amazon test", "invalid_json":]`)
	_, err = response.ParseResponse(invalidResponse)
	assert.Error(t, err)
}

func TestNovaResponse_ParseResponse(t *testing.T) {
	response := &NovaResponse{}
	rawResponse := []byte(`{"output": {"message": {"content": [{"text": "Nova test"}]}}, "stopReason": "stop", "usage": {"inputTokens": 10, "outputTokens": 20, "totalTokens": 30, "cacheReadInputTokenCount": 5}}`)

	result, err := response.ParseResponse(rawResponse)
	assert.NoError(t, err)
	assert.Equal(t, "Nova test", result)

	rawResponseEmptyContent := []byte(`{"output": {"message": {"content": []}}, "stopReason": "stop", "usage": {"inputTokens": 10, "outputTokens": 20, "totalTokens": 30, "cacheReadInputTokenCount": 5}}`)

	resultEmptyContent, errEmptyContent := response.ParseResponse(rawResponseEmptyContent)
	assert.NoError(t, errEmptyContent)
	assert.Equal(t, "", resultEmptyContent)

	invalidResponse := []byte(`{"output": {"message": {"content": [{"text": "Nova test"}}, "invalid_json":]`)
	_, err = response.ParseResponse(invalidResponse)
	assert.Error(t, err)
}
@@ -1,80 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ai

import (
	"context"
	"errors"

	api "github.com/cohere-ai/cohere-go/v2"
	cohere "github.com/cohere-ai/cohere-go/v2/client"
	"github.com/cohere-ai/cohere-go/v2/option"
)

const cohereAIClientName = "cohere"

type CohereClient struct {
	nopCloser

	client      *cohere.Client
	model       string
	temperature float32
	maxTokens   int
}

func (c *CohereClient) Configure(config IAIConfig) error {
	token := config.GetPassword()

	opts := []option.RequestOption{
		cohere.WithToken(token),
	}

	baseURL := config.GetBaseURL()
	if baseURL != "" {
		opts = append(opts, cohere.WithBaseURL(baseURL))
	}

	client := cohere.NewClient(opts...)
	if client == nil {
		return errors.New("error creating Cohere client")
	}

	c.client = client
	c.model = config.GetModel()
	c.temperature = config.GetTemperature()
	c.maxTokens = config.GetMaxTokens()

	return nil
}

func (c *CohereClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
	// Create a completion request
	response, err := c.client.Chat(ctx, &api.ChatRequest{
		Message:      prompt,
		Model:        &c.model,
		K:            api.Int(0),
		Preamble:     api.String(""),
		Temperature:  api.Float64(float64(c.temperature)),
		RawPrompting: api.Bool(false),
		MaxTokens:    api.Int(c.maxTokens),
	})
	if err != nil {
		return "", err
	}
	return response.Text, nil
}

func (c *CohereClient) GetName() string {
	return cohereAIClientName
}
@@ -1,147 +0,0 @@
package ai

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"
)

const CustomRestClientName = "customrest"

type CustomRestClient struct {
	nopCloser
	client      *http.Client
	base        *url.URL
	token       string
	model       string
	temperature float32
	topP        float32
	topK        int32
}

type CustomRestRequest struct {
	Model string `json:"model"`

	// Prompt is the textual prompt to send to the model.
	Prompt string `json:"prompt"`

	// Options lists model-specific options. For example, temperature can be
	// set through this field, if the model supports it.
	Options map[string]interface{} `json:"options"`
}

type CustomRestResponse struct {
	// Model is the model name that generated the response.
	Model string `json:"model"`

	// CreatedAt is the timestamp of the response.
	CreatedAt time.Time `json:"created_at"`

	// Response is the textual response itself.
	Response string `json:"response"`
}

func (c *CustomRestClient) Configure(config IAIConfig) error {
	baseURL := config.GetBaseURL()
	if baseURL == "" {
		baseURL = defaultBaseURL
	}
	c.token = config.GetPassword()
	baseClientURL, err := url.Parse(baseURL)
	if err != nil {
		return err
	}
	c.base = baseClientURL

	proxyEndpoint := config.GetProxyEndpoint()
	c.client = http.DefaultClient
	if proxyEndpoint != "" {
		proxyUrl, err := url.Parse(proxyEndpoint)
		if err != nil {
			return err
		}
		transport := &http.Transport{
			Proxy: http.ProxyURL(proxyUrl),
		}

		c.client = &http.Client{
			Transport: transport,
		}
	}

	c.model = config.GetModel()
	if c.model == "" {
		c.model = defaultModel
	}
	c.temperature = config.GetTemperature()
	c.topP = config.GetTopP()
	c.topK = config.GetTopK()
	return nil
}

func (c *CustomRestClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
	var promptDetail struct {
		Language string `json:"language,omitempty"`
		Message  string `json:"message"`
		Prompt   string `json:"prompt,omitempty"`
	}
	prompt = strings.NewReplacer("\n", "\\n", "\t", "\\t").Replace(prompt)
	if err := json.Unmarshal([]byte(prompt), &promptDetail); err != nil {
		return "", err
	}
	generateRequest := &CustomRestRequest{
		Model:  c.model,
		Prompt: promptDetail.Prompt,
		Options: map[string]interface{}{
			"temperature": c.temperature,
			"top_p":       c.topP,
			"top_k":       c.topK,
			"message":     promptDetail.Message,
			"language":    promptDetail.Language,
		},
	}
	requestBody, err := json.Marshal(generateRequest)
	if err != nil {
		return "", err
	}
	request, err := http.NewRequestWithContext(ctx, http.MethodPost, c.base.String(), bytes.NewBuffer(requestBody))
	if err != nil {
		return "", err
	}
	if c.token != "" {
		request.Header.Set("Authorization", "Bearer "+c.token)
	}
	request.Header.Set("Content-Type", "application/json")
	request.Header.Set("Accept", "application/x-ndjson")

	response, err := c.client.Do(request)
	if err != nil {
		return "", err
	}
	defer response.Body.Close()

	responseBody, err := io.ReadAll(response.Body)
	if err != nil {
		return "", fmt.Errorf("could not read response body: %w", err)
	}

	if response.StatusCode >= http.StatusBadRequest {
		return "", fmt.Errorf("Request Error, StatusCode: %d, ErrorMessage: %s", response.StatusCode, responseBody)
	}

	var result CustomRestResponse
	if err := json.Unmarshal(responseBody, &result); err != nil {
		return "", err
	}
	return result.Response, nil
}

func (c *CustomRestClient) GetName() string {
	return CustomRestClientName
}
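// Illustrative note (not part of this diff): CustomRestClient.GetCompletion expects
// the incoming prompt to itself be a JSON document with "language", "message", and
// "prompt" fields, which it unpacks into a CustomRestRequest. A minimal sketch of
// driving the client, assuming an AIProvider config and a local endpoint that speaks
// this schema:
//
//	client := &CustomRestClient{}
//	_ = client.Configure(&AIProvider{BaseURL: "http://localhost:8080/generate", Model: "my-model"})
//	out, err := client.GetCompletion(context.Background(),
//		`{"language": "english", "message": "explain this error", "prompt": "CrashLoopBackOff"}`)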
@@ -1,87 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ai

import (
	"github.com/spf13/viper"
)

// AIClientFactory is an interface for creating AI clients
type AIClientFactory interface {
	NewClient(provider string) IAI
}

// DefaultAIClientFactory is the default implementation of AIClientFactory
type DefaultAIClientFactory struct{}

// NewClient creates a new AI client using the default implementation
func (f *DefaultAIClientFactory) NewClient(provider string) IAI {
	return NewClient(provider)
}

// ConfigProvider is an interface for accessing configuration
type ConfigProvider interface {
	UnmarshalKey(key string, rawVal interface{}) error
}

// ViperConfigProvider is the default implementation of ConfigProvider using Viper
type ViperConfigProvider struct{}

// UnmarshalKey unmarshals a key from the configuration using Viper
func (p *ViperConfigProvider) UnmarshalKey(key string, rawVal interface{}) error {
	return viper.UnmarshalKey(key, rawVal)
}

// Default instances to be used
var (
	DefaultClientFactory  = &DefaultAIClientFactory{}
	DefaultConfigProvider = &ViperConfigProvider{}
)

// For testing - these variables can be overridden in tests
var (
	testAIClientFactory AIClientFactory = nil
	testConfigProvider  ConfigProvider  = nil
)

// GetAIClientFactory returns the test factory if set, otherwise the default
func GetAIClientFactory() AIClientFactory {
	if testAIClientFactory != nil {
		return testAIClientFactory
	}
	return DefaultClientFactory
}

// GetConfigProvider returns the test provider if set, otherwise the default
func GetConfigProvider() ConfigProvider {
	if testConfigProvider != nil {
		return testConfigProvider
	}
	return DefaultConfigProvider
}

// For testing - set the test implementations
func SetTestAIClientFactory(factory AIClientFactory) {
	testAIClientFactory = factory
}

func SetTestConfigProvider(provider ConfigProvider) {
	testConfigProvider = provider
}

// Reset test implementations
func ResetTestImplementations() {
	testAIClientFactory = nil
	testConfigProvider = nil
}
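// Illustrative sketch (not part of this diff): a test can swap in a fake factory via
// SetTestAIClientFactory so that code calling GetAIClientFactory never builds a real
// backend. The fakeFactory type here is hypothetical:
//
//	type fakeFactory struct{ client IAI }
//
//	func (f *fakeFactory) NewClient(provider string) IAI { return f.client }
//
//	func TestAnalysisUsesInjectedClient(t *testing.T) {
//		SetTestAIClientFactory(&fakeFactory{client: &NoOpAIClient{}})
//		defer ResetTestImplementations()
//		// code under test now receives the NoOpAIClient
//	}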
@@ -1,122 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ai

import (
	"context"
	"errors"
	"fmt"

	"github.com/fatih/color"
	"github.com/google/generative-ai-go/genai"
	"google.golang.org/api/option"
)

const googleAIClientName = "google"

type GoogleGenAIClient struct {
	client *genai.Client

	model       string
	temperature float32
	topP        float32
	topK        int32
	maxTokens   int
}

func (c *GoogleGenAIClient) Configure(config IAIConfig) error {
	ctx := context.Background()

	// The password is either a plain API key or a service-account credentials JSON blob.
	token := config.GetPassword()
	authOption := option.WithAPIKey(token)
	if token[0] == '{' {
		authOption = option.WithCredentialsJSON([]byte(token))
	}

	client, err := genai.NewClient(ctx, authOption)
	if err != nil {
		return fmt.Errorf("creating genai Google SDK client: %w", err)
	}

	c.client = client
	c.model = config.GetModel()
	c.temperature = config.GetTemperature()
	c.topP = config.GetTopP()
	c.topK = config.GetTopK()
	c.maxTokens = config.GetMaxTokens()
	return nil
}

func (c *GoogleGenAIClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
	// Available models are listed at https://ai.google.dev/models, e.g. gemini-pro.
	model := c.client.GenerativeModel(c.model)
	model.SetTemperature(c.temperature)
	model.SetTopP(c.topP)
	model.SetTopK(c.topK)
	model.SetMaxOutputTokens(int32(c.maxTokens))

	// The Google AI SDK accepts inputs other than plain text; for now we set an explicit text prompt type.
	// Similarly, we could stream the response. For now k8sgpt does not support streaming.
	resp, err := model.GenerateContent(ctx, genai.Text(prompt))
	if err != nil {
		return "", err
	}

	if len(resp.Candidates) == 0 {
		if resp.PromptFeedback.BlockReason == genai.BlockReasonSafety {
			for _, r := range resp.PromptFeedback.SafetyRatings {
				if !r.Blocked {
					continue
				}
				return "", fmt.Errorf("completion blocked due to %v with probability %v", r.Category.String(), r.Probability.String())
			}
		}
		return "", errors.New("no completion returned; unknown reason")
	}

	// Format output.
	// TODO(bwplotka): Provide richer output in certain cases, e.g. a suddenly finished
	// completion based on finish reasons or safety rankings.
	got := resp.Candidates[0]
	var output string
	for _, part := range got.Content.Parts {
		switch o := part.(type) {
		case genai.Text:
			output += string(o)
			output += "\n"
		default:
			color.Yellow("found unsupported AI response part of type %T; ignoring", part)
		}
	}

	if got.CitationMetadata != nil && len(got.CitationMetadata.CitationSources) > 0 {
		output += "Citations:\n"
		for _, source := range got.CitationMetadata.CitationSources {
			// TODO(bwplotka): Give details on exactly which words can be attributed to each citation.
			output += fmt.Sprintf("* %s, %s\n", *source.URI, source.License)
		}
	}
	return output, nil
}

func (c *GoogleGenAIClient) GetName() string {
	return googleAIClientName
}

func (c *GoogleGenAIClient) Close() {
	if err := c.client.Close(); err != nil {
		color.Red("googleai client close error: %v", err)
	}
}
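// Illustrative note (not part of this diff): Configure above distinguishes credential
// kinds by the first byte of the password, so both of these forms would be accepted:
//
//	option.WithAPIKey("AIza-example-key")                              // plain API key
//	option.WithCredentialsJSON([]byte(`{"type": "service_account"}`)) // JSON blob, detected via '{'
//
// An empty password would panic on the token[0] index; callers are assumed to have
// validated the credential beforehand.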
@@ -1,193 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ai

import (
	"context"
	"errors"
	"fmt"

	"cloud.google.com/go/vertexai/genai"
	"github.com/fatih/color"
)

const googleVertexAIClientName = "googlevertexai"

type GoogleVertexAIClient struct {
	client *genai.Client

	model       string
	temperature float32
	topP        float32
	topK        int32
	maxTokens   int
}

// Vertex AI Gemini supported regions
// https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/gemini
const VERTEXAI_DEFAULT_REGION = "us-central1" // default region

const (
	US_Central_1             = "us-central1"
	US_West_4                = "us-west4"
	North_America_Northeast1 = "northamerica-northeast1"
	US_East_4                = "us-east4"
	US_West_1                = "us-west1"
	Asia_Northeast_3         = "asia-northeast3"
	Asia_Southeast_1         = "asia-southeast1"
	Asia_Northeast_1         = "asia-northeast1"
)

var VERTEXAI_SUPPORTED_REGION = []string{
	US_Central_1,
	US_West_4,
	North_America_Northeast1,
	US_East_4,
	US_West_1,
	Asia_Northeast_3,
	Asia_Southeast_1,
	Asia_Northeast_1,
}

const (
	ModelGeminiProV1       = "gemini-1.0-pro-001"    // Retired Model
	ModelGeminiProV2_5     = "gemini-2.5-pro"        // Latest Stable Model
	ModelGeminiFlashV2_5   = "gemini-2.5-flash"      // Latest Stable Model
	ModelGeminiFlashV2     = "gemini-2.0-flash"      // Latest Stable Model
	ModelGeminiFlashLiteV2 = "gemini-2.0-flash-lite" // Latest Stable Model
	ModelGeminiProV1_5     = "gemini-1.5-pro-002*"   // Legacy Stable Model
	ModelGeminiFlashV1_5   = "gemini-1.5-flash-002*" // Legacy Stable Model
)

var VERTEXAI_MODELS = []string{
	ModelGeminiProV2_5,
	ModelGeminiFlashV2_5,
	ModelGeminiFlashV2,
	ModelGeminiFlashLiteV2,
	ModelGeminiProV1_5,
	ModelGeminiFlashV1_5,
	ModelGeminiProV1,
}

// GetVertexAIModelOrDefault returns the configured model if it is supported,
// otherwise the default model.
func GetVertexAIModelOrDefault(model string) string {
	// Check if the provided model is in the list
	for _, m := range VERTEXAI_MODELS {
		if m == model {
			return model // Return the provided model
		}
	}

	// Return the default model if the provided model is not in the list
	return VERTEXAI_MODELS[0]
}

// GetVertexAIRegionOrDefault returns the configured region if it is supported,
// otherwise the default region.
func GetVertexAIRegionOrDefault(region string) string {
	// Check if the provided region is in the list
	for _, m := range VERTEXAI_SUPPORTED_REGION {
		if m == region {
			return region // Return the provided region
		}
	}

	// Return the default region if the provided region is not in the list
	return VERTEXAI_DEFAULT_REGION
}

func (g *GoogleVertexAIClient) Configure(config IAIConfig) error {
	ctx := context.Background()

	// Currently Vertex AI can be accessed either via OAuth or a Bearer token, so we need to consider both.
	projectId := config.GetProviderId()
	region := GetVertexAIRegionOrDefault(config.GetProviderRegion())

	client, err := genai.NewClient(ctx, projectId, region)
	if err != nil {
		return fmt.Errorf("creating genai Google SDK client: %w", err)
	}

	g.client = client
	g.model = GetVertexAIModelOrDefault(config.GetModel())
	g.temperature = config.GetTemperature()
	g.topP = config.GetTopP()
	g.topK = config.GetTopK()
	g.maxTokens = config.GetMaxTokens()

	return nil
}

func (g *GoogleVertexAIClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
	model := g.client.GenerativeModel(g.model)
	model.SetTemperature(g.temperature)
	model.SetTopP(g.topP)
	model.SetTopK(g.topK)
	model.SetMaxOutputTokens(int32(g.maxTokens))

	// The Google AI SDK accepts inputs other than plain text; for now we set an explicit text prompt type.
	// Similarly, we could stream the response. For now k8sgpt does not support streaming.
	resp, err := model.GenerateContent(ctx, genai.Text(prompt))
	if err != nil {
		return "", err
	}

	if len(resp.Candidates) == 0 {
		if resp.PromptFeedback.BlockReason > 0 {
			for _, r := range resp.PromptFeedback.SafetyRatings {
				if !r.Blocked {
					continue
				}
				return "", fmt.Errorf("completion blocked due to %v with probability %v", r.Category.String(), r.Probability.String())
			}
		}
		return "", errors.New("no completion returned; unknown reason")
	}

	// Format output.
	// TODO(bwplotka): Provide richer output in certain cases, e.g. a suddenly finished
	// completion based on finish reasons or safety rankings.
	got := resp.Candidates[0]
	var output string
	for _, part := range got.Content.Parts {
		switch o := part.(type) {
		case genai.Text:
			output += string(o)
			output += "\n"
		default:
			color.Yellow("found unsupported AI response part of type %T; ignoring", part)
		}
	}

	if got.CitationMetadata != nil && len(got.CitationMetadata.Citations) > 0 {
		output += "Citations:\n"
		for _, source := range got.CitationMetadata.Citations {
			// TODO(bwplotka): Give details on exactly which words can be attributed to each citation.
			output += fmt.Sprintf("* %s, %s\n", source.URI, source.License)
		}
	}
	return output, nil
}

func (g *GoogleVertexAIClient) GetName() string {
	return googleVertexAIClientName
}

func (g *GoogleVertexAIClient) Close() {
	if err := g.client.Close(); err != nil {
		color.Red("googleai client close error: %v", err)
	}
}
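// Illustrative usage (not part of this diff): both helpers above fall back rather
// than fail, so an unrecognized value silently becomes the default:
//
//	GetVertexAIModelOrDefault("gemini-2.5-pro") // "gemini-2.5-pro", supported as-is
//	GetVertexAIModelOrDefault("made-up-model")  // VERTEXAI_MODELS[0], i.e. "gemini-2.5-pro"
//	GetVertexAIRegionOrDefault("eu-west1")      // "us-central1", the default region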
102
pkg/ai/groq.go
@@ -1,102 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ai

import (
	"context"
	"errors"
	"net/http"
	"net/url"

	"github.com/sashabaranov/go-openai"
)

const groqAIClientName = "groq"

// Default Groq API endpoint (OpenAI-compatible)
const groqAPIBaseURL = "https://api.groq.com/openai/v1"

type GroqClient struct {
	nopCloser

	client      *openai.Client
	model       string
	temperature float32
	topP        float32
}

func (c *GroqClient) Configure(config IAIConfig) error {
	token := config.GetPassword()
	defaultConfig := openai.DefaultConfig(token)
	proxyEndpoint := config.GetProxyEndpoint()

	baseURL := config.GetBaseURL()
	if baseURL != "" {
		defaultConfig.BaseURL = baseURL
	} else {
		defaultConfig.BaseURL = groqAPIBaseURL
	}

	transport := &http.Transport{}
	if proxyEndpoint != "" {
		proxyUrl, err := url.Parse(proxyEndpoint)
		if err != nil {
			return err
		}
		transport.Proxy = http.ProxyURL(proxyUrl)
	}

	customHeaders := config.GetCustomHeaders()
	defaultConfig.HTTPClient = &http.Client{
		Transport: &OpenAIHeaderTransport{
			Origin:  transport,
			Headers: customHeaders,
		},
	}

	client := openai.NewClientWithConfig(defaultConfig)
	if client == nil {
		return errors.New("error creating Groq client")
	}
	c.client = client
	c.model = config.GetModel()
	c.temperature = config.GetTemperature()
	c.topP = config.GetTopP()
	return nil
}

func (c *GroqClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
	resp, err := c.client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model: c.model,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    "user",
				Content: prompt,
			},
		},
		Temperature:      c.temperature,
		MaxTokens:        maxToken,
		PresencePenalty:  presencePenalty,
		FrequencyPenalty: frequencyPenalty,
		TopP:             c.topP,
	})
	if err != nil {
		return "", err
	}
	return resp.Choices[0].Message.Content, nil
}

func (c *GroqClient) GetName() string {
	return groqAIClientName
}
@@ -1,63 +0,0 @@
package ai

import (
	"context"

	"github.com/hupe1980/go-huggingface"
	"k8s.io/utils/ptr"
)

const huggingfaceAIClientName = "huggingface"

type HuggingfaceClient struct {
	nopCloser

	client      *huggingface.InferenceClient
	model       string
	topP        float32
	topK        int32
	temperature float32
	maxTokens   int
}

func (c *HuggingfaceClient) Configure(config IAIConfig) error {
	token := config.GetPassword()

	client := huggingface.NewInferenceClient(token)

	c.client = client
	c.model = config.GetModel()
	c.topP = config.GetTopP()
	c.topK = config.GetTopK()
	c.temperature = config.GetTemperature()
	if config.GetMaxTokens() > 500 {
		c.maxTokens = 500
	} else {
		c.maxTokens = config.GetMaxTokens()
	}
	return nil
}

func (c *HuggingfaceClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
	resp, err := c.client.Conversational(ctx, &huggingface.ConversationalRequest{
		Inputs: huggingface.ConverstationalInputs{
			Text: prompt,
		},
		Model: c.model,
		Parameters: huggingface.ConversationalParameters{
			TopP:        ptr.To[float64](float64(c.topP)),
			TopK:        ptr.To[int](int(c.topK)),
			Temperature: ptr.To[float64](float64(c.temperature)),
			MaxLength:   &c.maxTokens,
		},
		Options: huggingface.Options{
			WaitForModel: ptr.To[bool](true),
		},
	})
	if err != nil {
		return "", err
	}
	return resp.GeneratedText, nil
}

func (c *HuggingfaceClient) GetName() string { return huggingfaceAIClientName }
193
pkg/ai/iai.go
@@ -1,197 +1,8 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ai

import (
	"context"
	"net/http"
)
import "context"

var (
	clients = []IAI{
		&OpenAIClient{},
		&AzureAIClient{},
		&LocalAIClient{},
		&OllamaClient{},
		&NoOpAIClient{},
		&CohereClient{},
		&AmazonBedRockClient{},
		&SageMakerAIClient{},
		&GoogleGenAIClient{},
		&HuggingfaceClient{},
		&GoogleVertexAIClient{},
		&OCIGenAIClient{},
		&CustomRestClient{},
		&IBMWatsonxAIClient{},
		&GroqClient{},
	}
	Backends = []string{
		openAIClientName,
		localAIClientName,
		ollamaClientName,
		azureAIClientName,
		cohereAIClientName,
		amazonbedrockAIClientName,
		amazonsagemakerAIClientName,
		googleAIClientName,
		noopAIClientName,
		huggingfaceAIClientName,
		googleVertexAIClientName,
		ociClientName,
		CustomRestClientName,
		ibmWatsonxAIClientName,
		groqAIClientName,
	}
)

// IAI is an interface all clients (representing backends) share.
type IAI interface {
	// Configure sets up the client for the given configuration. This is expected to be
	// executed once per client lifetime (e.g. analysis CLI command invocation).
	Configure(config IAIConfig) error
	// GetCompletion generates text based on the prompt.
	Configure(token string, language string) error
	GetCompletion(ctx context.Context, prompt string) (string, error)
	// GetName returns the name of the backend/client.
	GetName() string
	// Close cleans up all the resources. No other methods should be used on the
	// object after this method is invoked.
	Close()
}

type nopCloser struct{}

func (nopCloser) Close() {}

type IAIConfig interface {
	GetPassword() string
	GetModel() string
	GetBaseURL() string
	GetProxyEndpoint() string
	GetEndpointName() string
	GetEngine() string
	GetTemperature() float32
	GetProviderRegion() string
	GetTopP() float32
	GetTopK() int32
	GetMaxTokens() int
	GetProviderId() string
	GetCompartmentId() string
	GetOrganizationId() string
	GetCustomHeaders() []http.Header
}

func NewClient(provider string) IAI {
	for _, c := range clients {
		if provider == c.GetName() {
			return c
		}
	}
	// default client
	return &OpenAIClient{}
}

type AIConfiguration struct {
	Providers       []AIProvider `mapstructure:"providers"`
	DefaultProvider string       `mapstructure:"defaultprovider"`
}

type AIProvider struct {
	Name           string        `mapstructure:"name"`
	Model          string        `mapstructure:"model"`
	Password       string        `mapstructure:"password" yaml:"password,omitempty"`
	BaseURL        string        `mapstructure:"baseurl" yaml:"baseurl,omitempty"`
	ProxyEndpoint  string        `mapstructure:"proxyEndpoint" yaml:"proxyEndpoint,omitempty"`
	ProxyPort      string        `mapstructure:"proxyPort" yaml:"proxyPort,omitempty"`
	EndpointName   string        `mapstructure:"endpointname" yaml:"endpointname,omitempty"`
	Engine         string        `mapstructure:"engine" yaml:"engine,omitempty"`
	Temperature    float32       `mapstructure:"temperature" yaml:"temperature,omitempty"`
	ProviderRegion string        `mapstructure:"providerregion" yaml:"providerregion,omitempty"`
	ProviderId     string        `mapstructure:"providerid" yaml:"providerid,omitempty"`
	CompartmentId  string        `mapstructure:"compartmentid" yaml:"compartmentid,omitempty"`
	TopP           float32       `mapstructure:"topp" yaml:"topp,omitempty"`
	TopK           int32         `mapstructure:"topk" yaml:"topk,omitempty"`
	MaxTokens      int           `mapstructure:"maxtokens" yaml:"maxtokens,omitempty"`
	OrganizationId string        `mapstructure:"organizationid" yaml:"organizationid,omitempty"`
	CustomHeaders  []http.Header `mapstructure:"customHeaders"`
}

func (p *AIProvider) GetBaseURL() string {
	return p.BaseURL
}

func (p *AIProvider) GetProxyEndpoint() string {
	return p.ProxyEndpoint
}

func (p *AIProvider) GetEndpointName() string {
	return p.EndpointName
}

func (p *AIProvider) GetTopP() float32 {
	return p.TopP
}

func (p *AIProvider) GetTopK() int32 {
	return p.TopK
}

func (p *AIProvider) GetMaxTokens() int {
	return p.MaxTokens
}

func (p *AIProvider) GetPassword() string {
	return p.Password
}

func (p *AIProvider) GetModel() string {
	return p.Model
}

func (p *AIProvider) GetEngine() string {
	return p.Engine
}

func (p *AIProvider) GetTemperature() float32 {
	return p.Temperature
}

func (p *AIProvider) GetProviderRegion() string {
	return p.ProviderRegion
}

func (p *AIProvider) GetProviderId() string {
	return p.ProviderId
}

func (p *AIProvider) GetCompartmentId() string {
	return p.CompartmentId
}

func (p *AIProvider) GetOrganizationId() string {
	return p.OrganizationId
}

func (p *AIProvider) GetCustomHeaders() []http.Header {
	return p.CustomHeaders
}

var passwordlessProviders = []string{"localai", "ollama", "amazonsagemaker", "amazonbedrock", "googlevertexai", "oci", "customrest"}

func NeedPassword(backend string) bool {
	for _, b := range passwordlessProviders {
		if b == backend {
			return false
		}
	}
	return true
}
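// Illustrative usage (not part of this diff): resolving a backend by name and deciding
// whether to prompt for a credential before configuring it. AIProvider satisfies
// IAIConfig via the getters above; the model name here is hypothetical.
//
//	client := NewClient("noopai") // unknown names fall back to &OpenAIClient{}
//	if NeedPassword(client.GetName()) {
//		// ask the user for an API key, then pass it via the provider config
//	}
//	var cfg IAIConfig = &AIProvider{Name: client.GetName(), Model: "example-model"}
//	_ = client.Configure(cfg)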
@@ -1,67 +0,0 @@
package interactive

import (
	"fmt"
	"strings"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/analysis"
	"github.com/pterm/pterm"
)

type INTERACTIVE_STATE int

const (
	prompt = "Given the following context: "
)

const (
	E_RUNNING INTERACTIVE_STATE = iota
	E_EXITED                    = iota
)

type InteractionRunner struct {
	config        *analysis.Analysis
	State         chan INTERACTIVE_STATE
	contextWindow []byte
}

func NewInteractionRunner(config *analysis.Analysis, contextWindow []byte) *InteractionRunner {
	return &InteractionRunner{
		config:        config,
		contextWindow: contextWindow,
		State:         make(chan INTERACTIVE_STATE),
	}
}

func (a *InteractionRunner) StartInteraction() {
	a.State <- E_RUNNING
	pterm.Println("Interactive mode enabled [type exit to close.]")
	for {
		query := pterm.DefaultInteractiveTextInput.WithMultiLine(false)
		queryString, err := query.Show()
		if err != nil {
			fmt.Println(err)
		}
		if queryString == "" {
			continue
		}
		if strings.Contains(queryString, "exit") {
			a.State <- E_EXITED
			continue
		}
		pterm.Println()
		contextWindow := fmt.Sprintf("%s %s %s", prompt, string(a.contextWindow),
			queryString)

		response, err := a.config.AIClient.GetCompletion(a.config.Context,
			contextWindow)
		if err != nil {
			color.Red("Error: %v", err)
			a.State <- E_EXITED
			continue
		}
		pterm.Println(response)
	}
}
@@ -1,11 +0,0 @@
package ai

const localAIClientName = "localai"

type LocalAIClient struct {
	OpenAIClient
}

func (a *LocalAIClient) GetName() string {
	return localAIClientName
}
@@ -1,37 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ai

import (
	"context"
)

const noopAIClientName = "noopai"

type NoOpAIClient struct {
	nopCloser
}

func (c *NoOpAIClient) Configure(_ IAIConfig) error {
	return nil
}

func (c *NoOpAIClient) GetCompletion(_ context.Context, prompt string) (string, error) {
	response := "I am a noop response to the prompt " + prompt
	return response, nil
}

func (c *NoOpAIClient) GetName() string {
	return noopAIClientName
}
@@ -1,185 +0,0 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ai

import (
	"context"
	"errors"
	"fmt"
	"reflect"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/generativeai"
	"github.com/oracle/oci-go-sdk/v65/generativeaiinference"
)

const ociClientName = "oci"

type ociModelVendor string

const (
	vendorCohere = "cohere"
	vendorMeta   = "meta"
)

type OCIGenAIClient struct {
	nopCloser

	client        *generativeaiinference.GenerativeAiInferenceClient
	model         *generativeai.Model
	modelID       string
	compartmentId string
	temperature   float32
	topP          float32
	topK          int32
	maxTokens     int
}

func (c *OCIGenAIClient) GetName() string {
	return ociClientName
}

func (c *OCIGenAIClient) Configure(config IAIConfig) error {
	config.GetEndpointName()
	c.modelID = config.GetModel()
	c.temperature = config.GetTemperature()
	c.topP = config.GetTopP()
	c.topK = config.GetTopK()
	c.maxTokens = config.GetMaxTokens()
	c.compartmentId = config.GetCompartmentId()
	provider := common.DefaultConfigProvider()
	client, err := generativeaiinference.NewGenerativeAiInferenceClientWithConfigurationProvider(provider)
	if err != nil {
		return err
	}
	c.client = &client
	model, err := c.getModel(provider)
	if err != nil {
		return err
	}
	c.model = model
	return nil
}

func (c *OCIGenAIClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
	request := c.newChatRequest(prompt)
	response, err := c.client.Chat(ctx, request)
	if err != nil {
		return "", err
	}
	return extractGeneratedText(response.ChatResponse)
}

func (c *OCIGenAIClient) newChatRequest(prompt string) generativeaiinference.ChatRequest {
	return generativeaiinference.ChatRequest{
		ChatDetails: generativeaiinference.ChatDetails{
			CompartmentId: &c.compartmentId,
			ServingMode:   c.getServingMode(),
			ChatRequest:   c.getChatModelRequest(prompt),
		},
	}
}

func (c *OCIGenAIClient) getChatModelRequest(prompt string) generativeaiinference.BaseChatRequest {
	temperatureF64 := float64(c.temperature)
	topPF64 := float64(c.topP)
	topK := int(c.topK)

	switch c.getVendor() {
	case vendorMeta:
		messages := []generativeaiinference.Message{
			generativeaiinference.UserMessage{
				Content: []generativeaiinference.ChatContent{
					generativeaiinference.TextContent{
						Text: &prompt,
					},
				},
			},
		}
		// 0 is invalid for the Meta vendor type; use -1 to disable topK sampling instead.
		if topK == 0 {
			topK = -1
		}
		return generativeaiinference.GenericChatRequest{
			Messages:    messages,
			TopK:        &topK,
			TopP:        &topPF64,
			Temperature: &temperatureF64,
			MaxTokens:   &c.maxTokens,
		}
	default: // Default to cohere.
		return generativeaiinference.CohereChatRequest{
			Message:     &prompt,
			MaxTokens:   &c.maxTokens,
			Temperature: &temperatureF64,
			TopK:        &topK,
			TopP:        &topPF64,
		}
	}
}

func extractGeneratedText(llmInferenceResponse generativeaiinference.BaseChatResponse) (string, error) {
	switch response := llmInferenceResponse.(type) {
	case generativeaiinference.GenericChatResponse:
		if len(response.Choices) > 0 && len(response.Choices[0].Message.GetContent()) > 0 {
			if content, ok := response.Choices[0].Message.GetContent()[0].(generativeaiinference.TextContent); ok {
				return *content.Text, nil
			}
		}
		return "", errors.New("no text found in oci response")
	case generativeaiinference.CohereChatResponse:
		return *response.Text, nil
	default:
		return "", fmt.Errorf("unknown oci response type: %s", reflect.TypeOf(llmInferenceResponse).Name())
	}
}

func (c *OCIGenAIClient) getServingMode() generativeaiinference.ServingMode {
	if c.isBaseModel() {
		return generativeaiinference.OnDemandServingMode{
			ModelId: &c.modelID,
		}
	}
	return generativeaiinference.DedicatedServingMode{
		EndpointId: &c.modelID,
	}
}

func (c *OCIGenAIClient) getModel(provider common.ConfigurationProvider) (*generativeai.Model, error) {
	client, err := generativeai.NewGenerativeAiClientWithConfigurationProvider(provider)
	if err != nil {
		return nil, err
	}
	response, err := client.GetModel(context.Background(), generativeai.GetModelRequest{
		ModelId: &c.modelID,
	})
	if err != nil {
		return nil, err
	}
	return &response.Model, nil
}

func (c *OCIGenAIClient) isBaseModel() bool {
	return c.model != nil && c.model.Type == generativeai.ModelTypeBase
}

func (c *OCIGenAIClient) getVendor() ociModelVendor {
	if c.model == nil || c.model.Vendor == nil {
		return ""
	}
	return ociModelVendor(*c.model.Vendor)
}
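The interesting logic in the deleted OCI client is the double dispatch: the request shape follows the model's vendor (Meta models get the generic chat request with topK = -1 standing in for "disabled", everything else defaults to the Cohere shape), while the serving mode follows the model type (base models run on-demand, fine-tuned ones against a dedicated endpoint). A standalone sketch of the vendor dispatch, with toy request types standing in for the OCI SDK's CohereChatRequest and GenericChatRequest:

package main

import "fmt"

// chatRequest stands in for generativeaiinference.BaseChatRequest.
type chatRequest interface{ describe() string }

type cohereRequest struct{ message string }
type genericRequest struct{ message string }

func (r cohereRequest) describe() string  { return "cohere: " + r.message }
func (r genericRequest) describe() string { return "generic/meta: " + r.message }

// buildRequest mirrors getChatModelRequest: Meta models get the generic
// chat shape, everything else defaults to the Cohere shape.
func buildRequest(vendor, prompt string, topK int) chatRequest {
	switch vendor {
	case "meta":
		if topK == 0 {
			topK = -1 // 0 is invalid for Meta; -1 disables top-k sampling
		}
		return genericRequest{message: fmt.Sprintf("%s (topK=%d)", prompt, topK)}
	default:
		return cohereRequest{message: prompt}
	}
}

func main() {
	fmt.Println(buildRequest("meta", "hi", 0).describe())   // generic/meta: hi (topK=-1)
	fmt.Println(buildRequest("cohere", "hi", 40).describe()) // cohere: hi
}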
102
pkg/ai/ollama.go
@@ -1,102 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ai

import (
	"context"
	"errors"
	"net/http"
	"net/url"

	ollama "github.com/ollama/ollama/api"
)

const ollamaClientName = "ollama"

type OllamaClient struct {
	nopCloser

	client      *ollama.Client
	model       string
	temperature float32
	topP        float32
}

const (
	defaultBaseURL = "http://localhost:11434"
	defaultModel   = "llama3"
)

func (c *OllamaClient) Configure(config IAIConfig) error {
	baseURL := config.GetBaseURL()
	if baseURL == "" {
		baseURL = defaultBaseURL
	}
	baseClientURL, err := url.Parse(baseURL)
	if err != nil {
		return err
	}

	proxyEndpoint := config.GetProxyEndpoint()
	httpClient := http.DefaultClient
	if proxyEndpoint != "" {
		proxyUrl, err := url.Parse(proxyEndpoint)
		if err != nil {
			return err
		}
		transport := &http.Transport{
			Proxy: http.ProxyURL(proxyUrl),
		}

		httpClient = &http.Client{
			Transport: transport,
		}
	}

	c.client = ollama.NewClient(baseClientURL, httpClient)
	if c.client == nil {
		return errors.New("error creating Ollama client")
	}
	c.model = config.GetModel()
	if c.model == "" {
		c.model = defaultModel
	}
	c.temperature = config.GetTemperature()
	c.topP = config.GetTopP()
	return nil
}

func (c *OllamaClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
	req := &ollama.GenerateRequest{
		Model:  c.model,
		Prompt: prompt,
		Stream: new(bool),
		Options: map[string]interface{}{
			"temperature": c.temperature,
			"top_p":       c.topP,
		},
	}
	completion := ""
	respFunc := func(resp ollama.GenerateResponse) error {
		completion = resp.Response
		return nil
	}
	err := c.client.Generate(ctx, req, respFunc)
	if err != nil {
		return "", err
	}
	return completion, nil
}

func (c *OllamaClient) GetName() string {
	return ollamaClientName
}
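Note the Stream field: new(bool) is a pointer to false, so the Generate callback fires exactly once with the complete response rather than per token. A minimal standalone sketch of the same call sequence, assuming a local Ollama server on the default address and using only the github.com/ollama/ollama/api calls that appear in the deleted client:

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"net/url"

	ollama "github.com/ollama/ollama/api"
)

func main() {
	// Assumes an Ollama server listening on the client's default address.
	base, err := url.Parse("http://localhost:11434")
	if err != nil {
		log.Fatal(err)
	}
	client := ollama.NewClient(base, http.DefaultClient)

	stream := false
	req := &ollama.GenerateRequest{
		Model:  "llama3",
		Prompt: "Why is the sky blue? Answer in one sentence.",
		Stream: &stream, // non-streaming: callback runs once with the full response
		Options: map[string]interface{}{
			"temperature": 0.7,
			"top_p":       0.9,
		},
	}

	var completion string
	err = client.Generate(context.Background(), req, func(resp ollama.GenerateResponse) error {
		completion = resp.Response
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(completion)
}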