feat: enable REST/http api support

Signed-off-by: Sahil Badla <sahil_badla@intuit.com>
Sahil Badla
2024-01-16 10:01:12 -08:00
parent d661dfe585
commit 5ea1b35cd5
29 changed files with 2420 additions and 543 deletions


@@ -96,7 +96,7 @@ jobs:
outputs: type=docker,dest=/tmp/${{ env.IMAGE_NAME }}-image.tar
- name: Upload image as artifact
uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4
uses: actions/upload-artifact@1eb3cb2b3e0f29609092a73eb033bb759a334595 # v4
with:
name: ${{ env.IMAGE_NAME }}-image.tar
path: /tmp/${{ env.IMAGE_NAME }}-image.tar


@@ -49,7 +49,11 @@ jobs:
with:
go-version: '1.21'
- name: Download Syft
uses: anchore/sbom-action/download-syft@719133684c7d294116626d1344fe64f0d2ff3e9e # v0.15.2
uses: anchore/sbom-action/download-syft@c7f031d9249a826a082ea14c79d3b686a51d485a # v0.15.3
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5
with:
@@ -104,7 +108,11 @@ jobs:
cache-to: type=gha,scope=${{ github.ref_name }}-${{ env.IMAGE_TAG }}
- name: Generate SBOM
uses: anchore/sbom-action@719133684c7d294116626d1344fe64f0d2ff3e9e # v0.15.2
uses: anchore/sbom-action@c7f031d9249a826a082ea14c79d3b686a51d485a # v0.15.3
with:
image: ${{ env.IMAGE_TAG }}
artifact-name: sbom-${{ env.IMAGE_NAME }}


@@ -23,7 +23,7 @@ nfpms:
homepage: https://k8sgpt.ai
description: >-
K8sGPT is a tool for scanning your Kubernetes clusters, diagnosing and triaging issues in simple English. It has SRE experience codified into its analyzers and helps to pull out the most relevant information to enrich it with AI.
license: "MIT"
license: "Apache-2.0"
formats:
- deb
- rpm
@@ -57,7 +57,7 @@ archives:
brews:
- name: k8sgpt
homepage: https://k8sgpt.ai
tap:
repository:
owner: k8sgpt-ai
name: homebrew-k8sgpt


@@ -1 +1 @@
{".":"0.3.24"}
{".":"0.3.26"}


@@ -1,5 +1,73 @@
# Changelog
## [0.3.26](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.3.25...v0.3.26) (2024-01-14)
### Features
* initial Prometheus analyzers ([#855](https://github.com/k8sgpt-ai/k8sgpt/issues/855)) ([45fa827](https://github.com/k8sgpt-ai/k8sgpt/commit/45fa827c046b91d901a08bec1a892d9c0917f350))
* interactive mode ([#854](https://github.com/k8sgpt-ai/k8sgpt/issues/854)) ([9da75e0](https://github.com/k8sgpt-ai/k8sgpt/commit/9da75e02bc17146898377e4f90b7f59c5a8e0eee))
* unify aiClientName const for all providers ([#848](https://github.com/k8sgpt-ai/k8sgpt/issues/848)) ([5c17c24](https://github.com/k8sgpt-ai/k8sgpt/commit/5c17c240550609d9fb7771fe67fe1ab19660b4da))
### Bug Fixes
* **deps:** update module github.com/aws/aws-sdk-go to v1.49.16 ([#847](https://github.com/k8sgpt-ai/k8sgpt/issues/847)) ([ce4910b](https://github.com/k8sgpt-ai/k8sgpt/commit/ce4910bc5d064f80076877d7a096fff903308b63))
* **deps:** update module github.com/aws/aws-sdk-go to v1.49.17 ([#852](https://github.com/k8sgpt-ai/k8sgpt/issues/852)) ([85ebd12](https://github.com/k8sgpt-ai/k8sgpt/commit/85ebd12c30d369c5ef9a42b5a834d091523a7b6e))
* **deps:** update module github.com/aws/aws-sdk-go to v1.49.18 ([#856](https://github.com/k8sgpt-ai/k8sgpt/issues/856)) ([4106d39](https://github.com/k8sgpt-ai/k8sgpt/commit/4106d39c322940413ebfd9ac0bf6f5bd31830e93))
* **deps:** update module github.com/aws/aws-sdk-go to v1.49.19 ([#859](https://github.com/k8sgpt-ai/k8sgpt/issues/859)) ([6a2f315](https://github.com/k8sgpt-ai/k8sgpt/commit/6a2f315b2f4344f2924b7915e8a1393f9732a1e9))
* **deps:** update module github.com/sashabaranov/go-openai to v1.17.11 ([#853](https://github.com/k8sgpt-ai/k8sgpt/issues/853)) ([1979c86](https://github.com/k8sgpt-ai/k8sgpt/commit/1979c86d0f59921d55cd4229a37d604a6f1dc578))
* **deps:** update module github.com/sashabaranov/go-openai to v1.17.11 ([#861](https://github.com/k8sgpt-ai/k8sgpt/issues/861)) ([40b5b7e](https://github.com/k8sgpt-ai/k8sgpt/commit/40b5b7e185c8d335bdefb131988b9900ad26bac3))
* **deps:** update module gopkg.in/yaml.v2 to v3 ([#864](https://github.com/k8sgpt-ai/k8sgpt/issues/864)) ([36ba6c5](https://github.com/k8sgpt-ai/k8sgpt/commit/36ba6c5147a9ed75c14dbba4bc06cae903e651a4))
* **deps:** update module gopkg.in/yaml.v2 to v3 ([#865](https://github.com/k8sgpt-ai/k8sgpt/issues/865)) ([c55025d](https://github.com/k8sgpt-ai/k8sgpt/commit/c55025d04ebf9da0f6092aabb0b043ccef05164c))
### Other
* **deps:** update actions/upload-artifact digest to 1eb3cb2 ([#867](https://github.com/k8sgpt-ai/k8sgpt/issues/867)) ([4ce56f3](https://github.com/k8sgpt-ai/k8sgpt/commit/4ce56f38b4338a6a2fe69f588b0f17e0b54d0ae6))
* **deps:** update anchore/sbom-action action to v0.15.3 ([#850](https://github.com/k8sgpt-ai/k8sgpt/issues/850)) ([12f764d](https://github.com/k8sgpt-ai/k8sgpt/commit/12f764d5846accbd987d40f69a153dceb9954f39))
### Docs
* adjusted README information about providers ([#844](https://github.com/k8sgpt-ai/k8sgpt/issues/844)) ([745e960](https://github.com/k8sgpt-ai/k8sgpt/commit/745e960f492e6dd0e50aa4a1ce7239c677025024))
## [0.3.25](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.3.24...v0.3.25) (2024-01-05)
### Features
* added Google GenAI client; simplified IAI/clients API surface. ([#829](https://github.com/k8sgpt-ai/k8sgpt/issues/829)) ([e7d4149](https://github.com/k8sgpt-ai/k8sgpt/commit/e7d41496ddaa145c70079852da8b2ce3b3b7289f))
* code_cov badge ([#821](https://github.com/k8sgpt-ai/k8sgpt/issues/821)) ([fcd29a5](https://github.com/k8sgpt-ai/k8sgpt/commit/fcd29a547d73ba48935762e2f568f5755f5c6ed3))
* coverage reports ([#819](https://github.com/k8sgpt-ai/k8sgpt/issues/819)) ([3d0ba3e](https://github.com/k8sgpt-ai/k8sgpt/commit/3d0ba3e78cabaf5f1262c5b5b16ebabad974fa87))
### Bug Fixes
* **deps:** update module github.com/aws/aws-sdk-go to v1.49.10 ([#811](https://github.com/k8sgpt-ai/k8sgpt/issues/811)) ([e5cc4a2](https://github.com/k8sgpt-ai/k8sgpt/commit/e5cc4a28cb3682e7094e6ceddf91b65da991ddb6))
* **deps:** update module github.com/aws/aws-sdk-go to v1.49.12 ([#813](https://github.com/k8sgpt-ai/k8sgpt/issues/813)) ([91613ba](https://github.com/k8sgpt-ai/k8sgpt/commit/91613baa5cc5244c93deb344abcdd905802eef30))
* **deps:** update module github.com/aws/aws-sdk-go to v1.49.14 ([#822](https://github.com/k8sgpt-ai/k8sgpt/issues/822)) ([526e22f](https://github.com/k8sgpt-ai/k8sgpt/commit/526e22f88b8de15eceb10965b045ef0366ff2d6c))
* **deps:** update module github.com/aws/aws-sdk-go to v1.49.15 ([#835](https://github.com/k8sgpt-ai/k8sgpt/issues/835)) ([e78ff05](https://github.com/k8sgpt-ai/k8sgpt/commit/e78ff054190cd54cabe17d77ac69443e517f1e55))
* **deps:** update module github.com/prometheus/client_golang to v1.18.0 ([#814](https://github.com/k8sgpt-ai/k8sgpt/issues/814)) ([6eb8f67](https://github.com/k8sgpt-ai/k8sgpt/commit/6eb8f6793ed989ba3ac7ed00336345f68b09bf45))
* **deps:** update module github.com/sashabaranov/go-openai to v1.17.10 ([#824](https://github.com/k8sgpt-ai/k8sgpt/issues/824)) ([4314804](https://github.com/k8sgpt-ai/k8sgpt/commit/4314804ca7e782f5149dc2078ba9c859edc4688a))
* **deps:** update module golang.org/x/term to v0.16.0 ([#831](https://github.com/k8sgpt-ai/k8sgpt/issues/831)) ([4de989c](https://github.com/k8sgpt-ai/k8sgpt/commit/4de989c803ee43a02d75112d1b3a54daee3dd9af))
* **deps:** update module google.golang.org/api to v0.155.0 ([#836](https://github.com/k8sgpt-ai/k8sgpt/issues/836)) ([105a239](https://github.com/k8sgpt-ai/k8sgpt/commit/105a239d94384f4096c01d9978564040773ab56e))
* no explain case, improved readability. ([#825](https://github.com/k8sgpt-ai/k8sgpt/issues/825)) ([035348d](https://github.com/k8sgpt-ai/k8sgpt/commit/035348d8a0d290ac26b42425945eaafe038cedc5))
### Other
* added basic server startup test ([#817](https://github.com/k8sgpt-ai/k8sgpt/issues/817)) ([3e7cea7](https://github.com/k8sgpt-ai/k8sgpt/commit/3e7cea7bd39253718bc3d2f8b10ac5fc9b98cbc2))
* **deps:** pin codecov/codecov-action action to eaaf4be ([#820](https://github.com/k8sgpt-ai/k8sgpt/issues/820)) ([2f0f2df](https://github.com/k8sgpt-ai/k8sgpt/commit/2f0f2dfa8a5957cb8b10864c14d7883158723a6a))
* **deps:** update anchore/sbom-action action to v0.15.2 ([#823](https://github.com/k8sgpt-ai/k8sgpt/issues/823)) ([70c6892](https://github.com/k8sgpt-ai/k8sgpt/commit/70c68929d8d963c0bd17390c76e366d4339f56b9))
* lint fixes ([#833](https://github.com/k8sgpt-ai/k8sgpt/issues/833)) ([a7e9b48](https://github.com/k8sgpt-ai/k8sgpt/commit/a7e9b486bad7c2d62878e470a755d1fef3803680))
* remove code cov ([#832](https://github.com/k8sgpt-ai/k8sgpt/issues/832)) ([a774265](https://github.com/k8sgpt-ai/k8sgpt/commit/a77426593d7f3a8cfa810336ff08a2266db7fb4f))
### Dependency Updates
* go module bump to fix CVE: GHSA-45x7-px36-x8w8 & GHSA-7ww5-4wqc-m92c ([#810](https://github.com/k8sgpt-ai/k8sgpt/issues/810)) ([b17fd7c](https://github.com/k8sgpt-ai/k8sgpt/commit/b17fd7c98644afa70d414fcb32e49e61e1c831ad))
## [0.3.24](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.3.23...v0.3.24) (2023-12-23)

README.md (217 lines changed)

@@ -38,7 +38,7 @@ brew install k8sgpt
**32 bit:**
<!---x-release-please-start-version-->
```
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.24/k8sgpt_386.rpm
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.26/k8sgpt_386.rpm
sudo rpm -ivh k8sgpt_386.rpm
```
<!---x-release-please-end-->
@@ -47,7 +47,7 @@ brew install k8sgpt
<!---x-release-please-start-version-->
```
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.24/k8sgpt_amd64.rpm
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.26/k8sgpt_amd64.rpm
sudo rpm -ivh k8sgpt_amd64.rpm
```
<!---x-release-please-end-->
@@ -59,7 +59,7 @@ brew install k8sgpt
**32 bit:**
<!---x-release-please-start-version-->
```
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.24/k8sgpt_386.deb
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.26/k8sgpt_386.deb
sudo dpkg -i k8sgpt_386.deb
```
<!---x-release-please-end-->
@@ -67,7 +67,7 @@ brew install k8sgpt
<!---x-release-please-start-version-->
```
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.24/k8sgpt_amd64.deb
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.26/k8sgpt_amd64.deb
sudo dpkg -i k8sgpt_amd64.deb
```
<!---x-release-please-end-->
@@ -80,14 +80,14 @@ brew install k8sgpt
**32 bit:**
<!---x-release-please-start-version-->
```
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.24/k8sgpt_386.apk
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.26/k8sgpt_386.apk
apk add k8sgpt_386.apk
```
<!---x-release-please-end-->
**64 bit:**
<!---x-release-please-start-version-->
```
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.24/k8sgpt_amd64.apk
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.3.26/k8sgpt_amd64.apk
apk add k8sgpt_amd64.apk
```
<!---x-release-please-end-->
@@ -125,14 +125,14 @@ _This mode of operation is ideal for continuous monitoring of your cluster and c
## Quick Start
* Currently the default AI provider is OpenAI, you will need to generate an API key from [OpenAI](https://openai.com)
* Currently, the default AI provider is OpenAI, so you will need to generate an API key from [OpenAI](https://openai.com)
* You can do this by running `k8sgpt generate` to open a browser link to generate it
* Run `k8sgpt auth add` to set it in k8sgpt.
* You can provide the password directly using the `--password` flag.
* Run `k8sgpt filters` to manage the active filters used by the analyzer. By default, all filters are executed during analysis.
* Run `k8sgpt analyze` to run a scan.
* And use `k8sgpt analyze --explain` to get a more detailed explanation of the issues.
* You also run `k8sgpt analyze --with-doc` (with or without the explain flag) to get the official documentation from kubernetes.
* You can also run `k8sgpt analyze --with-doc` (with or without the explain flag) to get the official documentation from Kubernetes.
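Putting the steps above together, a first session might look like this (assuming the default `openai` backend; flags as documented above):
```
k8sgpt generate                       # opens a browser link to create an API key
k8sgpt auth add                       # store the key (or pass it with --password)
k8sgpt analyze --explain --with-doc   # scan, explain, and pull in official docs
```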
## Analyzers
@@ -291,200 +291,28 @@ grpcurl -plaintext -d '{"namespace": "k8sgpt", "explain": false}' localhost:8080
```
</details>
## LLM AI Backends
## Key Features
K8sGPT uses the chosen LLM generative AI provider when you want to explain the analysis results using the `--explain` flag, e.g. `k8sgpt analyze --explain`. You can use the `--backend` flag to specify a configured provider (it is `openai` by default).
<details>
<summary> LocalAI provider </summary>
To run local models, it is possible to use OpenAI-compatible APIs, for instance [LocalAI](https://github.com/go-skynet/LocalAI) which uses [llama.cpp](https://github.com/ggerganov/llama.cpp) and [ggml](https://github.com/ggerganov/ggml) to run inference on consumer-grade hardware. Models supported by LocalAI include, for instance, Vicuna, Alpaca, LLaMA, Cerebras, GPT4ALL, GPT4ALL-J and Koala.
To run local inference, you need to download the models first; for instance, you can find `ggml`-compatible models on [huggingface.co](https://huggingface.co/models?search=ggml) (for example Vicuna, Alpaca and Koala).
### Start the API server
To start the API server, follow the instructions in [LocalAI](https://github.com/go-skynet/LocalAI#example-use-gpt4all-j-model).
### Run k8sgpt
To run k8sgpt, run `k8sgpt auth add` with the `localai` backend (you can list available providers using `k8sgpt auth list`):
```
k8sgpt auth add --backend localai --model <model_name> --baseurl http://localhost:8080/v1 --temperature 0.7
```
Now you can analyze with the `localai` backend:
```
k8sgpt analyze --explain --backend localai
```
</details>
<details>
<summary> AzureOpenAI provider </summary>
<em>Prerequisites:</em> an Azure OpenAI deployment is needed; please visit the official Microsoft [documentation](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource) to create your own.
To authenticate with k8sgpt, you will need the Azure OpenAI endpoint of your tenant `"https://your Azure OpenAI Endpoint"`, the API key to access your deployment, the deployment name of your model and the model name itself.
To run k8sgpt, run `k8sgpt auth` with the `azureopenai` backend:
```
k8sgpt auth add --backend azureopenai --baseurl https://<your Azure OpenAI endpoint> --engine <deployment_name> --model <model_name>
```
Lastly, enter your Azure API key after the prompt.
Now you are ready to analyze with the Azure OpenAI backend:
```
k8sgpt analyze --explain --backend azureopenai
```
</details>
<details>
<summary>Cohere provider</summary>
<em>Prerequisites:</em> a Cohere API key is needed, please visit the [Cohere dashboard](https://dashboard.cohere.ai/api-keys) to create one.
To run k8sgpt, run `k8sgpt auth` with the `cohere` backend:
```
k8sgpt auth add --backend cohere --model command-nightly
```
Lastly, enter your Cohere API key after the prompt.
Now you are ready to analyze with the Cohere backend:
```
k8sgpt analyze --explain --backend cohere
```
</details>
<details>
<summary>Amazon Bedrock provider</summary>
<em>Prerequisites</em>
Bedrock API access is needed.
<img src="images/bedrock.png" width="500px;" />
As illustrated above, you will need to enable this in the [AWS Console](https://eu-central-1.console.aws.amazon.com/bedrock/home?region=eu-central-1#/modelaccess)
In addition, you will need to set the following local environment variables:
```
- AWS_ACCESS_KEY
- AWS_SECRET_ACCESS_KEY
- AWS_DEFAULT_REGION
```
```
k8sgpt auth add --backend amazonbedrock --model anthropic.claude-v2
```
#### Usage
```
k8sgpt analyze -e -b amazonbedrock
0 argocd/argocd-application-controller(argocd-application-controller)
- Error: StatefulSet uses the service argocd/argocd-application-controller which does not exist.
You're right, I don't have enough context to determine if a StatefulSet is correctly configured to use a non-existent service. A StatefulSet manages Pods with persistent storage, and the Pods are created from the same spec. The service name referenced in the StatefulSet configuration would need to match an existing Kubernetes service for the Pods to connect to. Without more details on the specific StatefulSet and environment, I can't confirm whether the configuration is valid or not.
```
</details>
<details>
<summary>Amazon SageMaker Provider</summary>
#### Prerequisites
1. **AWS CLI Configuration**: Make sure you have the AWS Command Line Interface (CLI) configured on your machine. If you haven't already configured the AWS CLI, you can follow the official AWS documentation for instructions on how to do it: [AWS CLI Configuration Guide](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html).
2. **SageMaker Instance**: You need to have an Amazon SageMaker instance set up. If you don't have one already, you can follow the step-by-step instructions provided in this repository for creating a SageMaker instance: [llm-sagemaker-jumpstart-cdk](https://github.com/zaremb/llm-sagemaker-jumpstart-cdk).
#### Backend Configuration
To add the amazonsagemaker backend, two parameters are required:
* `--endpointname` Amazon SageMaker endpoint name.
* `--providerRegion` AWS region where the SageMaker instance is created. `k8sgpt` uses this region to connect to SageMaker (not the one defined with the AWS CLI or environment variables).
To add amazonsagemaker as a backend run:
```bash
k8sgpt auth add --backend amazonsagemaker --providerRegion eu-west-1 --endpointname endpoint-xxxxxxxxxx
```
#### Optional params
Optionally, when adding the backend (and later, by editing the configuration file), you can set the following parameters:
`-l, --maxtokens int` Specify a maximum output length. Adjust (1-...) to control text length. Higher values produce longer output, lower values limit length (default 2048)
`-t, --temperature float32` The sampling temperature; value ranges between 0 (more deterministic output) and 1 (more random) (default 0.7)
`-c, --topp float32` Probability Cutoff: Set a threshold (0.0-1.0) to limit word choices. Higher values add randomness, lower values increase predictability. (default 0.5)
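For example, combining the required parameters with the optional ones (all values are illustrative):
```bash
k8sgpt auth add --backend amazonsagemaker \
  --providerRegion eu-west-1 \
  --endpointname endpoint-xxxxxxxxxx \
  --maxtokens 2048 --temperature 0.7 --topp 0.5
```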
To make amazonsagemaker the default backend, run:
```bash
k8sgpt auth default -p amazonsagemaker
```
#### AmazonSageMaker Usage
```bash
./k8sgpt analyze -e -b amazonsagemaker
100% |███████████████████████████████████████████████████████████████████████████████████████████████████████████████████| (1/1, 14 it/min)
AI Provider: amazonsagemaker
0 default/nginx(nginx)
- Error: Back-off pulling image "nginxx"
Error: Back-off pulling image "nginxx"
Solution:
1. Check if the image exists in the registry by running `docker image ls nginxx`.
2. If the image is not found, try pulling it by running `docker pull nginxx`.
3. If the image is still not available, check if there are any network issues by running `docker network inspect` and `docker network list`.
4. If the issue persists, try restarting the Docker daemon by running `sudo service docker restart`.
```
</details>
<details>
<summary>Setting a new default AI provider</summary>
There may be scenarios where you wish to have K8sGPT configured with several AI providers. In this case you may wish to set one of them as the new default, other than OpenAI, which is the project default.
_To view available providers_
```
k8sgpt auth list
Default:
Default:
> openai
Active:
Active:
Unused:
> openai
> azureopenai
Unused:
> localai
> noopai
> amazonbedrock
> azureopenai
> cohere
> amazonbedrock
> amazonsagemaker
> google
> noopai
```
For detailed documentation on how to configure and use each provider see [here](https://docs.k8sgpt.ai/reference/providers/backend/).
_To set a new default provider_
@@ -493,15 +321,12 @@ k8sgpt auth default -p azureopenai
Default provider set to azureopenai
```
</details>
## Key Features
<details>
<summary> Anonymization </summary>
With this option, the data is anonymized before being sent to the AI Backend. During the analysis execution, `k8sgpt` retrieves sensitive data (Kubernetes object names, labels, etc.). This data is masked when sent to the AI backend and replaced by a key that can be used to de-anonymize the data when the solution is returned to the user.
1. Error reported during analysis:
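The masking round trip described above can be sketched as follows; this is an illustrative snippet, not the actual k8sgpt implementation, and the object names and keys are made up:
```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical mapping of sensitive names to opaque masks.
	masked := map[string]string{"payments-service": "obj-001"}

	prompt := "Service payments-service has no endpoints."
	for unmasked, key := range masked {
		prompt = strings.ReplaceAll(prompt, unmasked, key) // anonymize before sending
	}

	// ... prompt is sent to the AI backend; a response comes back ...
	response := "Check the selector on obj-001 and its pod labels."
	for unmasked, key := range masked {
		response = strings.ReplaceAll(response, key, unmasked) // de-anonymize for the user
	}
	fmt.Println(response)
}
```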


@@ -16,23 +16,27 @@ package analyze
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/fatih/color"
"github.com/k8sgpt-ai/k8sgpt/pkg/ai/interactive"
"github.com/k8sgpt-ai/k8sgpt/pkg/analysis"
"github.com/spf13/cobra"
)
var (
explain bool
backend string
output string
filters []string
language string
nocache bool
namespace string
anonymize bool
maxConcurrency int
withDoc bool
explain bool
backend string
output string
filters []string
language string
nocache bool
namespace string
anonymize bool
maxConcurrency int
withDoc bool
interactiveMode bool
)
// AnalyzeCmd represents the problems command
@@ -43,7 +47,10 @@ var AnalyzeCmd = &cobra.Command{
Long: `This command will find problems within your Kubernetes cluster and
provide you with a list of issues that need to be resolved`,
Run: func(cmd *cobra.Command, args []string) {
// Create analysis configuration first.
config, err := analysis.NewAnalysis(
backend,
@@ -54,11 +61,16 @@ var AnalyzeCmd = &cobra.Command{
explain,
maxConcurrency,
withDoc,
interactiveMode,
)
if err != nil {
color.Red("Error: %v", err)
os.Exit(1)
}
defer config.Close()
config.RunAnalysis()
@@ -68,19 +80,42 @@ var AnalyzeCmd = &cobra.Command{
os.Exit(1)
}
}
// print results
output, err := config.PrintOutput(output)
outputData, err := config.PrintOutput(output)
if err != nil {
color.Red("Error: %v", err)
os.Exit(1)
}
fmt.Println(string(output))
fmt.Println(string(outputData))
if interactiveMode && explain {
if output == "json" {
color.Yellow("Caution: interactive mode using --json enabled may use additional tokens.")
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
interactiveClient := interactive.NewInteractionRunner(config, outputData)
go interactiveClient.StartInteraction()
for {
select {
case res := <-sigs:
switch res {
default:
os.Exit(0)
}
case res := <-interactiveClient.State:
switch res {
case interactive.E_EXITED:
os.Exit(0)
}
}
}
}
},
}
func init() {
// namespace flag
AnalyzeCmd.Flags().StringVarP(&namespace, "namespace", "n", "", "Namespace to analyze")
// no cache flag
@@ -101,4 +136,6 @@ func init() {
AnalyzeCmd.Flags().IntVarP(&maxConcurrency, "max-concurrency", "m", 10, "Maximum number of concurrent requests to the Kubernetes API server")
// kubernetes doc flag
AnalyzeCmd.Flags().BoolVarP(&withDoc, "with-doc", "d", false, "Give me the official documentation of the involved field")
// interactive mode flag
AnalyzeCmd.Flags().BoolVarP(&interactiveMode, "interactive", "i", false, "Enable interactive mode that allows further conversation with the LLM about the problem. Works only with the --explain flag")
}
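With the wiring above, the new flag composes with `--explain`; a usage sketch (not output from this commit):
```
k8sgpt analyze --explain --interactive
```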


@@ -149,7 +149,7 @@ func init() {
// add flag for url
addCmd.Flags().StringVarP(&baseURL, "baseurl", "u", "", "URL of the AI provider (e.g. `http://localhost:8080/v1`)")
// add flag for endpointName
addCmd.Flags().StringVarP(&endpointName, "endpointname", "n", "", "Endpoint Name, (e.g `endpoint-xxxxxxxxxxxx`)")
addCmd.Flags().StringVarP(&endpointName, "endpointname", "n", "", "Endpoint Name, e.g. `endpoint-xxxxxxxxxxxx` (only for amazonbedrock, amazonsagemaker backends)")
// add flag for topP
addCmd.Flags().Float32VarP(&topP, "topp", "c", 0.5, "Probability Cutoff: Set a threshold (0.0-1.0) to limit word choices. Higher values add randomness, lower values increase predictability.")
// max tokens
@@ -157,7 +157,7 @@ func init() {
// add flag for temperature
addCmd.Flags().Float32VarP(&temperature, "temperature", "t", 0.7, "The sampling temperature; value ranges between 0 (more deterministic output) and 1 (more random)")
// add flag for azure open ai engine/deployment name
addCmd.Flags().StringVarP(&engine, "engine", "e", "", "Azure AI deployment name")
addCmd.Flags().StringVarP(&engine, "engine", "e", "", "Azure AI deployment name (only for azureopenai backend)")
//add flag for amazonbedrock region name
addCmd.Flags().StringVarP(&providerRegion, "providerRegion", "r", "", "Provider Region name")
addCmd.Flags().StringVarP(&providerRegion, "providerRegion", "r", "", "Provider Region name (only for amazonbedrock backend)")
}


@@ -15,11 +15,12 @@ package generate
import (
"fmt"
"os/exec"
"runtime"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"os/exec"
"runtime"
)
var (
@@ -85,6 +86,6 @@ func printInstructions(isGui bool, backendType string) {
color.Green("Please open: https://beta.openai.com/account/api-keys to generate a key for %s", backendType)
fmt.Println("")
}
color.Green("Please copy the generated key and run `k8sgpt auth` to add it to your config file")
color.Green("Please copy the generated key and run `k8sgpt auth add` to add it to your config file")
fmt.Println("")
}

go.mod (37 lines changed)

@@ -7,7 +7,12 @@ require (
github.com/fatih/color v1.16.0
github.com/magiconair/properties v1.8.7
github.com/mittwald/go-helm-client v0.12.5
github.com/sashabaranov/go-openai v1.17.10
github.com/prometheus/prometheus v1.8.2-0.20211119115433-692a54649ed7
github.com/sashabaranov/go-openai v1.18.2
github.com/schollz/progressbar/v3 v3.14.1
github.com/spf13/cobra v1.8.0
github.com/spf13/viper v1.18.2
@@ -29,44 +34,73 @@ require (
cloud.google.com/go/storage v1.36.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
github.com/aws/aws-sdk-go v1.49.14
github.com/aws/aws-sdk-go v1.49.21
github.com/cohere-ai/cohere-go v0.2.0
github.com/google/generative-ai-go v0.5.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0
github.com/olekukonko/tablewriter v0.0.5
github.com/pterm/pterm v0.12.74
google.golang.org/api v0.155.0
gopkg.in/yaml.v2 v2.4.0
sigs.k8s.io/controller-runtime v0.16.3
sigs.k8s.io/gateway-api v1.0.0
)
require (
atomicgo.dev/cursor v0.2.0 // indirect
atomicgo.dev/keyboard v0.2.9 // indirect
atomicgo.dev/schedule v0.1.0 // indirect
cloud.google.com/go v0.110.10 // indirect
cloud.google.com/go/ai v0.3.0 // indirect
cloud.google.com/go/compute v1.23.3 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.5 // indirect
cloud.google.com/go/longrunning v0.5.4 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect
github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/anchore/go-struct-converter v0.0.0-20230627203149-c72ef8859ca9 // indirect
github.com/cohere-ai/tokenizer v1.1.1 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/dlclark/regexp2 v1.10.0 // indirect
github.com/evanphx/json-patch/v5 v5.7.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/gookit/color v1.5.4 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lithammer/fuzzysearch v1.1.8 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
@@ -75,6 +109,7 @@ require (
google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
gopkg.in/evanphx/json-patch.v5 v5.7.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
require (
@@ -207,8 +242,6 @@ require (
google.golang.org/protobuf v1.32.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.28.4
k8s.io/apiserver v0.28.4 // indirect
k8s.io/cli-runtime v0.28.4 // indirect

go.sum (1364 lines changed; diff suppressed because it is too large)

Binary image file removed (79 KiB; content not shown).


@@ -2,25 +2,21 @@ package ai
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"strings"
"github.com/fatih/color"
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/bedrockruntime"
)
const amazonbedrockAIClientName = "amazonbedrock"
// AmazonBedRockClient represents the client for interacting with the Amazon Bedrock service.
type AmazonBedRockClient struct {
nopCloser
client *bedrockruntime.BedrockRuntime
language string
model string
temperature float32
}
@@ -91,8 +87,8 @@ func GetRegionOrDefault(region string) string {
return BEDROCK_DEFAULT_REGION
}
// Configure configures the AmazonBedRockClient with the provided configuration and language.
func (a *AmazonBedRockClient) Configure(config IAIConfig, language string) error {
// Configure configures the AmazonBedRockClient with the provided configuration.
func (a *AmazonBedRockClient) Configure(config IAIConfig) error {
// Create a new AWS session
providerRegion := GetRegionOrDefault(config.GetProviderRegion())
@@ -107,7 +103,6 @@ func (a *AmazonBedRockClient) Configure(config IAIConfig, language string) error
// Create a new BedrockRuntime client
a.client = bedrockruntime.New(sess)
a.language = language
a.model = GetModelOrDefault(config.GetModel())
a.temperature = config.GetTemperature()
@@ -115,7 +110,7 @@ func (a *AmazonBedRockClient) Configure(config IAIConfig, language string) error
}
// GetCompletion sends a request to the model for generating completion based on the provided prompt.
func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string, promptTmpl string) (string, error) {
func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
// Prepare the input data for the model invocation
request := map[string]interface{}{
@@ -152,45 +147,7 @@ func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string,
return output.Completion, nil
}
// Parse generates a completion for the provided prompt using the Amazon Bedrock model.
func (a *AmazonBedRockClient) Parse(ctx context.Context, prompt []string, cache cache.ICache, promptTmpl string) (string, error) {
inputKey := strings.Join(prompt, " ")
// Check for cached data
cacheKey := util.GetCacheKey(a.GetName(), a.language, inputKey)
if !cache.IsCacheDisabled() && cache.Exists(cacheKey) {
response, err := cache.Load(cacheKey)
if err != nil {
return "", err
}
if response != "" {
output, err := base64.StdEncoding.DecodeString(response)
if err != nil {
color.Red("error decoding cached data: %v", err)
return "", nil
}
return string(output), nil
}
}
response, err := a.GetCompletion(ctx, inputKey, promptTmpl)
if err != nil {
return "", err
}
err = cache.Store(cacheKey, base64.StdEncoding.EncodeToString([]byte(response)))
if err != nil {
color.Red("error storing value to cache: %v", err)
return "", nil
}
return response, nil
}
// GetName returns the name of the AmazonBedRockClient.
func (a *AmazonBedRockClient) GetName() string {
return "amazonbedrock"
return amazonbedrockAIClientName
}


@@ -15,24 +15,20 @@ package ai
import (
"context"
"encoding/base64"
"fmt"
"strings"
"encoding/json"
"github.com/fatih/color"
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sagemakerruntime"
)
const amazonsagemakerAIClientName = "amazonsagemaker"
type SageMakerAIClient struct {
nopCloser
client *sagemakerruntime.SageMakerRuntime
language string
model string
temperature float32
endpoint string
@@ -63,7 +59,7 @@ type Parameters struct {
Temperature float64 `json:"temperature"`
}
func (c *SageMakerAIClient) Configure(config IAIConfig, language string) error {
func (c *SageMakerAIClient) Configure(config IAIConfig) error {
// Create a new AWS session
sess := session.Must(session.NewSessionWithOptions(session.Options{
@@ -71,7 +67,6 @@ func (c *SageMakerAIClient) Configure(config IAIConfig, language string) error {
SharedConfigState: session.SharedConfigEnable,
}))
c.language = language
// Create a new SageMaker runtime client
c.client = sagemakerruntime.New(sess)
c.model = config.GetModel()
@@ -82,18 +77,13 @@ func (c *SageMakerAIClient) Configure(config IAIConfig, language string) error {
return nil
}
func (c *SageMakerAIClient) GetCompletion(ctx context.Context, prompt string, promptTmpl string) (string, error) {
func (c *SageMakerAIClient) GetCompletion(_ context.Context, prompt string) (string, error) {
// Create a completion request
if len(promptTmpl) == 0 {
promptTmpl = PromptMap["default"]
}
request := Request{
Inputs: [][]Message{
{
{Role: "system", Content: "DEFAULT_PROMPT"},
{Role: "user", Content: fmt.Sprintf(promptTmpl, c.language, prompt)},
{Role: "user", Content: prompt},
},
},
@@ -142,29 +132,6 @@ func (c *SageMakerAIClient) GetCompletion(ctx context.Context, prompt string, pr
return content, nil
}
func (a *SageMakerAIClient) Parse(ctx context.Context, prompt []string, cache cache.ICache, promptTmpl string) (string, error) {
// parse the text with the AI backend
inputKey := strings.Join(prompt, " ")
// Check for cached data
sEnc := base64.StdEncoding.EncodeToString([]byte(inputKey))
cacheKey := util.GetCacheKey(a.GetName(), a.language, sEnc)
response, err := a.GetCompletion(ctx, inputKey, promptTmpl)
if err != nil {
color.Red("error getting completion: %v", err)
return "", err
}
err = cache.Store(cacheKey, base64.StdEncoding.EncodeToString([]byte(response)))
if err != nil {
color.Red("error storing value to cache: %v", err)
return "", err
}
return response, nil
}
func (a *SageMakerAIClient) GetName() string {
return "amazonsagemaker"
func (c *SageMakerAIClient) GetName() string {
return amazonsagemakerAIClientName
}


@@ -2,27 +2,22 @@ package ai
import (
"context"
"encoding/base64"
"errors"
"fmt"
"strings"
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
"github.com/fatih/color"
"github.com/sashabaranov/go-openai"
)
const azureAIClientName = "azureopenai"
type AzureAIClient struct {
nopCloser
client *openai.Client
language string
model string
temperature float32
}
func (c *AzureAIClient) Configure(config IAIConfig, lang string) error {
func (c *AzureAIClient) Configure(config IAIConfig) error {
token := config.GetPassword()
baseURL := config.GetBaseURL()
engine := config.GetEngine()
@@ -40,21 +35,20 @@ func (c *AzureAIClient) Configure(config IAIConfig, lang string) error {
if client == nil {
return errors.New("error creating Azure OpenAI client")
}
c.language = lang
c.client = client
c.model = config.GetModel()
c.temperature = config.GetTemperature()
return nil
}
func (c *AzureAIClient) GetCompletion(ctx context.Context, prompt string, promptTmpl string) (string, error) {
func (c *AzureAIClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
// Create a completion request
resp, err := c.client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
Model: c.model,
Messages: []openai.ChatCompletionMessage{
{
Role: openai.ChatMessageRoleUser,
Content: fmt.Sprintf(default_prompt, c.language, prompt),
Content: prompt,
},
},
Temperature: c.temperature,
@@ -65,42 +59,6 @@ func (c *AzureAIClient) GetCompletion(ctx context.Context, prompt string, prompt
return resp.Choices[0].Message.Content, nil
}
func (a *AzureAIClient) Parse(ctx context.Context, prompt []string, cache cache.ICache, promptTmpl string) (string, error) {
inputKey := strings.Join(prompt, " ")
// Check for cached data
cacheKey := util.GetCacheKey(a.GetName(), a.language, inputKey)
if !cache.IsCacheDisabled() && cache.Exists(cacheKey) {
response, err := cache.Load(cacheKey)
if err != nil {
return "", err
}
if response != "" {
output, err := base64.StdEncoding.DecodeString(response)
if err != nil {
color.Red("error decoding cached data: %v", err)
return "", nil
}
return string(output), nil
}
}
response, err := a.GetCompletion(ctx, inputKey, promptTmpl)
if err != nil {
return "", err
}
err = cache.Store(cacheKey, base64.StdEncoding.EncodeToString([]byte(response)))
if err != nil {
color.Red("error storing value to cache: %v", err)
return "", nil
}
return response, nil
}
func (a *AzureAIClient) GetName() string {
return "azureopenai"
func (c *AzureAIClient) GetName() string {
return azureAIClientName
}


@@ -15,26 +15,22 @@ package ai
import (
"context"
"encoding/base64"
"errors"
"fmt"
"strings"
"github.com/cohere-ai/cohere-go"
"github.com/fatih/color"
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
)
const cohereAIClientName = "cohere"
type CohereClient struct {
nopCloser
client *cohere.Client
language string
model string
temperature float32
}
func (c *CohereClient) Configure(config IAIConfig, language string) error {
func (c *CohereClient) Configure(config IAIConfig) error {
token := config.GetPassword()
client, err := cohere.CreateClient(token)
@@ -50,21 +46,17 @@ func (c *CohereClient) Configure(config IAIConfig, language string) error {
if client == nil {
return errors.New("error creating Cohere client")
}
c.language = language
c.client = client
c.model = config.GetModel()
c.temperature = config.GetTemperature()
return nil
}
func (c *CohereClient) GetCompletion(ctx context.Context, prompt, promptTmpl string) (string, error) {
func (c *CohereClient) GetCompletion(_ context.Context, prompt string) (string, error) {
// Create a completion request
if len(promptTmpl) == 0 {
promptTmpl = PromptMap["default"]
}
resp, err := c.client.Generate(cohere.GenerateOptions{
Model: c.model,
Prompt: fmt.Sprintf(strings.TrimSpace(promptTmpl), c.language, prompt),
Prompt: prompt,
MaxTokens: cohere.Uint(2048),
Temperature: cohere.Float64(float64(c.temperature)),
K: cohere.Int(0),
@@ -77,42 +69,6 @@ func (c *CohereClient) GetCompletion(ctx context.Context, prompt, promptTmpl str
return resp.Generations[0].Text, nil
}
func (a *CohereClient) Parse(ctx context.Context, prompt []string, cache cache.ICache, promptTmpl string) (string, error) {
inputKey := strings.Join(prompt, " ")
// Check for cached data
cacheKey := util.GetCacheKey(a.GetName(), a.language, inputKey)
if !cache.IsCacheDisabled() && cache.Exists(cacheKey) {
response, err := cache.Load(cacheKey)
if err != nil {
return "", err
}
if response != "" {
output, err := base64.StdEncoding.DecodeString(response)
if err != nil {
color.Red("error decoding cached data: %v", err)
return "", nil
}
return string(output), nil
}
}
response, err := a.GetCompletion(ctx, inputKey, promptTmpl)
if err != nil {
return "", err
}
err = cache.Store(cacheKey, base64.StdEncoding.EncodeToString([]byte(response)))
if err != nil {
color.Red("error storing value to cache: %v", err)
return "", nil
}
return response, nil
}
func (a *CohereClient) GetName() string {
return "cohere"
func (c *CohereClient) GetName() string {
return cohereAIClientName
}

pkg/ai/googlegenai.go (new file, 119 lines)

@@ -0,0 +1,119 @@
/*
Copyright 2023 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ai
import (
"context"
"errors"
"fmt"
"github.com/fatih/color"
"github.com/google/generative-ai-go/genai"
"google.golang.org/api/option"
)
const googleAIClientName = "google"
type GoogleGenAIClient struct {
client *genai.Client
model string
temperature float32
topP float32
maxTokens int
}
func (c *GoogleGenAIClient) Configure(config IAIConfig) error {
ctx := context.Background()
// Access your API key as an environment variable (see "Set up your API key" above)
token := config.GetPassword()
authOption := option.WithAPIKey(token)
// Guard against an empty token before sniffing for a JSON credentials blob.
if len(token) > 0 && token[0] == '{' {
authOption = option.WithCredentialsJSON([]byte(token))
}
client, err := genai.NewClient(ctx, authOption)
if err != nil {
return fmt.Errorf("creating genai Google SDK client: %w", err)
}
c.client = client
c.model = config.GetModel()
c.temperature = config.GetTemperature()
c.topP = config.GetTopP()
c.maxTokens = config.GetMaxTokens()
return nil
}
func (c *GoogleGenAIClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
// Available models are listed at https://ai.google.dev/models, e.g. gemini-pro.
model := c.client.GenerativeModel(c.model)
model.SetTemperature(c.temperature)
model.SetTopP(c.topP)
model.SetMaxOutputTokens(int32(c.maxTokens))
// The Google AI SDK accepts inputs other than text; for now we set an explicit text prompt type.
// Similarly, we could stream the response, but for now k8sgpt does not support streaming.
resp, err := model.GenerateContent(ctx, genai.Text(prompt))
if err != nil {
return "", err
}
if len(resp.Candidates) == 0 {
if resp.PromptFeedback.BlockReason == genai.BlockReasonSafety {
for _, r := range resp.PromptFeedback.SafetyRatings {
if !r.Blocked {
continue
}
return "", fmt.Errorf("complection blocked due to %v with probability %v", r.Category.String(), r.Probability.String())
}
}
return "", errors.New("no complection returned; unknown reason")
}
// Format output.
// TODO(bwplotka): Provide richer output in certain cases e.g. suddenly finished
// completion based on finish reasons or safety rankings.
got := resp.Candidates[0]
var output string
for _, part := range got.Content.Parts {
switch o := part.(type) {
case genai.Text:
output += string(o)
output += "\n"
default:
color.Yellow("found unsupported AI response part of type %T; ignoring", part)
}
}
if got.CitationMetadata != nil && len(got.CitationMetadata.CitationSources) > 0 {
output += "Citations:\n"
for _, source := range got.CitationMetadata.CitationSources {
// TODO(bwplotka): Give details around what exactly words could be attributed to the citation.
output += fmt.Sprintf("* %s, %s\n", *source.URI, source.License)
}
}
return output, nil
}
func (c *GoogleGenAIClient) GetName() string {
return googleAIClientName
}
func (c *GoogleGenAIClient) Close() {
if err := c.client.Close(); err != nil {
color.Red("googleai client close error: %v", err)
}
}
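Assuming the existing `auth add` flow, the new backend can then be selected like any other; the model name below follows the code comment above and is an assumption, not output from this commit:
```
k8sgpt auth add --backend google --model gemini-pro
k8sgpt analyze --explain --backend google
```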


@@ -15,8 +15,6 @@ package ai
import (
"context"
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
)
var (
@@ -28,25 +26,38 @@ var (
&CohereClient{},
&AmazonBedRockClient{},
&SageMakerAIClient{},
&GoogleGenAIClient{},
}
Backends = []string{
"openai",
"localai",
"azureopenai",
"noopai",
"cohere",
"amazonbedrock",
"amazonsagemaker",
openAIClientName,
localAIClientName,
azureAIClientName,
cohereAIClientName,
amazonbedrockAIClientName,
amazonsagemakerAIClientName,
googleAIClientName,
noopAIClientName,
}
)
// IAI is an interface all clients (representing backends) share.
type IAI interface {
Configure(config IAIConfig, language string) error
GetCompletion(ctx context.Context, prompt string, promptTmpl string) (string, error)
Parse(ctx context.Context, prompt []string, cache cache.ICache, promptTmpl string) (string, error)
// Configure sets up client for given configuration. This is expected to be
// executed once per client life-time (e.g. analysis CLI command invocation).
Configure(config IAIConfig) error
// GetCompletion generates text based on prompt.
GetCompletion(ctx context.Context, prompt string) (string, error)
// GetName returns name of the backend/client.
GetName() string
// Close cleans all the resources. No other methods should be used on the
// objects after this method is invoked.
Close()
}
type nopCloser struct{}
func (nopCloser) Close() {}
type IAIConfig interface {
GetPassword() string
GetModel() string
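For illustration, a minimal backend satisfying the slimmed-down `IAI` contract could look like the sketch below; `EchoClient` is hypothetical and not part of this commit:
```go
// EchoClient is a hypothetical IAI implementation illustrating the new
// contract: Configure no longer receives a language, and GetCompletion
// receives an already-templated prompt.
type EchoClient struct {
	nopCloser // reuses the no-op Close() defined above

	model string
}

func (c *EchoClient) Configure(config IAIConfig) error {
	c.model = config.GetModel()
	return nil
}

func (c *EchoClient) GetCompletion(_ context.Context, prompt string) (string, error) {
	// A real backend would call its API here; this sketch just echoes.
	return "echo(" + c.model + "): " + prompt, nil
}

func (c *EchoClient) GetName() string { return "echo" }
```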


@@ -0,0 +1,67 @@
package interactive
import (
"fmt"
"strings"
"github.com/fatih/color"
"github.com/k8sgpt-ai/k8sgpt/pkg/analysis"
"github.com/pterm/pterm"
)
type INTERACTIVE_STATE int
const (
prompt = "Given the following context: "
)
const (
E_RUNNING INTERACTIVE_STATE = iota
E_EXITED
)
type InteractionRunner struct {
config *analysis.Analysis
State chan INTERACTIVE_STATE
contextWindow []byte
}
func NewInteractionRunner(config *analysis.Analysis, contextWindow []byte) *InteractionRunner {
return &InteractionRunner{
config: config,
contextWindow: contextWindow,
State: make(chan INTERACTIVE_STATE),
}
}
func (a *InteractionRunner) StartInteraction() {
a.State <- E_RUNNING
pterm.Println("Interactive mode enabled [type exit to close.]")
for {
query := pterm.DefaultInteractiveTextInput.WithMultiLine(false)
queryString, err := query.Show()
if err != nil {
fmt.Println(err)
}
if queryString == "" {
continue
}
if strings.Contains(queryString, "exit") {
a.State <- E_EXITED
continue
}
pterm.Println()
contextWindow := fmt.Sprintf("%s %s %s", prompt, string(a.contextWindow),
queryString)
response, err := a.config.AIClient.GetCompletion(a.config.Context,
contextWindow)
if err != nil {
color.Red("Error: %v", err)
a.State <- E_EXITED
continue
}
pterm.Println(response)
}
}


@@ -1,9 +1,11 @@
package ai
const localAIClientName = "localai"
type LocalAIClient struct {
OpenAIClient
}
func (a *LocalAIClient) GetName() string {
return "localai"
return localAIClientName
}


@@ -15,58 +15,23 @@ package ai
import (
"context"
"encoding/base64"
"fmt"
"strings"
"github.com/fatih/color"
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
)
const noopAIClientName = "noopai"
type NoOpAIClient struct {
client string
language string
model string
nopCloser
}
func (c *NoOpAIClient) Configure(config IAIConfig, language string) error {
token := config.GetPassword()
c.language = language
c.client = fmt.Sprintf("I am a noop client with the token %s ", token)
c.model = config.GetModel()
func (c *NoOpAIClient) Configure(_ IAIConfig) error {
return nil
}
func (c *NoOpAIClient) GetCompletion(ctx context.Context, prompt string, promptTmpl string) (string, error) {
// Create a completion request
func (c *NoOpAIClient) GetCompletion(_ context.Context, prompt string) (string, error) {
response := "I am a noop response to the prompt " + prompt
return response, nil
}
func (a *NoOpAIClient) Parse(ctx context.Context, prompt []string, cache cache.ICache, promptTmpl string) (string, error) {
// parse the text with the AI backend
inputKey := strings.Join(prompt, " ")
// Check for cached data
sEnc := base64.StdEncoding.EncodeToString([]byte(inputKey))
cacheKey := util.GetCacheKey(a.GetName(), a.language, sEnc)
response, err := a.GetCompletion(ctx, inputKey, promptTmpl)
if err != nil {
color.Red("error getting completion: %v", err)
return "", err
}
err = cache.Store(cacheKey, base64.StdEncoding.EncodeToString([]byte(response)))
if err != nil {
color.Red("error storing value to cache: %v", err)
return "", nil
}
return response, nil
}
func (a *NoOpAIClient) GetName() string {
return "noopai"
func (c *NoOpAIClient) GetName() string {
return noopAIClientName
}


@@ -15,22 +15,17 @@ package ai
import (
"context"
"encoding/base64"
"errors"
"fmt"
"strings"
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
"github.com/sashabaranov/go-openai"
"github.com/fatih/color"
)
const openAIClientName = "openai"
type OpenAIClient struct {
nopCloser
client *openai.Client
language string
model string
temperature float32
}
@@ -43,7 +38,7 @@ const (
topP = 1.0
)
func (c *OpenAIClient) Configure(config IAIConfig, language string) error {
func (c *OpenAIClient) Configure(config IAIConfig) error {
token := config.GetPassword()
defaultConfig := openai.DefaultConfig(token)
@@ -56,24 +51,20 @@ func (c *OpenAIClient) Configure(config IAIConfig, language string) error {
if client == nil {
return errors.New("error creating OpenAI client")
}
c.language = language
c.client = client
c.model = config.GetModel()
c.temperature = config.GetTemperature()
return nil
}
func (c *OpenAIClient) GetCompletion(ctx context.Context, prompt string, promptTmpl string) (string, error) {
func (c *OpenAIClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
// Create a completion request
if len(promptTmpl) == 0 {
promptTmpl = PromptMap["default"]
}
resp, err := c.client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
Model: c.model,
Messages: []openai.ChatCompletionMessage{
{
Role: "user",
Content: fmt.Sprintf(promptTmpl, c.language, prompt),
Content: prompt,
},
},
Temperature: c.temperature,
@@ -88,42 +79,6 @@ func (c *OpenAIClient) GetCompletion(ctx context.Context, prompt string, promptT
return resp.Choices[0].Message.Content, nil
}
func (a *OpenAIClient) Parse(ctx context.Context, prompt []string, cache cache.ICache, promptTmpl string) (string, error) {
inputKey := strings.Join(prompt, " ")
// Check for cached data
cacheKey := util.GetCacheKey(a.GetName(), a.language, inputKey)
if !cache.IsCacheDisabled() && cache.Exists(cacheKey) {
response, err := cache.Load(cacheKey)
if err != nil {
return "", err
}
if response != "" {
output, err := base64.StdEncoding.DecodeString(response)
if err != nil {
color.Red("error decoding cached data: %v", err)
return "", nil
}
return string(output), nil
}
}
response, err := a.GetCompletion(ctx, inputKey, promptTmpl)
if err != nil {
return "", err
}
err = cache.Store(cacheKey, base64.StdEncoding.EncodeToString([]byte(response)))
if err != nil {
color.Red("error storing value to cache: %v", err)
return "", nil
}
return response, nil
}
func (a *OpenAIClient) GetName() string {
return "openai"
func (c *OpenAIClient) GetName() string {
return openAIClientName
}


@@ -8,10 +8,52 @@ const (
`
trivy_vuln_prompt = "Explain the following trivy scan result and the detailed risk or root cause of the CVE ID, then provide a solution. Response in %s: %s"
trivy_conf_prompt = "Explain the following trivy scan result and the detailed risk or root cause of the security check, then provide a solution."
prom_conf_prompt = `Simplify the following Prometheus error message delimited by triple dashes written in --- %s --- language; --- %s ---.
This error came when validating the Prometheus configuration file.
Provide step by step instructions to fix, with suggestions, referencing Prometheus documentation if relevant.
Write the output in the following format in no more than 300 characters:
Error: {Explain error here}
Solution: {Step by step solution here}
`
prom_relabel_prompt = `
Return your prompt in this language: %s, beginning with
The following is a list of the form:
job_name:
{Prometheus job_name}
relabel_configs:
{Prometheus relabel_configs}
kubernetes_sd_configs:
{Prometheus service discovery config}
---
%s
---
For each job_name, describe the Kubernetes service and pod labels,
namespaces, ports, and containers they match.
Return the message:
Discovered and parsed Prometheus scrape configurations.
For targets to be scraped by Prometheus, ensure they are running with
at least one of the following label sets:
Then for each job, write this format:
- Job: {job_name}
- Service Labels:
- {list of service labels}
- Pod Labels:
- {list of pod labels}
- Namespaces:
- {list of namespaces}
- Ports:
- {list of ports}
- Containers:
- {list of container names}
`
)
var PromptMap = map[string]string{
"default": default_prompt,
"VulnerabilityReport": trivy_vuln_prompt, // for Trivy integration, the key should match `Result.Kind` in pkg/common/types.go
"ConfigAuditReport": trivy_conf_prompt,
"default": default_prompt,
"VulnerabilityReport": trivy_vuln_prompt, // for Trivy integration, the key should match `Result.Kind` in pkg/common/types.go
"ConfigAuditReport": trivy_conf_prompt,
"PrometheusConfigValidate": prom_conf_prompt,
"PrometheusConfigRelabelReport": prom_relabel_prompt,
}
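This map is consumed by the analysis layer later in this diff; the selection logic reduces to roughly the pattern below (the helper itself is hypothetical, and `fmt`/`strings` are assumed to be imported):
```go
// buildPrompt sketches how a result kind selects a template from PromptMap
// and how the language plus failure text fill its format verbs.
func buildPrompt(kind, language, failureText string) string {
	promptTemplate := PromptMap["default"]
	if tmpl, ok := PromptMap[kind]; ok {
		promptTemplate = tmpl
	}
	return fmt.Sprintf(strings.TrimSpace(promptTemplate), language, failureText)
}
```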


@@ -15,6 +15,7 @@ package analysis
import (
"context"
"encoding/base64"
"errors"
"fmt"
"reflect"
@@ -36,6 +37,7 @@ type Analysis struct {
Context context.Context
Filters []string
Client *kubernetes.Client
Language string
AIClient ai.IAI
Results []common.Result
Errors []string
@@ -72,6 +74,10 @@ func NewAnalysis(
explain bool,
maxConcurrency int,
withDoc bool,
interactiveMode bool,
) (*Analysis, error) {
// Get kubernetes client from viper.
kubecontext := viper.GetString("kubecontext")
@@ -95,6 +101,10 @@ func NewAnalysis(
Context: context.Background(),
Filters: filters,
Client: client,
Language: language,
Namespace: namespace,
Cache: cache,
Explain: explain,
@@ -134,7 +144,11 @@ func NewAnalysis(
}
aiClient := ai.NewClient(aiProvider.Name)
if err := aiClient.Configure(&aiProvider, language); err != nil {
if err := aiClient.Configure(&aiProvider); err != nil {
return nil, err
}
a.AIClient = aiClient
@@ -269,14 +283,14 @@ func (a *Analysis) GetAIResults(output string, anonymize bool) error {
}
texts = append(texts, failure.Text)
}
// If the resource `Kind` comes from a "integration plugin", maybe a customized prompt template will be involved.
var promptTemplate string
promptTemplate := ai.PromptMap["default"]
// If the resource `Kind` comes from an "integration plugin",
// maybe a customized prompt template will be involved.
if prompt, ok := ai.PromptMap[analysis.Kind]; ok {
promptTemplate = prompt
} else {
promptTemplate = ai.PromptMap["default"]
}
parsedText, err := a.AIClient.Parse(a.Context, texts, a.Cache, promptTemplate)
result, err := a.getAIResultForSanitizedFailures(texts, promptTemplate)
if err != nil {
// FIXME: can we avoid checking if output is json multiple times?
// maybe implement the progress bar better?
@@ -284,23 +298,22 @@ func (a *Analysis) GetAIResults(output string, anonymize bool) error {
_ = bar.Exit()
}
// Check for exhaustion
// Check for exhaustion.
if strings.Contains(err.Error(), "status code: 429") {
return fmt.Errorf("exhausted API quota for AI provider %s: %v", a.AIClient.GetName(), err)
} else {
return fmt.Errorf("failed while calling AI provider %s: %v", a.AIClient.GetName(), err)
}
return fmt.Errorf("failed while calling AI provider %s: %v", a.AIClient.GetName(), err)
}
if anonymize {
for _, failure := range analysis.Error {
for _, s := range failure.Sensitive {
parsedText = strings.ReplaceAll(parsedText, s.Masked, s.Unmasked)
result = strings.ReplaceAll(result, s.Masked, s.Unmasked)
}
}
}
analysis.Details = parsedText
analysis.Details = result
if output != "json" {
_ = bar.Add(1)
}
@@ -308,3 +321,44 @@ func (a *Analysis) GetAIResults(output string, anonymize bool) error {
}
return nil
}
func (a *Analysis) getAIResultForSanitizedFailures(texts []string, promptTmpl string) (string, error) {
inputKey := strings.Join(texts, " ")
// Check for cached data.
// TODO(bwplotka): This might depend on model too (or even other client configuration pieces), fix it in later PRs.
cacheKey := util.GetCacheKey(a.AIClient.GetName(), a.Language, inputKey)
if !a.Cache.IsCacheDisabled() && a.Cache.Exists(cacheKey) {
response, err := a.Cache.Load(cacheKey)
if err != nil {
return "", err
}
if response != "" {
output, err := base64.StdEncoding.DecodeString(response)
if err == nil {
return string(output), nil
}
color.Red("error decoding cached data; ignoring cache item: %v", err)
}
}
// Process template.
prompt := fmt.Sprintf(strings.TrimSpace(promptTmpl), a.Language, inputKey)
response, err := a.AIClient.GetCompletion(a.Context, prompt)
if err != nil {
return "", err
}
if err = a.Cache.Store(cacheKey, base64.StdEncoding.EncodeToString([]byte(response))); err != nil {
color.Red("error storing value to cache; value won't be cached: %v", err)
}
return response, nil
}
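The cache above is keyed on provider name, output language, and the joined failure text, and values are stored base64-encoded so arbitrary model output survives the cache backend. A standalone sketch of that round-trip; the strings are illustrative:

// Sketch only: the encode/decode round-trip the cache performs above.
response := "The Service selector does not match any Pods ..."
encoded := base64.StdEncoding.EncodeToString([]byte(response)) // value passed to Store
decoded, err := base64.StdEncoding.DecodeString(encoded)       // value returned by Load
if err != nil {
	// Mirrors the behaviour above: a corrupt cache item is ignored, not fatal.
	color.Red("error decoding cached data; ignoring cache item: %v", err)
}
_ = decoded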
func (a *Analysis) Close() {
if a.AIClient == nil {
return
}
a.AIClient.Close()
}

View File

@@ -18,6 +18,7 @@ import (
"fmt"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/integration/prometheus"
"github.com/k8sgpt-ai/k8sgpt/pkg/integration/trivy"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
"github.com/spf13/viper"
@@ -44,7 +45,8 @@ type Integration struct {
}
var integrations = map[string]IIntegration{
"trivy": trivy.NewTrivy(),
"trivy": trivy.NewTrivy(),
"prometheus": prometheus.NewPrometheus(),
}
func NewIntegration() *Integration {

View File

@@ -0,0 +1,290 @@
package prometheus
import (
"bytes"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"net/http"
"path/filepath"
"strings"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
promconfig "github.com/prometheus/prometheus/config"
yaml "gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
prometheusContainerName = "prometheus"
configReloaderContainerName = "config-reloader"
prometheusConfigFlag = "--config.file="
configReloaderConfigFlag = "--config-file="
)
var prometheusPodLabels = map[string]string{
"app": "prometheus",
"app.kubernetes.io/name": "prometheus",
}
type ConfigAnalyzer struct {
}
// podConfig groups a specific pod with the Prometheus configuration and any
// other state used for informing the common.Result.
type podConfig struct {
b []byte
pod *corev1.Pod
}
func (c *ConfigAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
ctx := a.Context
client := a.Client.GetClient()
namespace := a.Namespace
kind := ConfigValidate
podConfigs, err := findPrometheusPodConfigs(ctx, client, namespace)
if err != nil {
return nil, err
}
var preAnalysis = map[string]common.PreAnalysis{}
for _, pc := range podConfigs {
var failures []common.Failure
pod := pc.pod
// Check upstream validation.
// The Prometheus configuration structs do not generally have validation
// methods and embed their validation logic in the UnmarshalYAML methods.
config, err := unmarshalPromConfigBytes(pc.b)
if err != nil {
failures = append(failures, common.Failure{
Text: fmt.Sprintf("error validating Prometheus YAML configuration: %s", err),
})
}
_, err = yaml.Marshal(config)
if err != nil {
failures = append(failures, common.Failure{
Text: fmt.Sprintf("error validating Prometheus struct configuration: %s", err),
})
}
// Check for empty scrape config.
if config != nil && len(config.ScrapeConfigs) == 0 {
failures = append(failures, common.Failure{
Text: "no scrape configurations. Prometheus will not scrape any metrics.",
})
}
if len(failures) > 0 {
preAnalysis[fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)] = common.PreAnalysis{
Pod: *pod,
FailureDetails: failures,
}
}
}
for key, value := range preAnalysis {
var currentAnalysis = common.Result{
Kind: kind,
Name: key,
Error: value.FailureDetails,
}
parent, _ := util.GetParent(a.Client, value.Pod.ObjectMeta)
currentAnalysis.ParentObject = parent
a.Results = append(a.Results, currentAnalysis)
}
return a.Results, nil
}
func configKey(namespace string, volume *corev1.Volume) (string, error) {
if volume.ConfigMap != nil {
return fmt.Sprintf("configmap/%s/%s", namespace, volume.ConfigMap.Name), nil
} else if volume.Secret != nil {
return fmt.Sprintf("secret/%s/%s", namespace, volume.Secret.SecretName), nil
} else {
return "", errors.New("volume format must be ConfigMap or Secret")
}
}
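For illustration, a ConfigMap-backed volume in a monitoring namespace dedupes under the following key; the names are hypothetical:

// Sketch only: the dedup key produced for a ConfigMap-backed volume.
vol := &corev1.Volume{
	VolumeSource: corev1.VolumeSource{
		ConfigMap: &corev1.ConfigMapVolumeSource{
			LocalObjectReference: corev1.LocalObjectReference{Name: "prometheus-config"},
		},
	},
}
key, _ := configKey("monitoring", vol)
fmt.Println(key) // configmap/monitoring/prometheus-config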
func findPrometheusPodConfigs(ctx context.Context, client kubernetes.Interface, namespace string) ([]podConfig, error) {
var configs []podConfig
pods, err := findPrometheusPods(ctx, client, namespace)
if err != nil {
return nil, err
}
var configCache = make(map[string]bool)
for _, pod := range pods {
// Extract volume of Prometheus config.
volume, key, err := findPrometheusConfigVolumeAndKey(ctx, client, &pod)
if err != nil {
return nil, err
}
// See if we processed it already; if so, don't process again.
ck, err := configKey(pod.Namespace, volume)
if err != nil {
return nil, err
}
_, ok := configCache[ck]
if ok {
continue
}
configCache[ck] = true
// Extract Prometheus config bytes from volume.
b, err := extractPrometheusConfigFromVolume(ctx, client, volume, pod.Namespace, key)
if err != nil {
return nil, err
}
configs = append(configs, podConfig{
pod: &pod,
b: b,
})
}
return configs, nil
}
func findPrometheusPods(ctx context.Context, client kubernetes.Interface, namespace string) ([]corev1.Pod, error) {
var proms []corev1.Pod
for k, v := range prometheusPodLabels {
pods, err := util.GetPodListByLabels(client, namespace, map[string]string{
k: v,
})
if err != nil {
return nil, err
}
proms = append(proms, pods.Items...)
}
// If we still haven't found any Prometheus pods, make a last-ditch effort to
// scrape the namespace for "prometheus" containers.
if len(proms) == 0 {
pods, err := client.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{})
if err != nil {
return nil, err
}
for _, pod := range pods.Items {
for _, c := range pod.Spec.Containers {
if c.Name == prometheusContainerName {
proms = append(proms, pod)
}
}
}
}
return proms, nil
}
func findPrometheusConfigPath(ctx context.Context, client kubernetes.Interface, pod *corev1.Pod) (string, error) {
var path string
var err error
for _, container := range pod.Spec.Containers {
for _, arg := range container.Args {
// Prefer the config-reloader container config file as it normally
// references the ConfigMap or Secret volume mount.
// Fall back to the prometheus container if that's not found.
// Use TrimPrefix (not TrimLeft, which treats the flag as a character set)
// so only the literal flag prefix is stripped from the path.
if strings.HasPrefix(arg, prometheusConfigFlag) {
path = strings.TrimPrefix(arg, prometheusConfigFlag)
}
if strings.HasPrefix(arg, configReloaderConfigFlag) {
path = strings.TrimPrefix(arg, configReloaderConfigFlag)
}
}
if container.Name == configReloaderContainerName {
return path, nil
}
}
if path == "" {
err = fmt.Errorf("prometheus config path not found in pod: %s", pod.Name)
}
return path, err
}
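As a worked example, the loop above resolves a typical container argument list as follows; the args are hypothetical:

// Sketch only: flag parsing over a typical Prometheus container's args.
args := []string{
	"--storage.tsdb.path=/prometheus",
	"--config.file=/etc/prometheus/prometheus.yml",
}
var path string
for _, arg := range args {
	if strings.HasPrefix(arg, prometheusConfigFlag) {
		path = strings.TrimPrefix(arg, prometheusConfigFlag)
	}
}
fmt.Println(path) // /etc/prometheus/prometheus.yml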
func findPrometheusConfigVolumeAndKey(ctx context.Context, client kubernetes.Interface, pod *corev1.Pod) (*corev1.Volume, string, error) {
path, err := findPrometheusConfigPath(ctx, client, pod)
if err != nil {
return nil, "", err
}
// Find the volumeMount the config path is pointing to.
var volumeName = ""
for _, container := range pod.Spec.Containers {
for _, vm := range container.VolumeMounts {
if strings.HasPrefix(path, vm.MountPath) {
volumeName = vm.Name
break
}
}
}
// Get the actual Volume from the name.
for _, volume := range pod.Spec.Volumes {
if volume.Name == volumeName {
return &volume, filepath.Base(path), nil
}
}
return nil, "", errors.New("volume for Prometheus config not found")
}
func extractPrometheusConfigFromVolume(ctx context.Context, client kubernetes.Interface, volume *corev1.Volume, namespace, key string) ([]byte, error) {
var b []byte
var ok bool
// Check for Secret volume.
if vs := volume.Secret; vs != nil {
s, err := client.CoreV1().Secrets(namespace).Get(ctx, vs.SecretName, v1.GetOptions{})
if err != nil {
return nil, err
}
b, ok = s.Data[key]
if !ok {
return nil, fmt.Errorf("unable to find file key in secret: %s", key)
}
}
// Check for ConfigMap volume.
if vcm := volume.ConfigMap; vcm != nil {
cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, vcm.Name, v1.GetOptions{})
if err != nil {
return nil, err
}
s, ok := cm.Data[key]
if !ok {
return nil, fmt.Errorf("unable to find file key in configmap: %s", key)
}
b = []byte(s)
}
return b, nil
}
func unmarshalPromConfigBytes(b []byte) (*promconfig.Config, error) {
var config promconfig.Config
// Unmarshal the data into a Prometheus config.
if err := yaml.Unmarshal(b, &config); err == nil {
return &config, nil
// If there were errors, try gunzipping the data.
} else if content := http.DetectContentType(b); content == "application/x-gzip" {
r, err := gzip.NewReader(bytes.NewBuffer(b))
if err != nil {
return &config, err
}
gunzipBytes, err := io.ReadAll(r)
if err != nil {
return &config, err
}
err = yaml.Unmarshal(gunzipBytes, &config)
if err != nil {
return nil, err
}
return &config, nil
} else {
return &config, err
}
}
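A short sketch of the gzip branch, assuming an in-memory buffer and a minimal, illustrative scrape config:

// Sketch only: gzipped bytes take the DetectContentType branch above.
var buf bytes.Buffer
zw := gzip.NewWriter(&buf)
_, _ = zw.Write([]byte("scrape_configs:\n- job_name: example\n"))
_ = zw.Close()
// http.DetectContentType(buf.Bytes()) reports "application/x-gzip",
// so the bytes are gunzipped before the YAML unmarshal runs.
cfg, err := unmarshalPromConfigBytes(buf.Bytes())
_, _ = cfg, err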

View File

@@ -0,0 +1,105 @@
package prometheus
import (
"context"
"errors"
"fmt"
"os"
"github.com/fatih/color"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/spf13/viper"
)
const (
ConfigValidate = "PrometheusConfigValidate"
ConfigRelabel = "PrometheusConfigRelabelReport"
)
type Prometheus struct {
}
func NewPrometheus() *Prometheus {
return &Prometheus{}
}
func (p *Prometheus) Deploy(namespace string) error {
// no-op
color.Green("Activating prometheus integration...")
// TODO(pintohutch): add timeout or inherit an upstream context
// for better signal management.
ctx := context.Background()
kubecontext := viper.GetString("kubecontext")
kubeconfig := viper.GetString("kubeconfig")
client, err := kubernetes.NewClient(kubecontext, kubeconfig)
if err != nil {
color.Red("Error initialising kubernetes client: %v", err)
os.Exit(1)
}
// We just care about existing deployments.
// Try and find Prometheus configurations in the cluster using the provided namespace.
//
// Note: We could cache this state and inject it into the various analyzers
// to save additional parsing later.
// However, the state of the cluster can change from activation to analysis,
// so we would want to run this again on each analyze call anyway.
//
// One consequence of this is one can run `activate` in one namespace
// and run `analyze` in another, without issues, as long as Prometheus
// is found in both.
// We accept this as a trade-off for the time being to avoid having the tool
// manage Prometheus on behalf of users.
podConfigs, err := findPrometheusPodConfigs(ctx, client.GetClient(), namespace)
if err != nil {
color.Red("Error discovering Prometheus worklads: %v", err)
os.Exit(1)
}
if len(podConfigs) == 0 {
color.Yellow(fmt.Sprintf(`Prometheus installation not found in namespace: %s.
Please ensure Prometheus is deployed to analyze.`, namespace))
return errors.New("no prometheus installation found")
}
// Prime state of the analyzer.
color.Green("Found existing installation")
return nil
}
func (p *Prometheus) UnDeploy(_ string) error {
// no-op
// We just care about existing deployments.
color.Yellow("Integration will leave Prometheus resources deployed. This is an effective no-op in the cluster.")
return nil
}
func (p *Prometheus) AddAnalyzer(mergedMap *map[string]common.IAnalyzer) {
(*mergedMap)[ConfigValidate] = &ConfigAnalyzer{}
(*mergedMap)[ConfigRelabel] = &RelabelAnalyzer{}
}
func (p *Prometheus) GetAnalyzerName() []string {
return []string{ConfigValidate, ConfigRelabel}
}
func (p *Prometheus) GetNamespace() (string, error) {
return "", nil
}
func (p *Prometheus) OwnsAnalyzer(analyzer string) bool {
return (analyzer == ConfigValidate) || (analyzer == ConfigRelabel)
}
func (p *Prometheus) IsActivate() bool {
activeFilters := viper.GetStringSlice("active_filters")
for _, filter := range p.GetAnalyzerName() {
for _, af := range activeFilters {
if af == filter {
return true
}
}
}
return false
}
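Activation therefore hinges on the persisted active_filters list alone; a minimal sketch, assuming the filters were persisted by a prior activate call:

// Sketch only: IsActivate is true once either analyzer name is an active filter.
viper.Set("active_filters", []string{"Pod", ConfigValidate})
p := NewPrometheus()
fmt.Println(p.IsActivate()) // true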

View File

@@ -0,0 +1,85 @@
package prometheus
import (
"fmt"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
discoverykube "github.com/prometheus/prometheus/discovery/kubernetes"
"gopkg.in/yaml.v2"
)
type RelabelAnalyzer struct {
}
func (r *RelabelAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
ctx := a.Context
client := a.Client.GetClient()
namespace := a.Namespace
kind := ConfigRelabel
podConfigs, err := findPrometheusPodConfigs(ctx, client, namespace)
if err != nil {
return nil, err
}
var preAnalysis = map[string]common.PreAnalysis{}
for _, pc := range podConfigs {
var failures []common.Failure
pod := pc.pod
// Check upstream validation.
// The Prometheus configuration structs do not generally have validation
// methods and embed their validation logic in the UnmarshalYAML methods.
config, _ := unmarshalPromConfigBytes(pc.b)
// Guard against a nil config (unmarshalPromConfigBytes can return nil on a
// failed gunzip+unmarshal); parse errors are reported by the ConfigValidate analyzer.
if config == nil {
continue
}
// Limit output for brevity.
limit := 6
i := 0
for _, sc := range config.ScrapeConfigs {
if i == limit {
break
}
if sc == nil {
continue
}
brc, _ := yaml.Marshal(sc.RelabelConfigs)
var bsd []byte
for _, cfg := range sc.ServiceDiscoveryConfigs {
ks, ok := cfg.(*discoverykube.SDConfig)
if !ok {
continue
}
bsd, _ = yaml.Marshal(ks)
}
// Don't bother with relabel analysis if the scrape config
// or service discovery config are empty.
if len(brc) == 0 || len(bsd) == 0 {
continue
}
failures = append(failures, common.Failure{
Text: fmt.Sprintf("job_name:\n%s\nrelabel_configs:\n%s\nkubernetes_sd_configs:\n%s\n", sc.JobName, string(brc), string(bsd)),
})
i++
}
if len(failures) > 0 {
preAnalysis[fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)] = common.PreAnalysis{
Pod: *pod,
FailureDetails: failures,
}
}
}
for key, value := range preAnalysis {
var currentAnalysis = common.Result{
Kind: kind,
Name: key,
Error: value.FailureDetails,
}
parent, _ := util.GetParent(a.Client, value.Pod.ObjectMeta)
currentAnalysis.ParentObject = parent
a.Results = append(a.Results, currentAnalysis)
}
return a.Results, nil
}

View File

@@ -33,12 +33,18 @@ func (h *handler) Analyze(ctx context.Context, i *schemav1.AnalyzeRequest) (
i.Explain,
int(i.MaxConcurrency),
false, // Kubernetes Doc disabled in server mode
false, // Interactive mode disabled in server mode
)
config.Context = ctx // Replace context for correct timeouts.
if err != nil {
return &schemav1.AnalyzeResponse{}, err
}
defer config.Close()
config.RunAnalysis()
if i.Explain {