Mirror of https://github.com/k8sgpt-ai/k8sgpt.git, synced 2025-08-02 08:06:18 +00:00
test: added tests for the Node analyzer (#1014)
* Added new tests for the `Node` analyzer defined in the `pkg/analyzer` package.

* The addition of these new tests has increased the code coverage of the node.go file to over 96%.

Partially addresses: https://github.com/k8sgpt-ai/k8sgpt/issues/889

Signed-off-by: VaibhavMalik4187 <vaibhavmalik2018@gmail.com>
Co-authored-by: Alex Jones <alexsimonjones@gmail.com>
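The coverage figure can be checked locally. A minimal sketch, assuming the standard Go toolchain and the repository root as the working directory (the profile file name is arbitrary):

    go test ./pkg/analyzer/ -run TestNodeAnalyzer -coverprofile=coverage.out
    go tool cover -func=coverage.out | grep node.go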
This commit (a1d0d0a180) has parent f60467cd4d.
pkg/analyzer/node_test.go
@@ -15,110 +15,155 @@ package analyzer

 import (
 	"context"
+	"sort"
 	"testing"

 	"github.com/k8sgpt-ai/k8sgpt/pkg/common"
 	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
-	"github.com/magiconair/properties/assert"
+	"github.com/stretchr/testify/require"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/fake"
 )

-func TestNodeAnalyzerNodeReady(t *testing.T) {
-	clientset := fake.NewSimpleClientset(&v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "node1",
-		},
-		Status: v1.NodeStatus{
-			Conditions: []v1.NodeCondition{
-				{
-					Type:    v1.NodeReady,
-					Status:  v1.ConditionTrue,
-					Reason:  "KubeletReady",
-					Message: "kubelet is posting ready status",
-				},
-			},
-		},
-	})
-
-	config := common.Analyzer{
-		Client: &kubernetes.Client{
-			Client: clientset,
-		},
-		Context: context.Background(),
-	}
-	nodeAnalyzer := NodeAnalyzer{}
-	var analysisResults []common.Result
-	analysisResults, err := nodeAnalyzer.Analyze(config)
-	if err != nil {
-		t.Error(err)
-	}
-	assert.Equal(t, len(analysisResults), 0)
-}
-
-func TestNodeAnalyzerNodeDiskPressure(t *testing.T) {
-	clientset := fake.NewSimpleClientset(&v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "node1",
-		},
-		Status: v1.NodeStatus{
-			Conditions: []v1.NodeCondition{
-				{
-					Type:    v1.NodeDiskPressure,
-					Status:  v1.ConditionTrue,
-					Reason:  "KubeletHasDiskPressure",
-					Message: "kubelet has disk pressure",
-				},
-			},
-		},
-	})
-
-	config := common.Analyzer{
-		Client: &kubernetes.Client{
-			Client: clientset,
-		},
-		Context: context.Background(),
-	}
-	nodeAnalyzer := NodeAnalyzer{}
-	var analysisResults []common.Result
-	analysisResults, err := nodeAnalyzer.Analyze(config)
-	if err != nil {
-		t.Error(err)
-	}
-	assert.Equal(t, len(analysisResults), 1)
-}
-
-// A cloud provider may set their own condition and/or a new status might be introduced
-// In such cases a failure is assumed and the code shouldn't break, although it might be a false positive
-func TestNodeAnalyzerNodeUnknownType(t *testing.T) {
-	clientset := fake.NewSimpleClientset(&v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "node1",
-		},
-		Status: v1.NodeStatus{
-			Conditions: []v1.NodeCondition{
-				{
-					Type:    "UnknownNodeConditionType",
-					Status:  "CompletelyUnknown",
-					Reason:  "KubeletHasTheUnknown",
-					Message: "kubelet has the unknown",
-				},
-			},
-		},
-	})
-
-	config := common.Analyzer{
-		Client: &kubernetes.Client{
-			Client: clientset,
-		},
-		Context: context.Background(),
-	}
-	nodeAnalyzer := NodeAnalyzer{}
-	var analysisResults []common.Result
-	analysisResults, err := nodeAnalyzer.Analyze(config)
-	if err != nil {
-		t.Error(err)
-	}
-	assert.Equal(t, len(analysisResults), 1)
-}
+func TestNodeAnalyzer(t *testing.T) {
+	config := common.Analyzer{
+		Client: &kubernetes.Client{
+			Client: fake.NewSimpleClientset(
+				&v1.Node{
+					// A node without Status Conditions shouldn't contribute to failures.
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "Node1",
+						Namespace: "test",
+					},
+				},
+				&v1.Node{
+					// Nodes are not filtered using namespace.
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "Node2",
+						Namespace: "default",
+					},
+					Status: v1.NodeStatus{
+						Conditions: []v1.NodeCondition{
+							{
+								// Won't contribute to failures.
+								Type:   v1.NodeReady,
+								Status: v1.ConditionTrue,
+							},
+							{
+								// Will contribute to failures.
+								Type:   v1.NodeReady,
+								Status: v1.ConditionFalse,
+							},
+							{
+								// Will contribute to failures.
+								Type:   v1.NodeReady,
+								Status: v1.ConditionUnknown,
+							},
+							// Non-false statuses for the default cases contribute to failures.
+							{
+								Type:   v1.NodeMemoryPressure,
+								Status: v1.ConditionTrue,
+							},
+							{
+								Type:   v1.NodeDiskPressure,
+								Status: v1.ConditionTrue,
+							},
+							{
+								Type:   v1.NodePIDPressure,
+								Status: v1.ConditionTrue,
+							},
+							{
+								Type:   v1.NodeNetworkUnavailable,
+								Status: v1.ConditionTrue,
+							},
+							{
+								Type:   v1.NodeMemoryPressure,
+								Status: v1.ConditionUnknown,
+							},
+							{
+								Type:   v1.NodeDiskPressure,
+								Status: v1.ConditionUnknown,
+							},
+							{
+								Type:   v1.NodePIDPressure,
+								Status: v1.ConditionUnknown,
+							},
+							{
+								Type:   v1.NodeNetworkUnavailable,
+								Status: v1.ConditionUnknown,
+							},
+							// A cloud provider may set their own condition and/or a new status
+							// might be introduced. In such cases a failure is assumed and
+							// the code shouldn't break, although it might be a false positive.
+							{
+								Type:   "UnknownNodeConditionType",
+								Status: "CompletelyUnknown",
+							},
+							// These won't contribute to failures.
+							{
+								Type:   v1.NodeMemoryPressure,
+								Status: v1.ConditionFalse,
+							},
+							{
+								Type:   v1.NodeDiskPressure,
+								Status: v1.ConditionFalse,
+							},
+							{
+								Type:   v1.NodePIDPressure,
+								Status: v1.ConditionFalse,
+							},
+							{
+								Type:   v1.NodeNetworkUnavailable,
+								Status: v1.ConditionFalse,
+							},
+						},
+					},
+				},
+				&v1.Node{
+					// A node without any failures shouldn't be present in the results.
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "Node3",
+						Namespace: "test",
+					},
+					Status: v1.NodeStatus{
+						Conditions: []v1.NodeCondition{
+							{
+								// Won't contribute to failures.
+								Type:   v1.NodeReady,
+								Status: v1.ConditionTrue,
+							},
+						},
+					},
+				},
+			),
+		},
+		Context:   context.Background(),
+		Namespace: "test",
+	}
+
+	nAnalyzer := NodeAnalyzer{}
+	results, err := nAnalyzer.Analyze(config)
+	require.NoError(t, err)
+
+	sort.Slice(results, func(i, j int) bool {
+		return results[i].Name < results[j].Name
+	})
+
+	expectations := []struct {
+		name          string
+		failuresCount int
+	}{
+		{
+			name:          "Node2",
+			failuresCount: 11,
+		},
+	}
+
+	require.Equal(t, len(expectations), len(results))
+
+	for i, result := range results {
+		require.Equal(t, expectations[i].name, result.Name)
+		require.Equal(t, expectations[i].failuresCount, len(result.Error))
+	}
+}