kata-monitor: drop the runtime info from the sandbox cache

We keep the container engine info in the sandbox cache map as the value
associated with the pod ID (the key). Since that value was only used in
getMonitorAddress(), which is now gone, there is no need to store it
anymore. Let's drop it.
Keep the map structure and the [put,delete]IfExists functions, as we may
want to move to an event-based cache update process sooner or later and
will need them then.

Signed-off-by: Francesco Giudici <fgiudici@redhat.com>
Francesco Giudici 2021-07-21 10:07:21 +02:00
parent 97dcc5f78a
commit 68a6f011b5
5 changed files with 23 additions and 56 deletions
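For context: after this patch the sandbox cache is just a set of sandbox IDs. Below is a
minimal usage sketch of the resulting API; exampleCacheUsage is a made-up helper assumed
to live next to sandboxCache in the katamonitor package, and the sandbox ID literal is
purely illustrative.

func exampleCacheUsage() {
	sc := &sandboxCache{
		Mutex:     &sync.Mutex{},
		sandboxes: make(map[string]struct{}),
	}

	// putIfNotExists/deleteIfExists now just report whether the cache changed;
	// no runtime string is stored alongside the sandbox ID anymore.
	added := sc.putIfNotExists("0123456789abcdef") // hypothetical sandbox ID
	removed := sc.deleteIfExists("0123456789abcdef")
	_, _ = added, removed

	// iterating the cache yields sandbox IDs only
	for sandboxID := range sc.getAllSandboxes() {
		_ = sandboxID
	}
}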


@@ -132,9 +132,9 @@ func parseEndpoint(endpoint string) (string, string, error) {
 // getSandboxes get kata sandbox from the container engine.
 // this will be called only after monitor start.
-func (km *KataMonitor) getSandboxes() (map[string]string, error) {
+func (km *KataMonitor) getSandboxes() (map[string]struct{}, error) {
-	sandboxMap := map[string]string{}
+	sandboxMap := make(map[string]struct{})
 	runtimeClient, runtimeConn, err := getRuntimeClient(km.runtimeEndpoint)
 	if err != nil {
 		return sandboxMap, err
@@ -169,25 +169,12 @@ func (km *KataMonitor) getSandboxes() (map[string]string, error) {
 		}
 		lowRuntime := ""
-		highRuntime := ""
 		var res map[string]interface{}
 		if err := json.Unmarshal([]byte(r.Info["info"]), &res); err != nil {
 			monitorLog.WithError(err).WithField("pod", r).Error("failed to Unmarshal pod info")
 			continue
 		} else {
 			monitorLog.WithField("pod info", res).Debug("")
-			// get high level container runtime
-			pointer, _ := gojsonpointer.NewJsonPointer("/runtimeSpec/annotations/io.container.manager")
-			rt, _, _ := pointer.Get(res)
-			if rt != nil {
-				if str, ok := rt.(string); ok {
-					if str == "cri-o" {
-						highRuntime = RuntimeCRIO
-					} else {
-						highRuntime = RuntimeContainerd
-					}
-				}
-			}
 			// get low level container runtime
 			// containerd stores the pod runtime in "/runtimeType" while CRI-O stores it the
@@ -207,11 +194,10 @@ func (km *KataMonitor) getSandboxes() (map[string]string, error) {
 		// Filter by pod name/namespace regular expressions.
 		monitorLog.WithFields(logrus.Fields{
-			"low runtime":  lowRuntime,
-			"high runtime": highRuntime,
+			"low runtime": lowRuntime,
 		}).Debug("")
 		if matchesRegex(types.KataRuntimeNameRegexp, lowRuntime) || matchesRegex("kata*", lowRuntime) {
-			sandboxMap[pod.Id] = highRuntime
+			sandboxMap[pod.Id] = struct{}{}
 		}
 	}


@@ -159,9 +159,9 @@ func (km *KataMonitor) aggregateSandboxMetrics(encoder expfmt.Encoder) error {
 	monitorLog.WithField("sandbox_count", len(sandboxes)).Debugf("sandboxes count")
 	// get metrics from sandbox's shim
-	for sandboxID, runtime := range sandboxes {
+	for sandboxID := range sandboxes {
 		wg.Add(1)
-		go func(sandboxID, runtime string, results chan<- []*dto.MetricFamily) {
+		go func(sandboxID string, results chan<- []*dto.MetricFamily) {
 			sandboxMetrics, err := getParsedMetrics(sandboxID)
 			if err != nil {
 				monitorLog.WithError(err).WithField("sandbox_id", sandboxID).Errorf("failed to get metrics for sandbox")
@@ -170,7 +170,7 @@ func (km *KataMonitor) aggregateSandboxMetrics(encoder expfmt.Encoder) error {
 			results <- sandboxMetrics
 			wg.Done()
 			monitorLog.WithField("sandbox_id", sandboxID).Debug("job finished")
-		}(sandboxID, runtime, results)
+		}(sandboxID, results)
 		monitorLog.WithField("sandbox_id", sandboxID).Debug("job started")
 	}


@@ -50,7 +50,7 @@ func NewKataMonitor(runtimeEndpoint string) (*KataMonitor, error) {
 		runtimeEndpoint: runtimeEndpoint,
 		sandboxCache: &sandboxCache{
 			Mutex:     &sync.Mutex{},
-			sandboxes: make(map[string]string),
+			sandboxes: make(map[string]struct{}),
 		},
 	}
@@ -112,7 +112,3 @@ func (km *KataMonitor) getSandboxList() []string {
 	}
 	return result
 }
-func (km *KataMonitor) getSandboxRuntime(sandbox string) (string, error) {
-	return km.sandboxCache.getSandboxRuntime(sandbox)
-}


@@ -6,40 +6,39 @@
 package katamonitor
 import (
-	"fmt"
 	"sync"
 )
 type sandboxCache struct {
 	*sync.Mutex
-	sandboxes map[string]string
+	sandboxes map[string]struct{}
 }
-func (sc *sandboxCache) getAllSandboxes() map[string]string {
+func (sc *sandboxCache) getAllSandboxes() map[string]struct{} {
 	sc.Lock()
 	defer sc.Unlock()
 	return sc.sandboxes
 }
-func (sc *sandboxCache) deleteIfExists(id string) (string, bool) {
+func (sc *sandboxCache) deleteIfExists(id string) bool {
 	sc.Lock()
 	defer sc.Unlock()
-	if val, found := sc.sandboxes[id]; found {
+	if _, found := sc.sandboxes[id]; found {
 		delete(sc.sandboxes, id)
-		return val, true
+		return true
 	}
 	// not in sandbox cache
-	return "", false
+	return false
 }
-func (sc *sandboxCache) putIfNotExists(id, value string) bool {
+func (sc *sandboxCache) putIfNotExists(id string) bool {
 	sc.Lock()
 	defer sc.Unlock()
 	if _, found := sc.sandboxes[id]; !found {
-		sc.sandboxes[id] = value
+		sc.sandboxes[id] = struct{}{}
 		return true
 	}
@@ -47,19 +46,8 @@ func (sc *sandboxCache) putIfNotExists(id string) bool {
 	return false
 }
-func (sc *sandboxCache) set(sandboxes map[string]string) {
+func (sc *sandboxCache) set(sandboxes map[string]struct{}) {
 	sc.Lock()
 	defer sc.Unlock()
 	sc.sandboxes = sandboxes
 }
-func (sc *sandboxCache) getSandboxRuntime(sandbox string) (string, error) {
-	sc.Lock()
-	defer sc.Unlock()
-	if val, found := sc.sandboxes[sandbox]; found {
-		return val, nil
-	}
-	return "", fmt.Errorf("sandbox %s not in cache", sandbox)
-}


@@ -16,10 +16,10 @@ func TestSandboxCache(t *testing.T) {
 	assert := assert.New(t)
 	sc := &sandboxCache{
 		Mutex:     &sync.Mutex{},
-		sandboxes: make(map[string]string),
+		sandboxes: make(map[string]struct{}),
 	}
-	scMap := map[string]string{"111": "222"}
+	scMap := map[string]struct{}{"111": {}}
 	sc.set(scMap)
@@ -28,22 +28,19 @@ func TestSandboxCache(t *testing.T) {
 	// put new item
 	id := "new-id"
-	value := "new-value"
-	b := sc.putIfNotExists(id, "new-value")
+	b := sc.putIfNotExists(id)
 	assert.Equal(true, b)
 	assert.Equal(2, len(scMap))
 	// put key that alreay exists
-	b = sc.putIfNotExists(id, "new-value")
+	b = sc.putIfNotExists(id)
 	assert.Equal(false, b)
-	v, b := sc.deleteIfExists(id)
-	assert.Equal(value, v)
+	b = sc.deleteIfExists(id)
 	assert.Equal(true, b)
 	assert.Equal(1, len(scMap))
-	v, b = sc.deleteIfExists(id)
-	assert.Equal("", v)
+	b = sc.deleteIfExists(id)
 	assert.Equal(false, b)
 	assert.Equal(1, len(scMap))
 }