Merge pull request #80570 from klueska/upstream-add-topology-manager-to-devicemanager

Add support for Topology Manager to Device Manager
Kubernetes Prow Robot 2019-08-29 21:21:44 -07:00 committed by GitHub
commit 7d6f8d8f69
15 changed files with 1017 additions and 193 deletions
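With this change, TopologyInfo in the deviceplugin v1beta1 API carries a repeated Nodes field instead of a single Node, so a plugin can associate each device with one or more NUMA nodes and the kubelet's Device Manager can feed that into the Topology Manager. The following is a minimal sketch, not part of this diff, of a plugin advertising such devices; the device IDs and NUMA node numbers are hypothetical.

package main

import (
    pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
)

// devicesOnNUMANode builds example devices reporting affinity to one NUMA node.
func devicesOnNUMANode(ids []string, node int64) []*pluginapi.Device {
    var devs []*pluginapi.Device
    for _, id := range ids {
        devs = append(devs, &pluginapi.Device{
            ID:     id,
            Health: pluginapi.Healthy,
            // Nodes is now a repeated field, so a device may also list several NUMA nodes.
            Topology: &pluginapi.TopologyInfo{
                Nodes: []*pluginapi.NUMANode{{ID: node}},
            },
        })
    }
    return devs
}

func main() {
    // The Device Manager reads this topology information to generate
    // TopologyHints and to prefer NUMA-aligned devices during allocation.
    resp := &pluginapi.ListAndWatchResponse{
        Devices: devicesOnNUMANode([]string{"exampledev-0", "exampledev-1"}, 0),
    }
    _ = resp
}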

View File

@@ -252,9 +252,9 @@ func (m *ListAndWatchResponse) GetDevices() []*Device {
}
type TopologyInfo struct {
Node *NUMANode `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_sizecache int32 `json:"-"`
Nodes []*NUMANode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *TopologyInfo) Reset() { *m = TopologyInfo{} }
@@ -289,9 +289,9 @@ func (m *TopologyInfo) XXX_DiscardUnknown() {
var xxx_messageInfo_TopologyInfo proto.InternalMessageInfo
func (m *TopologyInfo) GetNode() *NUMANode {
func (m *TopologyInfo) GetNodes() []*NUMANode {
if m != nil {
return m.Node
return m.Nodes
}
return nil
}
@@ -881,59 +881,59 @@ func init() {
func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) }
var fileDescriptor_00212fb1f9d3bf1c = []byte{
// 824 bytes of a gzipped FileDescriptorProto
// 822 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xdd, 0x8e, 0xdb, 0x44,
0x14, 0x8e, 0x93, 0xdd, 0x8d, 0x73, 0x92, 0xee, 0x66, 0x67, 0x4b, 0xe5, 0x75, 0x8b, 0x15, 0x06,
0x15, 0x2d, 0x52, 0x9b, 0xb2, 0xa9, 0x28, 0xa8, 0x17, 0x88, 0xd0, 0x2c, 0xb0, 0x12, 0xdd, 0x46,
0xb3, 0x54, 0xdc, 0x20, 0x45, 0x8e, 0x33, 0x8d, 0x2d, 0x9c, 0x19, 0xe3, 0x99, 0x44, 0xca, 0x1d,
0x17, 0x3c, 0x00, 0x0f, 0xc1, 0x63, 0xf0, 0x00, 0xbd, 0xe4, 0x92, 0x4b, 0x1a, 0x5e, 0x04, 0x79,
0xec, 0xb1, 0x2d, 0x37, 0xbb, 0x0b, 0x52, 0xef, 0x7c, 0x7e, 0xbe, 0x33, 0xdf, 0x9c, 0x73, 0xe6,
0x33, 0xb4, 0xdc, 0x28, 0xe8, 0x47, 0x31, 0x97, 0x1c, 0x35, 0x57, 0xa7, 0x53, 0x2a, 0xdd, 0x53,
0xfb, 0xe1, 0x3c, 0x90, 0xfe, 0x72, 0xda, 0xf7, 0xf8, 0xe2, 0xd1, 0x9c, 0xcf, 0xf9, 0x23, 0x15,
0x9f, 0x2e, 0x5f, 0x29, 0x4b, 0x19, 0xea, 0x2b, 0xc5, 0xe1, 0x67, 0x70, 0x34, 0xa2, 0xab, 0xc0,
0xa3, 0xe3, 0x70, 0x39, 0x0f, 0xd8, 0x8b, 0x48, 0x06, 0x9c, 0x09, 0xf4, 0x00, 0x50, 0x14, 0xd3,
0x89, 0x90, 0x6e, 0x2c, 0x27, 0x31, 0xfd, 0x79, 0x19, 0xc4, 0x74, 0x66, 0x19, 0x3d, 0xe3, 0xc4,
0x24, 0xdd, 0x28, 0xa6, 0x97, 0x49, 0x80, 0x64, 0x7e, 0xfc, 0xbb, 0x01, 0x07, 0x84, 0xce, 0x03,
0x21, 0x69, 0x9c, 0x38, 0xa9, 0x90, 0xc8, 0x82, 0xe6, 0x8a, 0xc6, 0x22, 0xe0, 0x4c, 0xc1, 0x5a,
0x44, 0x9b, 0xc8, 0x06, 0x93, 0xb2, 0x59, 0xc4, 0x03, 0x26, 0xad, 0xba, 0x0a, 0xe5, 0x36, 0xfa,
0x10, 0x6e, 0xc5, 0x54, 0xf0, 0x65, 0xec, 0xd1, 0x09, 0x73, 0x17, 0xd4, 0x6a, 0xa8, 0x84, 0x8e,
0x76, 0x5e, 0xb8, 0x0b, 0x8a, 0x9e, 0x40, 0x93, 0xa7, 0x3c, 0xad, 0x9d, 0x9e, 0x71, 0xd2, 0x1e,
0xdc, 0xeb, 0x67, 0xb7, 0xef, 0x6f, 0xb9, 0x0b, 0xd1, 0xc9, 0xb8, 0x09, 0xbb, 0x67, 0x8b, 0x48,
0xae, 0xf1, 0x10, 0x6e, 0x7f, 0x17, 0x08, 0x39, 0x64, 0xb3, 0x1f, 0x5c, 0xe9, 0xf9, 0x84, 0x8a,
0x88, 0x33, 0x41, 0xd1, 0xc7, 0xd0, 0x9c, 0xa9, 0x02, 0xc2, 0x32, 0x7a, 0x8d, 0x93, 0xf6, 0xe0,
0xa0, 0x52, 0x98, 0xe8, 0x38, 0xfe, 0x14, 0x3a, 0xdf, 0xf3, 0x88, 0x87, 0x7c, 0xbe, 0x3e, 0x67,
0xaf, 0x38, 0xba, 0x0f, 0x3b, 0x8c, 0xcf, 0xa8, 0xba, 0x6b, 0x7b, 0x70, 0x98, 0xe3, 0x2e, 0x5e,
0x3e, 0x1f, 0x5e, 0xf0, 0x19, 0x25, 0x2a, 0x8c, 0x6d, 0x30, 0xb5, 0x07, 0xed, 0x43, 0xfd, 0x7c,
0xa4, 0x00, 0x0d, 0x52, 0x0f, 0x46, 0xd8, 0x83, 0xbd, 0xf4, 0x94, 0x52, 0xa4, 0x95, 0x44, 0xd0,
0x1d, 0xd8, 0xf3, 0xa9, 0x1b, 0x4a, 0x3f, 0xeb, 0x57, 0x66, 0xa1, 0x53, 0x30, 0x65, 0x46, 0x42,
0x35, 0xaa, 0x3d, 0x78, 0x2f, 0x3f, 0xb8, 0xcc, 0x8e, 0xe4, 0x69, 0xf8, 0x29, 0x58, 0xe3, 0x6c,
0x7c, 0xcf, 0x38, 0x93, 0x6e, 0xc0, 0x8a, 0x91, 0x39, 0x00, 0xd9, 0xf5, 0xce, 0x47, 0x69, 0x07,
0x5a, 0xa4, 0xe4, 0xc1, 0x77, 0xe1, 0x78, 0x0b, 0x36, 0xed, 0x1d, 0xf6, 0xe0, 0x60, 0x18, 0x86,
0xdc, 0x73, 0x25, 0xd5, 0xf5, 0xc6, 0x80, 0x3c, 0x9d, 0xa7, 0x96, 0x88, 0x0a, 0xa9, 0x3b, 0xfb,
0x41, 0x4e, 0x34, 0x2f, 0x55, 0x81, 0x93, 0x43, 0xaf, 0x42, 0x50, 0x24, 0xec, 0xaf, 0x4a, 0xbf,
0x91, 0xfd, 0x1c, 0xba, 0x05, 0x24, 0x1b, 0xf8, 0x25, 0x1c, 0x95, 0x19, 0xa6, 0x5e, 0x4d, 0x11,
0x5f, 0x47, 0x31, 0x4d, 0x25, 0xc8, 0xab, 0x36, 0x42, 0xe0, 0x5f, 0x1b, 0x70, 0x7c, 0x25, 0x02,
0x7d, 0x09, 0x3b, 0x94, 0xad, 0xf4, 0x19, 0x0f, 0x6e, 0x3e, 0xa3, 0x7f, 0xc6, 0x56, 0xe2, 0x8c,
0xc9, 0x78, 0x4d, 0x14, 0x12, 0x7d, 0x04, 0x7b, 0x0b, 0xbe, 0x64, 0x52, 0x58, 0x75, 0x55, 0x63,
0x3f, 0xaf, 0xf1, 0x3c, 0x71, 0x93, 0x2c, 0x8a, 0x1e, 0x16, 0xdb, 0xdc, 0x50, 0x89, 0x47, 0x95,
0x6d, 0xbe, 0x8c, 0xa8, 0x97, 0x6f, 0x34, 0x7a, 0x09, 0x6d, 0x97, 0x31, 0x2e, 0x5d, 0xfd, 0xb2,
0x12, 0xc8, 0xe3, 0xff, 0xc0, 0x6f, 0x58, 0xa0, 0x52, 0x9a, 0xe5, 0x3a, 0xf6, 0x67, 0xd0, 0xca,
0x2f, 0x80, 0xba, 0xd0, 0xf8, 0x89, 0xae, 0xb3, 0xcd, 0x4e, 0x3e, 0xd1, 0x6d, 0xd8, 0x5d, 0xb9,
0xe1, 0x92, 0x66, 0x9b, 0x9d, 0x1a, 0x4f, 0xeb, 0x9f, 0x1b, 0xf6, 0x17, 0xd0, 0xad, 0x56, 0xfe,
0x3f, 0x78, 0xec, 0xc3, 0xae, 0xea, 0x07, 0xba, 0x0f, 0xfb, 0xc5, 0x90, 0x23, 0x57, 0xfa, 0x19,
0xfe, 0x56, 0xee, 0x1d, 0xbb, 0xd2, 0x47, 0x77, 0xa1, 0xe5, 0x73, 0x21, 0xd3, 0x8c, 0x4c, 0x97,
0x12, 0x87, 0x0e, 0xc6, 0xd4, 0x9d, 0x4d, 0x38, 0x0b, 0xd3, 0xa7, 0x66, 0x12, 0x33, 0x71, 0xbc,
0x60, 0xe1, 0x1a, 0xc7, 0x00, 0x45, 0x43, 0xdf, 0xc9, 0x71, 0x3d, 0x68, 0x47, 0x34, 0x5e, 0x04,
0x42, 0xa8, 0x59, 0xa4, 0x22, 0x58, 0x76, 0x0d, 0xbe, 0x86, 0x4e, 0xaa, 0xb8, 0xb1, 0xea, 0x0f,
0x7a, 0x02, 0xa6, 0x56, 0x60, 0x64, 0xe5, 0x43, 0xab, 0x88, 0xb2, 0x5d, 0xac, 0x4a, 0x2a, 0x84,
0xb5, 0xc1, 0x1f, 0x75, 0xe8, 0x94, 0x45, 0x13, 0x7d, 0x0b, 0x77, 0xbe, 0xa1, 0x72, 0xdb, 0x3f,
0xa1, 0x02, 0xb6, 0xaf, 0x55, 0x5d, 0x5c, 0x43, 0x43, 0xe8, 0x94, 0x55, 0xf6, 0x2d, 0xfc, 0xfb,
0xb9, 0xbd, 0x4d, 0x8c, 0x71, 0xed, 0x13, 0x03, 0x0d, 0xc1, 0xd4, 0xeb, 0x56, 0xba, 0x55, 0xe5,
0xe5, 0xdb, 0xc7, 0x5b, 0x22, 0xba, 0x08, 0xfa, 0x11, 0x0e, 0xdf, 0x12, 0x2d, 0x54, 0xa8, 0xcf,
0x55, 0x62, 0x68, 0xe3, 0xeb, 0x52, 0x74, 0xf5, 0xaf, 0xee, 0xbd, 0x7e, 0xe3, 0x18, 0x7f, 0xbd,
0x71, 0x6a, 0xbf, 0x6c, 0x1c, 0xe3, 0xf5, 0xc6, 0x31, 0xfe, 0xdc, 0x38, 0xc6, 0xdf, 0x1b, 0xc7,
0xf8, 0xed, 0x1f, 0xa7, 0x36, 0xdd, 0x53, 0xff, 0xd8, 0xc7, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff,
0xe3, 0xb3, 0x5b, 0x04, 0xa8, 0x07, 0x00, 0x00,
0x14, 0x8e, 0x93, 0x6e, 0xe2, 0x9c, 0xa4, 0xbb, 0xd9, 0xd9, 0x52, 0x79, 0xdd, 0x62, 0x85, 0x41,
0xc0, 0x22, 0xb5, 0x29, 0x9b, 0x4a, 0x2d, 0xea, 0x05, 0x22, 0x34, 0x0b, 0xac, 0x44, 0xb7, 0xd1,
0x2c, 0x15, 0x37, 0x48, 0x91, 0xe3, 0x4c, 0x63, 0x0b, 0x67, 0xc6, 0x78, 0x26, 0x91, 0x72, 0xc7,
0x05, 0x0f, 0xc0, 0x43, 0xf0, 0x18, 0x3c, 0x40, 0x2f, 0xb9, 0xe4, 0x92, 0x86, 0x17, 0x41, 0x1e,
0x7b, 0x6c, 0xcb, 0xcd, 0x6e, 0x41, 0xea, 0x9d, 0xcf, 0xcf, 0x77, 0xe6, 0x9b, 0x73, 0xce, 0x7c,
0x86, 0xb6, 0x1b, 0x05, 0x83, 0x28, 0xe6, 0x92, 0xa3, 0xd6, 0xfa, 0x74, 0x46, 0xa5, 0x7b, 0x6a,
0xdf, 0x5f, 0x04, 0xd2, 0x5f, 0xcd, 0x06, 0x1e, 0x5f, 0x3e, 0x58, 0xf0, 0x05, 0x7f, 0xa0, 0xe2,
0xb3, 0xd5, 0x4b, 0x65, 0x29, 0x43, 0x7d, 0xa5, 0x38, 0xfc, 0x14, 0x8e, 0xc6, 0x74, 0x1d, 0x78,
0x74, 0x12, 0xae, 0x16, 0x01, 0x7b, 0x1e, 0xc9, 0x80, 0x33, 0x81, 0xee, 0x01, 0x8a, 0x62, 0x3a,
0x15, 0xd2, 0x8d, 0xe5, 0x34, 0xa6, 0x3f, 0xaf, 0x82, 0x98, 0xce, 0x2d, 0xa3, 0x6f, 0x9c, 0x98,
0xa4, 0x17, 0xc5, 0xf4, 0x32, 0x09, 0x90, 0xcc, 0x8f, 0x7f, 0x37, 0xe0, 0x80, 0xd0, 0x45, 0x20,
0x24, 0x8d, 0x13, 0x27, 0x15, 0x12, 0x59, 0xd0, 0x5a, 0xd3, 0x58, 0x04, 0x9c, 0x29, 0x58, 0x9b,
0x68, 0x13, 0xd9, 0x60, 0x52, 0x36, 0x8f, 0x78, 0xc0, 0xa4, 0x55, 0x57, 0xa1, 0xdc, 0x46, 0x1f,
0xc2, 0xcd, 0x98, 0x0a, 0xbe, 0x8a, 0x3d, 0x3a, 0x65, 0xee, 0x92, 0x5a, 0x0d, 0x95, 0xd0, 0xd5,
0xce, 0x0b, 0x77, 0x49, 0xd1, 0x23, 0x68, 0xf1, 0x94, 0xa7, 0x75, 0xa3, 0x6f, 0x9c, 0x74, 0x86,
0x77, 0x07, 0xd9, 0xed, 0x07, 0x3b, 0xee, 0x42, 0x74, 0x32, 0x6e, 0xc1, 0xde, 0xd9, 0x32, 0x92,
0x1b, 0x3c, 0x82, 0x5b, 0xdf, 0x05, 0x42, 0x8e, 0xd8, 0xfc, 0x07, 0x57, 0x7a, 0x3e, 0xa1, 0x22,
0xe2, 0x4c, 0x50, 0xf4, 0x29, 0xb4, 0xe6, 0xaa, 0x80, 0xb0, 0x8c, 0x7e, 0xe3, 0xa4, 0x33, 0x3c,
0xa8, 0x14, 0x26, 0x3a, 0x8e, 0x1f, 0x43, 0xf7, 0x7b, 0x1e, 0xf1, 0x90, 0x2f, 0x36, 0xe7, 0xec,
0x25, 0x47, 0x9f, 0xc0, 0x1e, 0xe3, 0xf3, 0x1c, 0x78, 0x98, 0x03, 0x2f, 0x5e, 0x3c, 0x1b, 0x5d,
0xf0, 0x39, 0x25, 0x69, 0x1c, 0xdb, 0x60, 0x6a, 0x17, 0xda, 0x87, 0xfa, 0xf9, 0x58, 0xb5, 0xa7,
0x41, 0xea, 0xc1, 0x18, 0x7b, 0xd0, 0x4c, 0xcf, 0x29, 0x45, 0xda, 0x49, 0x04, 0xdd, 0x86, 0xa6,
0x4f, 0xdd, 0x50, 0xfa, 0x59, 0xc7, 0x32, 0x0b, 0x9d, 0x82, 0x29, 0x33, 0x1a, 0xaa, 0x55, 0x9d,
0xe1, 0x7b, 0xf9, 0xc9, 0x65, 0x7e, 0x24, 0x4f, 0xc3, 0x4f, 0xc0, 0x9a, 0x64, 0x03, 0x7c, 0xca,
0x99, 0x74, 0x03, 0x56, 0x0c, 0xcd, 0x01, 0xc8, 0x2e, 0x78, 0x3e, 0x4e, 0xaf, 0xd2, 0x26, 0x25,
0x0f, 0xbe, 0x03, 0xc7, 0x3b, 0xb0, 0x69, 0xf7, 0xb0, 0x07, 0x07, 0xa3, 0x30, 0xe4, 0x9e, 0x2b,
0xa9, 0xae, 0x37, 0x01, 0xe4, 0xe9, 0x3c, 0xb5, 0x46, 0x54, 0x48, 0xdd, 0xa2, 0x0f, 0x72, 0xa2,
0x79, 0xa9, 0x0a, 0x9c, 0x1c, 0x7a, 0x15, 0x82, 0x22, 0x61, 0x7f, 0x55, 0xfa, 0x5b, 0xd9, 0x2f,
0xa0, 0x57, 0x40, 0xb2, 0x91, 0x5f, 0xc2, 0x51, 0x99, 0x61, 0xea, 0xd5, 0x14, 0xf1, 0x75, 0x14,
0xd3, 0x54, 0x82, 0xbc, 0x6a, 0x23, 0x04, 0xfe, 0xb5, 0x01, 0xc7, 0x57, 0x22, 0xd0, 0x97, 0x70,
0x83, 0xb2, 0xb5, 0x3e, 0xe3, 0xde, 0xdb, 0xcf, 0x18, 0x9c, 0xb1, 0xb5, 0x38, 0x63, 0x32, 0xde,
0x10, 0x85, 0x44, 0x1f, 0x43, 0x73, 0xc9, 0x57, 0x4c, 0x0a, 0xab, 0xae, 0x6a, 0xec, 0xe7, 0x35,
0x9e, 0x25, 0x6e, 0x92, 0x45, 0xd1, 0xfd, 0x62, 0x9f, 0x1b, 0x2a, 0xf1, 0xa8, 0xb2, 0xcf, 0x97,
0x11, 0xf5, 0xf2, 0x9d, 0x46, 0x2f, 0xa0, 0xe3, 0x32, 0xc6, 0xa5, 0xab, 0xdf, 0x56, 0x02, 0x79,
0xf8, 0x1f, 0xf8, 0x8d, 0x0a, 0x54, 0x4a, 0xb3, 0x5c, 0xc7, 0x7e, 0x0c, 0xed, 0xfc, 0x02, 0xa8,
0x07, 0x8d, 0x9f, 0xe8, 0x26, 0xdb, 0xec, 0xe4, 0x13, 0xdd, 0x82, 0xbd, 0xb5, 0x1b, 0xae, 0x68,
0xb6, 0xd9, 0xa9, 0xf1, 0xa4, 0xfe, 0xb9, 0x61, 0x7f, 0x01, 0xbd, 0x6a, 0xe5, 0xff, 0x83, 0xc7,
0x3e, 0xec, 0xa9, 0x7e, 0xa0, 0x8f, 0x60, 0xbf, 0x18, 0x72, 0xe4, 0x4a, 0x3f, 0xc3, 0xdf, 0xcc,
0xbd, 0x13, 0x57, 0xfa, 0xe8, 0x0e, 0xb4, 0x7d, 0x2e, 0x64, 0x9a, 0x91, 0x29, 0x53, 0xe2, 0xd0,
0xc1, 0x98, 0xba, 0xf3, 0x29, 0x67, 0x61, 0xfa, 0xd4, 0x4c, 0x62, 0x26, 0x8e, 0xe7, 0x2c, 0xdc,
0xe0, 0x18, 0xa0, 0x68, 0xe8, 0x3b, 0x39, 0xae, 0x0f, 0x9d, 0x88, 0xc6, 0xcb, 0x40, 0x08, 0x35,
0x8b, 0x54, 0x06, 0xcb, 0xae, 0xe1, 0xd7, 0xd0, 0x4d, 0x35, 0x37, 0x56, 0xfd, 0x41, 0x8f, 0xc0,
0xd4, 0x1a, 0x8c, 0xac, 0x7c, 0x68, 0x15, 0x59, 0xb6, 0x8b, 0x55, 0x49, 0xa5, 0xb0, 0x36, 0xfc,
0xa3, 0x0e, 0xdd, 0xb2, 0x6c, 0xa2, 0x6f, 0xe1, 0xf6, 0x37, 0x54, 0xee, 0xfa, 0x2b, 0x54, 0xc0,
0xf6, 0xb5, 0xba, 0x8b, 0x6b, 0x68, 0x04, 0xdd, 0xb2, 0xce, 0xbe, 0x81, 0x7f, 0x3f, 0xb7, 0x77,
0xc9, 0x31, 0xae, 0x7d, 0x66, 0xa0, 0x11, 0x98, 0x7a, 0xdd, 0x4a, 0xb7, 0xaa, 0xbc, 0x7c, 0xfb,
0x78, 0x47, 0x44, 0x17, 0x41, 0x3f, 0xc2, 0xe1, 0x1b, 0xa2, 0x85, 0x0a, 0xf5, 0xb9, 0x4a, 0x0c,
0x6d, 0x7c, 0x5d, 0x8a, 0xae, 0xfe, 0xd5, 0xdd, 0x57, 0xaf, 0x1d, 0xe3, 0xaf, 0xd7, 0x4e, 0xed,
0x97, 0xad, 0x63, 0xbc, 0xda, 0x3a, 0xc6, 0x9f, 0x5b, 0xc7, 0xf8, 0x7b, 0xeb, 0x18, 0xbf, 0xfd,
0xe3, 0xd4, 0x66, 0x4d, 0xf5, 0x97, 0x7d, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0x60,
0xe7, 0xf8, 0xaa, 0x07, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -1415,17 +1415,19 @@ func (m *TopologyInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.Node != nil {
{
size, err := m.Node.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
if len(m.Nodes) > 0 {
for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Nodes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintApi(dAtA, i, uint64(size))
}
i -= size
i = encodeVarintApi(dAtA, i, uint64(size))
i--
dAtA[i] = 0xa
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
@@ -1926,9 +1928,11 @@ func (m *TopologyInfo) Size() (n int) {
}
var l int
_ = l
if m.Node != nil {
l = m.Node.Size()
n += 1 + l + sovApi(uint64(l))
if len(m.Nodes) > 0 {
for _, e := range m.Nodes {
l = e.Size()
n += 1 + l + sovApi(uint64(l))
}
}
return n
}
@@ -2170,8 +2174,13 @@ func (this *TopologyInfo) String() string {
if this == nil {
return "nil"
}
repeatedStringForNodes := "[]*NUMANode{"
for _, f := range this.Nodes {
repeatedStringForNodes += strings.Replace(f.String(), "NUMANode", "NUMANode", 1) + ","
}
repeatedStringForNodes += "}"
s := strings.Join([]string{`&TopologyInfo{`,
`Node:` + strings.Replace(this.Node.String(), "NUMANode", "NUMANode", 1) + `,`,
`Nodes:` + repeatedStringForNodes + `,`,
`}`,
}, "")
return s
@@ -2761,7 +2770,7 @@ func (m *TopologyInfo) Unmarshal(dAtA []byte) error {
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2788,10 +2797,8 @@ func (m *TopologyInfo) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Node == nil {
m.Node = &NUMANode{}
}
if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
m.Nodes = append(m.Nodes, &NUMANode{})
if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex

View File

@@ -74,7 +74,7 @@ message ListAndWatchResponse {
}
message TopologyInfo {
NUMANode node = 1;
repeated NUMANode nodes = 1;
}
message NUMANode {

View File

@@ -304,7 +304,8 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
klog.Infof("Creating device plugin manager: %t", devicePluginEnabled)
if devicePluginEnabled {
cm.deviceManager, err = devicemanager.NewManagerImpl()
cm.deviceManager, err = devicemanager.NewManagerImpl(numaNodeInfo, cm.topologyManager)
cm.topologyManager.AddHintProvider(cm.deviceManager)
} else {
cm.deviceManager, err = devicemanager.NewManagerStub()
}

View File

@@ -50,12 +50,12 @@ func (a *cpuAccumulator) take(cpus cpuset.CPUSet) {
// Returns true if the supplied socket is fully available in `topoDetails`.
func (a *cpuAccumulator) isSocketFree(socketID int) bool {
return a.details.CPUsInSocket(socketID).Size() == a.topo.CPUsPerSocket()
return a.details.CPUsInSockets(socketID).Size() == a.topo.CPUsPerSocket()
}
// Returns true if the supplied core is fully available in `topoDetails`.
func (a *cpuAccumulator) isCoreFree(coreID int) bool {
return a.details.CPUsInCore(coreID).Size() == a.topo.CPUsPerCore()
return a.details.CPUsInCores(coreID).Size() == a.topo.CPUsPerCore()
}
// Returns free socket IDs as a slice sorted by:
@@ -72,14 +72,14 @@ func (a *cpuAccumulator) freeCores() []int {
socketIDs := a.details.Sockets().ToSliceNoSort()
sort.Slice(socketIDs,
func(i, j int) bool {
iCores := a.details.CoresInSocket(socketIDs[i]).Filter(a.isCoreFree)
jCores := a.details.CoresInSocket(socketIDs[j]).Filter(a.isCoreFree)
iCores := a.details.CoresInSockets(socketIDs[i]).Filter(a.isCoreFree)
jCores := a.details.CoresInSockets(socketIDs[j]).Filter(a.isCoreFree)
return iCores.Size() < jCores.Size() || socketIDs[i] < socketIDs[j]
})
coreIDs := []int{}
for _, s := range socketIDs {
coreIDs = append(coreIDs, a.details.CoresInSocket(s).Filter(a.isCoreFree).ToSlice()...)
coreIDs = append(coreIDs, a.details.CoresInSockets(s).Filter(a.isCoreFree).ToSlice()...)
}
return coreIDs
}
@@ -100,25 +100,25 @@ func (a *cpuAccumulator) freeCPUs() []int {
iCore := cores[i]
jCore := cores[j]
iCPUs := a.topo.CPUDetails.CPUsInCore(iCore).ToSlice()
jCPUs := a.topo.CPUDetails.CPUsInCore(jCore).ToSlice()
iCPUs := a.topo.CPUDetails.CPUsInCores(iCore).ToSlice()
jCPUs := a.topo.CPUDetails.CPUsInCores(jCore).ToSlice()
iSocket := a.topo.CPUDetails[iCPUs[0]].SocketID
jSocket := a.topo.CPUDetails[jCPUs[0]].SocketID
// Compute the number of CPUs in the result that reside on the same socket
// as each core.
iSocketColoScore := a.topo.CPUDetails.CPUsInSocket(iSocket).Intersection(a.result).Size()
jSocketColoScore := a.topo.CPUDetails.CPUsInSocket(jSocket).Intersection(a.result).Size()
iSocketColoScore := a.topo.CPUDetails.CPUsInSockets(iSocket).Intersection(a.result).Size()
jSocketColoScore := a.topo.CPUDetails.CPUsInSockets(jSocket).Intersection(a.result).Size()
// Compute the number of CPUs available on the same socket
// as each core.
iSocketFreeScore := a.details.CPUsInSocket(iSocket).Size()
jSocketFreeScore := a.details.CPUsInSocket(jSocket).Size()
iSocketFreeScore := a.details.CPUsInSockets(iSocket).Size()
jSocketFreeScore := a.details.CPUsInSockets(jSocket).Size()
// Compute the number of available CPUs on each core.
iCoreFreeScore := a.details.CPUsInCore(iCore).Size()
jCoreFreeScore := a.details.CPUsInCore(jCore).Size()
iCoreFreeScore := a.details.CPUsInCores(iCore).Size()
jCoreFreeScore := a.details.CPUsInCores(jCore).Size()
return iSocketColoScore > jSocketColoScore ||
iSocketFreeScore < jSocketFreeScore ||
@@ -129,7 +129,7 @@ func (a *cpuAccumulator) freeCPUs() []int {
// For each core, append sorted CPU IDs to result.
for _, core := range cores {
result = append(result, a.details.CPUsInCore(core).ToSlice()...)
result = append(result, a.details.CPUsInCores(core).ToSlice()...)
}
return result
}
@@ -161,7 +161,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num
if acc.needs(acc.topo.CPUsPerSocket()) {
for _, s := range acc.freeSockets() {
klog.V(4).Infof("[cpumanager] takeByTopology: claiming socket [%d]", s)
acc.take(acc.details.CPUsInSocket(s))
acc.take(acc.details.CPUsInSockets(s))
if acc.isSatisfied() {
return acc.result, nil
}
@@ -176,7 +176,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num
if acc.needs(acc.topo.CPUsPerCore()) {
for _, c := range acc.freeCores() {
klog.V(4).Infof("[cpumanager] takeByTopology: claiming core [%d]", c)
acc.take(acc.details.CPUsInCore(c))
acc.take(acc.details.CPUsInCores(c))
if acc.isSatisfied() {
return acc.result, nil
}

View File

@@ -258,7 +258,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity soc
if numaAffinity != nil {
alignedCPUs := cpuset.NewCPUSet()
for _, numaNodeID := range numaAffinity.GetSockets() {
alignedCPUs = alignedCPUs.Union(p.assignableCPUs(s).Intersection(p.topology.CPUDetails.CPUsInNUMANode(numaNodeID)))
alignedCPUs = alignedCPUs.Union(p.assignableCPUs(s).Intersection(p.topology.CPUDetails.CPUsInNUMANodes(numaNodeID)))
}
numAlignedToAlloc := alignedCPUs.Size()

View File

@@ -90,13 +90,15 @@ func (d CPUDetails) NUMANodes() cpuset.CPUSet {
return b.Result()
}
// NUMANodesInSocket returns all of the logical NUMANode IDs associated with
// the given Socket ID in this CPUDetails.
func (d CPUDetails) NUMANodesInSocket(id int) cpuset.CPUSet {
// NUMANodesInSockets returns all of the logical NUMANode IDs associated with
// the given socket IDs in this CPUDetails.
func (d CPUDetails) NUMANodesInSockets(ids ...int) cpuset.CPUSet {
b := cpuset.NewBuilder()
for _, info := range d {
if info.SocketID == id {
b.Add(info.NUMANodeID)
for _, id := range ids {
for _, info := range d {
if info.SocketID == id {
b.Add(info.NUMANodeID)
}
}
}
return b.Result()
@@ -112,25 +114,29 @@ func (d CPUDetails) Sockets() cpuset.CPUSet {
return b.Result()
}
// CPUsInSocket returns all of the logical CPU IDs associated with the
// given socket ID in this CPUDetails.
func (d CPUDetails) CPUsInSocket(id int) cpuset.CPUSet {
// CPUsInSockets returns all of the logical CPU IDs associated with the given
// socket IDs in this CPUDetails.
func (d CPUDetails) CPUsInSockets(ids ...int) cpuset.CPUSet {
b := cpuset.NewBuilder()
for cpu, info := range d {
if info.SocketID == id {
b.Add(cpu)
for _, id := range ids {
for cpu, info := range d {
if info.SocketID == id {
b.Add(cpu)
}
}
}
return b.Result()
}
// SocketsInNUMANode returns all of the logical Socket IDs associated with the
// given NUMANode ID in this CPUDetails.
func (d CPUDetails) SocketsInNUMANode(id int) cpuset.CPUSet {
// SocketsInNUMANodes returns all of the logical Socket IDs associated with the
// given NUMANode IDs in this CPUDetails.
func (d CPUDetails) SocketsInNUMANodes(ids ...int) cpuset.CPUSet {
b := cpuset.NewBuilder()
for _, info := range d {
if info.NUMANodeID == id {
b.Add(info.SocketID)
for _, id := range ids {
for _, info := range d {
if info.NUMANodeID == id {
b.Add(info.SocketID)
}
}
}
return b.Result()
@@ -146,25 +152,29 @@ func (d CPUDetails) Cores() cpuset.CPUSet {
return b.Result()
}
// CoresInNUMANode returns all of the core IDs associated with the given
// NUMA ID in this CPUDetails.
func (d CPUDetails) CoresInNUMANode(id int) cpuset.CPUSet {
// CoresInNUMANodes returns all of the core IDs associated with the given
// NUMANode IDs in this CPUDetails.
func (d CPUDetails) CoresInNUMANodes(ids ...int) cpuset.CPUSet {
b := cpuset.NewBuilder()
for _, info := range d {
if info.NUMANodeID == id {
b.Add(info.CoreID)
for _, id := range ids {
for _, info := range d {
if info.NUMANodeID == id {
b.Add(info.CoreID)
}
}
}
return b.Result()
}
// CoresInSocket returns all of the core IDs associated with the given
// socket ID in this CPUDetails.
func (d CPUDetails) CoresInSocket(id int) cpuset.CPUSet {
// CoresInSockets returns all of the core IDs associated with the given socket
// IDs in this CPUDetails.
func (d CPUDetails) CoresInSockets(ids ...int) cpuset.CPUSet {
b := cpuset.NewBuilder()
for _, info := range d {
if info.SocketID == id {
b.Add(info.CoreID)
for _, id := range ids {
for _, info := range d {
if info.SocketID == id {
b.Add(info.CoreID)
}
}
}
return b.Result()
@@ -179,25 +189,29 @@ func (d CPUDetails) CPUs() cpuset.CPUSet {
return b.Result()
}
// CPUsInNUMANode returns all of the logical CPU IDs associated with the given
// NUMANode ID in this CPUDetails.
func (d CPUDetails) CPUsInNUMANode(id int) cpuset.CPUSet {
// CPUsInNUMANodes returns all of the logical CPU IDs associated with the given
// NUMANode IDs in this CPUDetails.
func (d CPUDetails) CPUsInNUMANodes(ids ...int) cpuset.CPUSet {
b := cpuset.NewBuilder()
for cpu, info := range d {
if info.NUMANodeID == id {
b.Add(cpu)
for _, id := range ids {
for cpu, info := range d {
if info.NUMANodeID == id {
b.Add(cpu)
}
}
}
return b.Result()
}
// CPUsInCore returns all of the logical CPU IDs associated with the
// given core ID in this CPUDetails.
func (d CPUDetails) CPUsInCore(id int) cpuset.CPUSet {
// CPUsInCores returns all of the logical CPU IDs associated with the given
// core IDs in this CPUDetails.
func (d CPUDetails) CPUsInCores(ids ...int) cpuset.CPUSet {
b := cpuset.NewBuilder()
for cpu, info := range d {
if info.CoreID == id {
b.Add(cpu)
for _, id := range ids {
for cpu, info := range d {
if info.CoreID == id {
b.Add(cpu)
}
}
}
return b.Result()
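Since the CPUDetails helpers above now accept a variadic list of IDs, callers can query several NUMA nodes, sockets, or cores in one call. A brief sketch, using a hypothetical hand-built four-CPU topology, of how the updated helpers behave:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
)

func main() {
    // Hypothetical layout: 4 logical CPUs, 2 NUMA nodes, 2 sockets,
    // 2 hyperthreads per core.
    details := topology.CPUDetails{
        0: {NUMANodeID: 0, SocketID: 0, CoreID: 0},
        1: {NUMANodeID: 0, SocketID: 0, CoreID: 0},
        2: {NUMANodeID: 1, SocketID: 1, CoreID: 1},
        3: {NUMANodeID: 1, SocketID: 1, CoreID: 1},
    }

    // Multiple IDs in a single call.
    fmt.Println(details.CPUsInNUMANodes(0, 1).ToSlice()) // expected: [0 1 2 3]
    fmt.Println(details.SocketsInNUMANodes(0, 1).Size()) // expected: 2

    // A single ID still works, matching the old per-ID helpers.
    fmt.Println(details.CPUsInSockets(1).ToSlice())  // expected: [2 3]
    fmt.Println(details.CoresInSockets(0).ToSlice()) // expected: [0]
}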

View File

@@ -71,14 +71,26 @@ func (m *manager) GetTopologyHints(pod v1.Pod, container v1.Container) map[strin
// bits set as the narrowest matching NUMANodeAffinity with 'Preferred: true', and
// marking all others with 'Preferred: false'.
func (m *manager) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, request int) []topologymanager.TopologyHint {
// Initialize minAffinity to a full affinity mask.
minAffinity, _ := socketmask.NewSocketMask()
minAffinity.Fill()
// Initialize minAffinitySize to include all NUMA Nodes.
minAffinitySize := m.topology.CPUDetails.NUMANodes().Size()
// Initialize minSocketsOnMinAffinity to include all Sockets.
minSocketsOnMinAffinity := m.topology.CPUDetails.Sockets().Size()
// Iterate through all combinations of socketMasks and build hints from them.
hints := []topologymanager.TopologyHint{}
socketmask.IterateSocketMasks(m.topology.CPUDetails.NUMANodes().ToSlice(), func(mask socketmask.SocketMask) {
// Check to see if we have enough CPUs available on the current
// First, update minAffinitySize and minSocketsOnMinAffinity for the
// current request size.
cpusInMask := m.topology.CPUDetails.CPUsInNUMANodes(mask.GetSockets()...).Size()
socketsInMask := m.topology.CPUDetails.SocketsInNUMANodes(mask.GetSockets()...).Size()
if cpusInMask >= request && mask.Count() < minAffinitySize {
minAffinitySize = mask.Count()
if socketsInMask < minSocketsOnMinAffinity {
minSocketsOnMinAffinity = socketsInMask
}
}
// Then check to see if we have enough CPUs available on the current
// SocketMask to satisfy the CPU request.
numMatching := 0
for _, c := range availableCPUs.ToSlice() {
@@ -99,20 +111,19 @@ func (m *manager) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, request
NUMANodeAffinity: mask,
Preferred: false,
})
// Update minAffinity if relevant
if mask.IsNarrowerThan(minAffinity) {
minAffinity = mask
}
})
// Loop back through all hints and update the 'Preferred' field based on
// counting the number of bits set in the affinity mask and comparing it
// to the minAffinity. Only those with an equal number of bits set will be
// considered preferred.
// to the minAffinitySize. Only those with an equal number of bits set (and
// with a minimal set of sockets) will be considered preferred.
for i := range hints {
if hints[i].NUMANodeAffinity.Count() == minAffinity.Count() {
hints[i].Preferred = true
if hints[i].NUMANodeAffinity.Count() == minAffinitySize {
nodes := hints[i].NUMANodeAffinity.GetSockets()
numSockets := m.topology.CPUDetails.SocketsInNUMANodes(nodes...).Size()
if numSockets == minSocketsOnMinAffinity {
hints[i].Preferred = true
}
}
}
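The updated hint generation above tracks minAffinitySize and, among masks of that minimal size, only marks hints Preferred when they also span the fewest sockets. Both this generator and the Device Manager's (later in this diff) enumerate candidate masks with the socketmask package; below is a small sketch of that package's helpers as used here, with a hypothetical two-node machine.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask"
)

func main() {
    numaNodes := []int{0, 1}

    // IterateSocketMasks visits every non-empty combination of the given
    // NUMA nodes ({0}, {1} and {0,1} here); one candidate TopologyHint is
    // built per mask.
    socketmask.IterateSocketMasks(numaNodes, func(mask socketmask.SocketMask) {
        fmt.Printf("mask=%v bits=%d nodes=%v\n", mask, mask.Count(), mask.GetSockets())
    })

    // Count() is what gets compared against minAffinitySize: only hints
    // whose mask has the minimal number of bits set can be Preferred.
    narrow, _ := socketmask.NewSocketMask(0)
    wide, _ := socketmask.NewSocketMask(0, 1)
    fmt.Println(narrow.IsNarrowerThan(wide)) // expected: true
}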

View File

@@ -75,28 +75,18 @@ func TestGetTopologyHints(t *testing.T) {
1: cpuset.NewCPUSet(3, 9, 4, 10, 5, 11),
}
topology, _ := topology.Discover(&machineInfo, numaNodeInfo)
m := manager{
policy: &staticPolicy{
topology: topology,
},
state: &mockState{
defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
},
topology: topology,
}
tcases := []struct {
name string
pod v1.Pod
container v1.Container
defaultCPUSet cpuset.CPUSet
expectedHints []topologymanager.TopologyHint
}{
{
name: "Request 2 CPUs; 4 available on Socket 0, 6 available on Socket 1",
pod: *testPod1,
container: *testContainer1,
name: "Request 2 CPUs, 4 available on NUMA 0, 6 available on NUMA 1",
pod: *testPod1,
container: *testContainer1,
defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expectedHints: []topologymanager.TopologyHint{
{
NUMANodeAffinity: firstSocketMask,
@@ -113,9 +103,10 @@ func TestGetTopologyHints(t *testing.T) {
},
},
{
name: "Request 5 CPUs; 4 available on Socket 0, 6 available on Socket 1",
pod: *testPod2,
container: *testContainer2,
name: "Request 5 CPUs, 4 available on NUMA 0, 6 available on NUMA 1",
pod: *testPod2,
container: *testContainer2,
defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expectedHints: []topologymanager.TopologyHint{
{
NUMANodeAffinity: secondSocketMask,
@@ -128,9 +119,10 @@ func TestGetTopologyHints(t *testing.T) {
},
},
{
name: "Request 7 CPUs, 4 available on Socket 0, 6 available on Socket 1",
pod: *testPod3,
container: *testContainer3,
name: "Request 7 CPUs, 4 available on NUMA 0, 6 available on NUMA 1",
pod: *testPod3,
container: *testContainer3,
defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expectedHints: []topologymanager.TopologyHint{
{
NUMANodeAffinity: crossSocketMask,
@@ -139,13 +131,38 @@ func TestGetTopologyHints(t *testing.T) {
},
},
{
name: "Request 11 CPUs, 4 available on Socket 0, 6 available on Socket 1",
name: "Request 11 CPUs, 4 available on NUMA 0, 6 available on NUMA 1",
pod: *testPod4,
container: *testContainer4,
defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expectedHints: nil,
},
{
name: "Request 2 CPUs, 1 available on NUMA 0, 1 available on NUMA 1",
pod: *testPod1,
container: *testContainer1,
defaultCPUSet: cpuset.NewCPUSet(0, 3),
expectedHints: []topologymanager.TopologyHint{
{
NUMANodeAffinity: crossSocketMask,
Preferred: false,
},
},
},
}
for _, tc := range tcases {
topology, _ := topology.Discover(&machineInfo, numaNodeInfo)
m := manager{
policy: &staticPolicy{
topology: topology,
},
state: &mockState{
defaultCPUSet: tc.defaultCPUSet,
},
topology: topology,
}
hints := m.GetTopologyHints(tc.pod, tc.container)[string(v1.ResourceCPU)]
if len(tc.expectedHints) == 0 && len(hints) == 0 {
continue

View File

@@ -8,6 +8,7 @@ go_library(
"manager.go",
"manager_stub.go",
"pod_devices.go",
"topology_hints.go",
"types.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager",
@@ -20,7 +21,10 @@ go_library(
"//pkg/kubelet/apis/podresources/v1alpha1:go_default_library",
"//pkg/kubelet/checkpointmanager:go_default_library",
"//pkg/kubelet/checkpointmanager/errors:go_default_library",
"//pkg/kubelet/cm/cpumanager/topology:go_default_library",
"//pkg/kubelet/cm/devicemanager/checkpoint:go_default_library",
"//pkg/kubelet/cm/topologymanager:go_default_library",
"//pkg/kubelet/cm/topologymanager/socketmask:go_default_library",
"//pkg/kubelet/config:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
@@ -42,12 +46,15 @@ go_test(
srcs = [
"endpoint_test.go",
"manager_test.go",
"topology_hints_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library",
"//pkg/kubelet/apis/pluginregistration/v1:go_default_library",
"//pkg/kubelet/checkpointmanager:go_default_library",
"//pkg/kubelet/cm/topologymanager:go_default_library",
"//pkg/kubelet/cm/topologymanager/socketmask:go_default_library",
"//pkg/kubelet/config:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/pluginmanager:go_default_library",

View File

@@ -22,6 +22,7 @@ import (
"net"
"os"
"path/filepath"
"sort"
"sync"
"time"
@@ -38,7 +39,10 @@ import (
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
cputopology "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
@@ -79,6 +83,9 @@ type ManagerImpl struct {
// e.g. a new device is advertised, two old devices are deleted and a running device fails.
callback monitorCallback
// allDevices is a map, keyed by resource name, of all the devices currently registered with the device manager.
allDevices map[string]map[string]pluginapi.Device
// healthyDevices contains all of the registered healthy resourceNames and their exported device IDs.
healthyDevices map[string]sets.String
@@ -91,6 +98,12 @@ type ManagerImpl struct {
// podDevices contains pod to allocated device mapping.
podDevices podDevices
checkpointManager checkpointmanager.CheckpointManager
// List of NUMA Nodes available on the underlying machine
numaNodes []int
// Store of Topology Affinities that the Device Manager can query.
topologyAffinityStore topologymanager.Store
}
type endpointInfo struct {
@@ -104,27 +117,35 @@ func (s *sourcesReadyStub) AddSource(source string) {}
func (s *sourcesReadyStub) AllReady() bool { return true }
// NewManagerImpl creates a new manager.
func NewManagerImpl() (*ManagerImpl, error) {
return newManagerImpl(pluginapi.KubeletSocket)
func NewManagerImpl(numaNodeInfo cputopology.NUMANodeInfo, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) {
return newManagerImpl(pluginapi.KubeletSocket, numaNodeInfo, topologyAffinityStore)
}
func newManagerImpl(socketPath string) (*ManagerImpl, error) {
func newManagerImpl(socketPath string, numaNodeInfo cputopology.NUMANodeInfo, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) {
klog.V(2).Infof("Creating Device Plugin manager at %s", socketPath)
if socketPath == "" || !filepath.IsAbs(socketPath) {
return nil, fmt.Errorf(errBadSocket+" %s", socketPath)
}
var numaNodes []int
for node := range numaNodeInfo {
numaNodes = append(numaNodes, node)
}
dir, file := filepath.Split(socketPath)
manager := &ManagerImpl{
endpoints: make(map[string]endpointInfo),
socketname: file,
socketdir: dir,
healthyDevices: make(map[string]sets.String),
unhealthyDevices: make(map[string]sets.String),
allocatedDevices: make(map[string]sets.String),
podDevices: make(podDevices),
socketname: file,
socketdir: dir,
allDevices: make(map[string]map[string]pluginapi.Device),
healthyDevices: make(map[string]sets.String),
unhealthyDevices: make(map[string]sets.String),
allocatedDevices: make(map[string]sets.String),
podDevices: make(podDevices),
numaNodes: numaNodes,
topologyAffinityStore: topologyAffinityStore,
}
manager.callback = manager.genericDeviceUpdateCallback
@@ -145,7 +166,9 @@ func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, devices [
m.mutex.Lock()
m.healthyDevices[resourceName] = sets.NewString()
m.unhealthyDevices[resourceName] = sets.NewString()
m.allDevices[resourceName] = make(map[string]pluginapi.Device)
for _, dev := range devices {
m.allDevices[resourceName][dev.ID] = dev
if dev.Health == pluginapi.Healthy {
m.healthyDevices[resourceName].Insert(dev.ID)
} else {
@@ -633,7 +656,14 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
if available.Len() < needed {
return nil, fmt.Errorf("requested number of devices unavailable for %s. Requested: %d, Available: %d", resource, needed, available.Len())
}
// By default, pull devices from the unsorted list of available devices.
allocated := available.UnsortedList()[:needed]
// If topology alignment is desired, update allocated to the set of devices
// with the best alignment.
hint := m.topologyAffinityStore.GetAffinity(podUID, contName)
if m.deviceHasTopologyAlignment(resource) && hint.NUMANodeAffinity != nil {
allocated = m.takeByTopology(resource, available, hint.NUMANodeAffinity, needed)
}
// Updates m.allocatedDevices with allocated devices to prevent them
// from being allocated to other pods/containers, given that we are
// not holding lock during the rpc call.
@@ -644,6 +674,74 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
return devices, nil
}
func (m *ManagerImpl) takeByTopology(resource string, available sets.String, affinity socketmask.SocketMask, request int) []string {
// Build a map of NUMA Nodes to the devices associated with them. A
// device may be associated with multiple NUMA nodes at the same time. If an
// available device does not have any NUMA Nodes associated with it, add it
// to the list for the fake NUMA Node -1.
perNodeDevices := make(map[int]sets.String)
for d := range available {
var nodes []int
if m.allDevices[resource][d].Topology != nil {
for _, node := range m.allDevices[resource][d].Topology.Nodes {
nodes = append(nodes, int(node.ID))
}
}
if len(nodes) == 0 {
nodes = []int{-1}
}
for _, node := range nodes {
if _, ok := perNodeDevices[node]; !ok {
perNodeDevices[node] = sets.NewString()
}
perNodeDevices[node].Insert(d)
}
}
// Get a flat list of all of the nodes associated with available devices.
var nodes []int
for node := range perNodeDevices {
nodes = append(nodes, node)
}
// Sort the list of nodes by how many devices they contain.
sort.Slice(nodes, func(i, j int) bool {
return perNodeDevices[nodes[i]].Len() < perNodeDevices[nodes[j]].Len()
})
// Split the available devices into three lists. Devices in the first list
// come from NUMA Nodes contained in the affinity mask. Devices in the
// second list come from NUMA Nodes not in the affinity mask. Devices in
// the third list have no NUMA Node association at all (i.e. they map to
// the fake NUMA Node -1). Because the per-device lookup below walks the
// sorted list of NUMA nodes in order, a device associated with multiple
// NUMA Nodes is attributed to the node with the fewest devices.
var fromAffinity []string
var notFromAffinity []string
var withoutTopology []string
for d := range available {
// Since the same device may be associated with multiple NUMA Nodes, we
// need to be careful not to add it to more than one list. The logic
// below ensures this by breaking out after the first NUMA node that
// has the device is encountered.
for _, n := range nodes {
if perNodeDevices[n].Has(d) {
if n == -1 {
withoutTopology = append(withoutTopology, d)
} else if affinity.IsSet(n) {
fromAffinity = append(fromAffinity, d)
} else {
notFromAffinity = append(notFromAffinity, d)
}
break
}
}
}
// Concatenate the lists above and return the first 'request' devices from the result.
return append(append(fromAffinity, notFromAffinity...), withoutTopology...)[:request]
}
// allocateContainerResources attempts to allocate all of the required device
// plugin resources for the input container, issues an Allocate rpc request
// for each new device resource requirement, processes their AllocateResponses,
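To make the ordering produced by takeByTopology above concrete, here is a standalone sketch in plain Go (hypothetical device names, no kubelet imports) of the same three-bucket split and concatenation: devices on NUMA nodes in the affinity mask come first, then devices on other NUMA nodes, then devices without topology information, truncated to the requested count.

package main

import "fmt"

func main() {
    // Device -> NUMA node; -1 stands in for "no topology information",
    // mirroring the fake NUMA Node -1 used above.
    deviceNode := map[string]int{
        "dev0": 0,  // on a node inside the affinity mask {0}
        "dev1": 0,  // on a node inside the affinity mask {0}
        "dev2": 1,  // on a node outside the mask
        "dev3": -1, // no NUMA association
    }
    affinity := map[int]bool{0: true}
    request := 3

    var fromAffinity, notFromAffinity, withoutTopology []string
    for d, n := range deviceNode {
        switch {
        case n == -1:
            withoutTopology = append(withoutTopology, d)
        case affinity[n]:
            fromAffinity = append(fromAffinity, d)
        default:
            notFromAffinity = append(notFromAffinity, d)
        }
    }

    allocated := append(append(fromAffinity, notFromAffinity...), withoutTopology...)[:request]
    fmt.Println(allocated) // e.g. [dev0 dev1 dev2]: aligned devices are taken first
}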

View File

@@ -19,6 +19,7 @@ package devicemanager
import (
v1 "k8s.io/api/core/v1"
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
@@ -63,6 +64,11 @@ func (h *ManagerStub) GetWatcherHandler() cache.PluginHandler {
return nil
}
// GetTopologyHints returns an empty TopologyHint map
func (h *ManagerStub) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
return map[string][]topologymanager.TopologyHint{}
}
// GetDevices returns nil
func (h *ManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
return nil

View File

@@ -37,6 +37,7 @@ import (
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
watcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
@@ -60,9 +61,10 @@ func tmpSocketDir() (socketDir, socketName, pluginSocketName string, err error)
func TestNewManagerImpl(t *testing.T) {
socketDir, socketName, _, err := tmpSocketDir()
topologyStore := topologymanager.NewFakeManager()
require.NoError(t, err)
defer os.RemoveAll(socketDir)
_, err = newManagerImpl(socketName)
_, err = newManagerImpl(socketName, nil, topologyStore)
require.NoError(t, err)
os.RemoveAll(socketDir)
}
@@ -221,7 +223,8 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
}
func setupDeviceManager(t *testing.T, devs []*pluginapi.Device, callback monitorCallback, socketName string) (Manager, <-chan interface{}) {
m, err := newManagerImpl(socketName)
topologyStore := topologymanager.NewFakeManager()
m, err := newManagerImpl(socketName, nil, topologyStore)
require.NoError(t, err)
updateChan := make(chan interface{})
@@ -288,9 +291,10 @@ func cleanup(t *testing.T, m Manager, p *Stub) {
func TestUpdateCapacityAllocatable(t *testing.T) {
socketDir, socketName, _, err := tmpSocketDir()
topologyStore := topologymanager.NewFakeManager()
require.NoError(t, err)
defer os.RemoveAll(socketDir)
testManager, err := newManagerImpl(socketName)
testManager, err := newManagerImpl(socketName, nil, topologyStore)
as := assert.New(t)
as.NotNil(testManager)
as.Nil(err)
@@ -594,16 +598,17 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
return nil, err
}
testManager := &ManagerImpl{
socketdir: tmpDir,
callback: monitorCallback,
healthyDevices: make(map[string]sets.String),
unhealthyDevices: make(map[string]sets.String),
allocatedDevices: make(map[string]sets.String),
endpoints: make(map[string]endpointInfo),
podDevices: make(podDevices),
activePods: activePods,
sourcesReady: &sourcesReadyStub{},
checkpointManager: ckm,
socketdir: tmpDir,
callback: monitorCallback,
healthyDevices: make(map[string]sets.String),
unhealthyDevices: make(map[string]sets.String),
allocatedDevices: make(map[string]sets.String),
endpoints: make(map[string]endpointInfo),
podDevices: make(podDevices),
topologyAffinityStore: topologymanager.NewFakeManager(),
activePods: activePods,
sourcesReady: &sourcesReadyStub{},
checkpointManager: ckm,
}
for _, res := range testRes {

View File

@@ -0,0 +1,139 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devicemanager
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask"
)
// GetTopologyHints implements the TopologyManager HintProvider Interface which
// ensures the Device Manager is consulted when Topology Aware Hints for each
// container are created.
func (m *ManagerImpl) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
deviceHints := make(map[string][]topologymanager.TopologyHint)
for resourceObj, requestedObj := range container.Resources.Limits {
resource := string(resourceObj)
requested := int(requestedObj.Value())
if m.isDevicePluginResource(resource) {
if aligned := m.deviceHasTopologyAlignment(resource); !aligned {
klog.Infof("[devicemanager] Resource '%v' does not have a topology preference", resource)
deviceHints[resource] = nil
continue
}
available := m.getAvailableDevices(resource)
if available.Len() < requested {
klog.Errorf("[devicemanager] Unable to generate topology hints: requested number of devices unavailable for '%s': requested: %d, available: %d", resource, requested, available.Len())
deviceHints[resource] = []topologymanager.TopologyHint{}
continue
}
deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, requested)
}
}
return deviceHints
}
func (m *ManagerImpl) deviceHasTopologyAlignment(resource string) bool {
// If any device has Topology set, we assume they care about alignment.
for device := range m.allDevices[resource] {
if m.allDevices[resource][device].Topology != nil {
return true
}
}
return false
}
func (m *ManagerImpl) getAvailableDevices(resource string) sets.String {
// Gets Devices in use.
m.updateAllocatedDevices(m.activePods())
// Strip all devices in use from the list of healthy ones.
return m.healthyDevices[resource].Difference(m.allocatedDevices[resource])
}
func (m *ManagerImpl) generateDeviceTopologyHints(resource string, devices sets.String, request int) []topologymanager.TopologyHint {
// Initialize minAffinitySize to include all NUMA Nodes
minAffinitySize := len(m.numaNodes)
// Iterate through all combinations of NUMA Nodes and build hints from them.
hints := []topologymanager.TopologyHint{}
socketmask.IterateSocketMasks(m.numaNodes, func(mask socketmask.SocketMask) {
// First, update minAffinitySize for the current request size.
devicesInMask := 0
for _, device := range m.allDevices[resource] {
if device.Topology == nil {
continue
}
for _, node := range device.Topology.Nodes {
if mask.IsSet(int(node.ID)) {
devicesInMask++
break
}
}
}
if devicesInMask >= request && mask.Count() < minAffinitySize {
minAffinitySize = mask.Count()
}
// Then check to see if we have enough devices available on the current
// NUMA Node combination to satisfy the device request.
numMatching := 0
for d := range devices {
if m.allDevices[resource][d].Topology == nil {
continue
}
for _, node := range m.allDevices[resource][d].Topology.Nodes {
if mask.IsSet(int(node.ID)) {
numMatching++
break
}
}
}
// If we don't, then move on to the next combination.
if numMatching < request {
return
}
// Otherwise, create a new hint from the NUMA mask and add it to the
// list of hints. We set all hint preferences to 'false' on the first
// pass through.
hints = append(hints, topologymanager.TopologyHint{
NUMANodeAffinity: mask,
Preferred: false,
})
})
// Loop back through all hints and update the 'Preferred' field based on
// counting the number of bits set in the affinity mask and comparing it
// to minAffinitySize. Only those with an equal number of bits set will be
// considered preferred.
for i := range hints {
if hints[i].NUMANodeAffinity.Count() == minAffinitySize {
hints[i].Preferred = true
}
}
return hints
}

View File

@@ -0,0 +1,514 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devicemanager
import (
"reflect"
"sort"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask"
)
type mockAffinityStore struct {
hint topologymanager.TopologyHint
}
func (m *mockAffinityStore) GetAffinity(podUID string, containerName string) topologymanager.TopologyHint {
return m.hint
}
func makeNUMADevice(id string, numa int) pluginapi.Device {
return pluginapi.Device{
ID: id,
Topology: &pluginapi.TopologyInfo{Nodes: []*pluginapi.NUMANode{{ID: int64(numa)}}},
}
}
func topologyHintLessThan(a topologymanager.TopologyHint, b topologymanager.TopologyHint) bool {
if a.Preferred != b.Preferred {
return a.Preferred == true
}
return a.NUMANodeAffinity.IsNarrowerThan(b.NUMANodeAffinity)
}
func makeSocketMask(sockets ...int) socketmask.SocketMask {
mask, _ := socketmask.NewSocketMask(sockets...)
return mask
}
func TestGetTopologyHints(t *testing.T) {
tcases := []struct {
description string
request map[string]string
devices map[string][]pluginapi.Device
allocatedDevices map[string][]string
expectedHints map[string][]topologymanager.TopologyHint
}{
{
description: "Single Request, no alignment",
request: map[string]string{
"testdevice": "1",
},
devices: map[string][]pluginapi.Device{
"testdevice": {
{ID: "Dev1"},
{ID: "Dev2"},
},
},
expectedHints: map[string][]topologymanager.TopologyHint{
"testdevice": nil,
},
},
{
description: "Single Request, only one with alignment",
request: map[string]string{
"testdevice": "1",
},
devices: map[string][]pluginapi.Device{
"testdevice": {
{ID: "Dev1"},
makeNUMADevice("Dev2", 1),
},
},
expectedHints: map[string][]topologymanager.TopologyHint{
"testdevice": {
{
NUMANodeAffinity: makeSocketMask(1),
Preferred: true,
},
{
NUMANodeAffinity: makeSocketMask(0, 1),
Preferred: false,
},
},
},
},
{
description: "Single Request, one device per socket",
request: map[string]string{
"testdevice": "1",
},
devices: map[string][]pluginapi.Device{
"testdevice": {
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
},
},
expectedHints: map[string][]topologymanager.TopologyHint{
"testdevice": {
{
NUMANodeAffinity: makeSocketMask(0),
Preferred: true,
},
{
NUMANodeAffinity: makeSocketMask(1),
Preferred: true,
},
{
NUMANodeAffinity: makeSocketMask(0, 1),
Preferred: false,
},
},
},
},
{
description: "Request for 2, one device per socket",
request: map[string]string{
"testdevice": "2",
},
devices: map[string][]pluginapi.Device{
"testdevice": {
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
},
},
expectedHints: map[string][]topologymanager.TopologyHint{
"testdevice": {
{
NUMANodeAffinity: makeSocketMask(0, 1),
Preferred: true,
},
},
},
},
{
description: "Request for 2, 2 devices per socket",
request: map[string]string{
"testdevice": "2",
},
devices: map[string][]pluginapi.Device{
"testdevice": {
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
makeNUMADevice("Dev3", 0),
makeNUMADevice("Dev4", 1),
},
},
expectedHints: map[string][]topologymanager.TopologyHint{
"testdevice": {
{
NUMANodeAffinity: makeSocketMask(0),
Preferred: true,
},
{
NUMANodeAffinity: makeSocketMask(1),
Preferred: true,
},
{
NUMANodeAffinity: makeSocketMask(0, 1),
Preferred: false,
},
},
},
},
{
description: "Request for 2, optimal on 1 NUMA node, forced cross-NUMA",
request: map[string]string{
"testdevice": "2",
},
devices: map[string][]pluginapi.Device{
"testdevice": {
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
makeNUMADevice("Dev3", 0),
makeNUMADevice("Dev4", 1),
},
},
allocatedDevices: map[string][]string{
"testdevice": {"Dev1", "Dev2"},
},
expectedHints: map[string][]topologymanager.TopologyHint{
"testdevice": {
{
NUMANodeAffinity: makeSocketMask(0, 1),
Preferred: false,
},
},
},
},
{
description: "2 device types, mixed configuration",
request: map[string]string{
"testdevice1": "2",
"testdevice2": "1",
},
devices: map[string][]pluginapi.Device{
"testdevice1": {
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
makeNUMADevice("Dev3", 0),
makeNUMADevice("Dev4", 1),
},
"testdevice2": {
makeNUMADevice("Dev1", 0),
},
},
expectedHints: map[string][]topologymanager.TopologyHint{
"testdevice1": {
{
NUMANodeAffinity: makeSocketMask(0),
Preferred: true,
},
{
NUMANodeAffinity: makeSocketMask(1),
Preferred: true,
},
{
NUMANodeAffinity: makeSocketMask(0, 1),
Preferred: false,
},
},
"testdevice2": {
{
NUMANodeAffinity: makeSocketMask(0),
Preferred: true,
},
{
NUMANodeAffinity: makeSocketMask(0, 1),
Preferred: false,
},
},
},
},
}
for _, tc := range tcases {
resourceList := v1.ResourceList{}
for r := range tc.request {
resourceList[v1.ResourceName(r)] = resource.MustParse(tc.request[r])
}
pod := makePod(resourceList)
m := ManagerImpl{
allDevices: make(map[string]map[string]pluginapi.Device),
healthyDevices: make(map[string]sets.String),
allocatedDevices: make(map[string]sets.String),
podDevices: make(podDevices),
sourcesReady: &sourcesReadyStub{},
activePods: func() []*v1.Pod { return []*v1.Pod{} },
numaNodes: []int{0, 1},
}
for r := range tc.devices {
m.allDevices[r] = make(map[string]pluginapi.Device)
m.healthyDevices[r] = sets.NewString()
for _, d := range tc.devices[r] {
m.allDevices[r][d.ID] = d
m.healthyDevices[r].Insert(d.ID)
}
}
for r := range tc.allocatedDevices {
m.allocatedDevices[r] = sets.NewString()
for _, d := range tc.allocatedDevices[r] {
m.allocatedDevices[r].Insert(d)
}
}
hints := m.GetTopologyHints(*pod, pod.Spec.Containers[0])
for r := range tc.expectedHints {
sort.SliceStable(hints[r], func(i, j int) bool {
return topologyHintLessThan(hints[r][i], hints[r][j])
})
sort.SliceStable(tc.expectedHints[r], func(i, j int) bool {
return topologyHintLessThan(tc.expectedHints[r][i], tc.expectedHints[r][j])
})
if !reflect.DeepEqual(hints[r], tc.expectedHints[r]) {
t.Errorf("%v: Expected result to be %v, got %v", tc.description, tc.expectedHints[r], hints[r])
}
}
}
}
func TestTopologyAlignedAllocation(t *testing.T) {
tcases := []struct {
description string
resource string
request int
devices []pluginapi.Device
allocatedDevices []string
hint topologymanager.TopologyHint
expectedAllocation int
expectedAlignment map[int]int
}{
{
description: "Single Request, no alignment",
resource: "resource",
request: 1,
devices: []pluginapi.Device{
{ID: "Dev1"},
{ID: "Dev2"},
},
hint: topologymanager.TopologyHint{
NUMANodeAffinity: makeSocketMask(0, 1),
Preferred: true,
},
expectedAllocation: 1,
expectedAlignment: map[int]int{},
},
{
description: "Request for 1, partial alignment",
resource: "resource",
request: 1,
devices: []pluginapi.Device{
{ID: "Dev1"},
makeNUMADevice("Dev2", 1),
},
hint: topologymanager.TopologyHint{
NUMANodeAffinity: makeSocketMask(1),
Preferred: true,
},
expectedAllocation: 1,
expectedAlignment: map[int]int{1: 1},
},
{
description: "Single Request, socket 0",
resource: "resource",
request: 1,
devices: []pluginapi.Device{
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
},
hint: topologymanager.TopologyHint{
NUMANodeAffinity: makeSocketMask(0),
Preferred: true,
},
expectedAllocation: 1,
expectedAlignment: map[int]int{0: 1},
},
{
description: "Single Request, socket 1",
resource: "resource",
request: 1,
devices: []pluginapi.Device{
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
},
hint: topologymanager.TopologyHint{
NUMANodeAffinity: makeSocketMask(1),
Preferred: true,
},
expectedAllocation: 1,
expectedAlignment: map[int]int{1: 1},
},
{
description: "Request for 2, socket 0",
resource: "resource",
request: 2,
devices: []pluginapi.Device{
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
makeNUMADevice("Dev3", 0),
makeNUMADevice("Dev4", 1),
},
hint: topologymanager.TopologyHint{
NUMANodeAffinity: makeSocketMask(0),
Preferred: true,
},
expectedAllocation: 2,
expectedAlignment: map[int]int{0: 2},
},
{
description: "Request for 2, socket 1",
resource: "resource",
request: 2,
devices: []pluginapi.Device{
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
makeNUMADevice("Dev3", 0),
makeNUMADevice("Dev4", 1),
},
hint: topologymanager.TopologyHint{
NUMANodeAffinity: makeSocketMask(1),
Preferred: true,
},
expectedAllocation: 2,
expectedAlignment: map[int]int{1: 2},
},
{
description: "Request for 4, unsatisfiable, prefer socket 0",
resource: "resource",
request: 4,
devices: []pluginapi.Device{
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
makeNUMADevice("Dev3", 0),
makeNUMADevice("Dev4", 1),
makeNUMADevice("Dev5", 0),
makeNUMADevice("Dev6", 1),
},
hint: topologymanager.TopologyHint{
NUMANodeAffinity: makeSocketMask(0),
Preferred: true,
},
expectedAllocation: 4,
expectedAlignment: map[int]int{0: 3, 1: 1},
},
{
description: "Request for 4, unsatisfiable, prefer socket 1",
resource: "resource",
request: 4,
devices: []pluginapi.Device{
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
makeNUMADevice("Dev3", 0),
makeNUMADevice("Dev4", 1),
makeNUMADevice("Dev5", 0),
makeNUMADevice("Dev6", 1),
},
hint: topologymanager.TopologyHint{
NUMANodeAffinity: makeSocketMask(1),
Preferred: true,
},
expectedAllocation: 4,
expectedAlignment: map[int]int{0: 1, 1: 3},
},
{
description: "Request for 4, multisocket",
resource: "resource",
request: 4,
devices: []pluginapi.Device{
makeNUMADevice("Dev1", 0),
makeNUMADevice("Dev2", 1),
makeNUMADevice("Dev3", 2),
makeNUMADevice("Dev4", 3),
makeNUMADevice("Dev5", 0),
makeNUMADevice("Dev6", 1),
makeNUMADevice("Dev7", 2),
makeNUMADevice("Dev8", 3),
},
hint: topologymanager.TopologyHint{
NUMANodeAffinity: makeSocketMask(1, 3),
Preferred: true,
},
expectedAllocation: 4,
expectedAlignment: map[int]int{1: 2, 3: 2},
},
}
for _, tc := range tcases {
m := ManagerImpl{
allDevices: make(map[string]map[string]pluginapi.Device),
healthyDevices: make(map[string]sets.String),
allocatedDevices: make(map[string]sets.String),
podDevices: make(podDevices),
sourcesReady: &sourcesReadyStub{},
activePods: func() []*v1.Pod { return []*v1.Pod{} },
topologyAffinityStore: &mockAffinityStore{tc.hint},
}
m.allDevices[tc.resource] = make(map[string]pluginapi.Device)
m.healthyDevices[tc.resource] = sets.NewString()
for _, d := range tc.devices {
m.allDevices[tc.resource][d.ID] = d
m.healthyDevices[tc.resource].Insert(d.ID)
}
allocated, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.NewString())
if err != nil {
t.Errorf("Unexpected error: %v", err)
continue
}
if len(allocated) != tc.expectedAllocation {
t.Errorf("%v. expected allocation: %v but got: %v", tc.description, tc.expectedAllocation, len(allocated))
}
alignment := make(map[int]int)
if m.deviceHasTopologyAlignment(tc.resource) {
for d := range allocated {
if m.allDevices[tc.resource][d].Topology != nil {
alignment[int(m.allDevices[tc.resource][d].Topology.Nodes[0].ID)]++
}
}
}
if !reflect.DeepEqual(alignment, tc.expectedAlignment) {
t.Errorf("%v. expected alignment: %v but got: %v", tc.description, tc.expectedAlignment, alignment)
}
}
}

View File

@@ -21,6 +21,7 @@ import (
v1 "k8s.io/api/core/v1"
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@@ -63,6 +64,10 @@ type Manager interface {
// depending on the checkpoint file availability. Absence of the checkpoint file strongly indicates
// the node has been recreated.
ShouldResetExtendedResourceCapacity() bool
// GetTopologyHints implements the TopologyManager HintProvider interface, which
// ensures the Device Manager is consulted when Topology-aware resource alignments are made.
GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint
}
// DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices.
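For reference, here is a minimal sketch, with a hypothetical provider type and resource name, of something satisfying the GetTopologyHints method added to the interface above; the kubelet registers such providers with the Topology Manager via AddHintProvider, as shown in the container manager change earlier in this diff.

package main

import (
    v1 "k8s.io/api/core/v1"

    "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
    "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask"
)

// exampleHintProvider is a hypothetical provider; the Device Manager's real
// implementation is in topology_hints.go earlier in this diff.
type exampleHintProvider struct{}

// GetTopologyHints returns a single, non-restrictive hint for a made-up
// extended resource: any of NUMA nodes 0 and 1 is acceptable.
func (p *exampleHintProvider) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
    mask, _ := socketmask.NewSocketMask(0, 1)
    return map[string][]topologymanager.TopologyHint{
        "vendor.example.com/device": {
            {NUMANodeAffinity: mask, Preferred: true},
        },
    }
}

func main() {
    // Compile-time check that the method matches the signature added above.
    var _ interface {
        GetTopologyHints(v1.Pod, v1.Container) map[string][]topologymanager.TopologyHint
    } = &exampleHintProvider{}
}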