Skip to content

Commit 62bd361

Browse files
committed
Add multiple roles support
Signed-off-by: Markus Blaschke <[email protected]>
1 parent 66e0095 commit 62bd361

File tree

4 files changed

+21
-38
lines changed

4 files changed

+21
-38
lines changed

config/config.go

Lines changed: 10 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ type (
3131
}
3232

3333
PoolConfigNode struct {
34-
Role *string `yaml:"role"`
34+
Roles *[]string `yaml:"roles"`
3535
ConfigSource *PoolConfigNodeConfigSource `yaml:"configSource"`
3636
Labels *map[string]string `yaml:"labels"`
3737
Annotations *map[string]string `yaml:"annotations"`
@@ -107,20 +107,15 @@ func (p *PoolConfig) IsMatchingNode(node *corev1.Node) (bool, error) {
107107
func (p *PoolConfig) CreateJsonPatchSet() (patches []k8s.JsonPatch) {
108108
patches = []k8s.JsonPatch{}
109109

110-
if p.Node.Role != nil {
111-
label := "kubernetes.io/role"
112-
patches = append(patches, k8s.JsonPatchString{
113-
Op: "replace",
114-
Path: fmt.Sprintf("/metadata/labels/%s", k8s.PatchPathEsacpe(label)),
115-
Value: *p.Node.Role,
116-
})
117-
118-
label = fmt.Sprintf("node-role.kubernetes.io/%s", *p.Node.Role)
119-
patches = append(patches, k8s.JsonPatchString{
120-
Op: "replace",
121-
Path: fmt.Sprintf("/metadata/labels/%s", k8s.PatchPathEsacpe(label)),
122-
Value: "",
123-
})
110+
if p.Node.Roles != nil {
111+
for _, role := range *p.Node.Roles {
112+
label := fmt.Sprintf("node-role.kubernetes.io/%s", role)
113+
patches = append(patches, k8s.JsonPatchString{
114+
Op: "replace",
115+
Path: fmt.Sprintf("/metadata/labels/%s", k8s.PatchPathEsacpe(label)),
116+
Value: "",
117+
})
118+
}
124119
}
125120

126121
if p.Node.ConfigSource != nil {

deployment/config.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ data:
1111
- path: "{.spec.providerID}"
1212
regexp: "^.+virtualMachineScaleSets\\/aks-agents-35471996-vmss\\/.+$"
1313
node:
14-
role: agents
14+
roles: [agents]
1515
#configSource:
1616
# configMap:
1717
# name: kubelet-config

example.yaml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ pools:
66
match: "linux"
77
node:
88
# sets the kubernetes node role
9-
role: linux
9+
roles: [linux]
1010

1111
- pool: windows
1212
continue: true
@@ -15,7 +15,7 @@ pools:
1515
match: "windows"
1616
node:
1717
# sets the kubernetes node role
18-
role: windows
18+
roles: [windows]
1919

2020
- pool: agents
2121
selector:
@@ -24,7 +24,7 @@ pools:
2424
match: "azure:///subscriptions/d86bcf13-ddf7-45ea-82f1-6f656767a318/resourceGroups/mc_k8s_mblaschke_westeurope/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agents-35471996-vmss/virtualMachines/30"
2525
node:
2626
# sets the kubernetes node role
27-
role: testing
27+
roles: [agent,foobar]
2828

2929
# dynamic kubelet configuration
3030
# see https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/
@@ -49,7 +49,7 @@ pools:
4949
regexp: "^.+virtualMachineScaleSets\\/aks-agents-35471996-vmss\\/.+$"
5050
node:
5151
# sets the kubernetes node role
52-
role: testing
52+
roles: [agent,regexp]
5353

5454
# dynamic kubelet configuration
5555
# see https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/

manager/manager.go

Lines changed: 6 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -94,50 +94,38 @@ func (m *KubePoolManager) startWatch() {
9494
}()
9595
}
9696

97-
func (m *KubePoolManager) applyToAll() error {
98-
result, err := m.k8sClient.CoreV1().Nodes().List(m.ctx, metav1.ListOptions{})
99-
if err != nil {
100-
return err
101-
}
102-
103-
for _, node := range result.Items {
104-
m.applyNode(&node)
105-
}
106-
107-
return nil
108-
}
109-
11097
func (m *KubePoolManager) applyNode(node *corev1.Node) {
11198
contextLogger := log.WithField("node", node.Name)
11299

113100
m.prometheus.nodeApplied.WithLabelValues(node.Name).Set(0)
114101

115102
for _, poolConfig := range m.Config.Pools {
103+
poolLogger := contextLogger.WithField("pool", poolConfig.Name)
116104
matching, err := poolConfig.IsMatchingNode(node)
117105
if err != nil {
118106
log.Panic(err)
119107
}
120108

121109
if matching {
122-
contextLogger.Infof("Node \"%s\" matches pool configuration \"%s\", applying pool config", node.Name, poolConfig.Name)
110+
poolLogger.Infof("applying pool \"%s\" to node \"%s\"", poolConfig.Name, node.Name)
123111

124112
// create json patch
125113
patchSet := poolConfig.CreateJsonPatchSet()
126114
patchBytes, patchErr := json.Marshal(patchSet)
127115
if patchErr != nil {
128-
contextLogger.Errorf("failed to create json patch: %v", err)
116+
poolLogger.Errorf("failed to create json patch: %v", err)
129117
return
130118
}
131119

132120
if !m.Opts.DryRun {
133121
// patch node
134122
_, k8sError := m.k8sClient.CoreV1().Nodes().Patch(m.ctx, node.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
135123
if k8sError != nil {
136-
contextLogger.Errorf("failed to apply json patch: %v", k8sError)
124+
poolLogger.Errorf("failed to apply json patch: %v", k8sError)
137125
return
138126
}
139127
} else {
140-
contextLogger.Infof("Not applying pool config, dry-run active")
128+
poolLogger.Infof("Not applying pool config, dry-run active")
141129
}
142130

143131
m.prometheus.nodeApplied.WithLabelValues(node.Name).Set(1)
@@ -147,7 +135,7 @@ func (m *KubePoolManager) applyNode(node *corev1.Node) {
147135
break
148136
}
149137
} else {
150-
contextLogger.Debugf("Node NOT matches pool configuration \"%s\"", poolConfig.Name)
138+
poolLogger.Debugf("Node NOT matches pool configuration \"%s\"", poolConfig.Name)
151139
}
152140
}
153141
}

0 commit comments

Comments
 (0)