Skip to content

Commit c5e776c

Browse files
authored
✨ Manifest completion (#1033)
* Skip manifests in work reconcile that are marked Complete
Signed-off-by: Ben Perry <[email protected]>
* Aggregate Complete condition to work from manifests
Signed-off-by: Ben Perry <[email protected]>
* Delete work that is complete and satisfies configured TTL
Signed-off-by: Ben Perry <[email protected]>
* tests
Signed-off-by: Ben Perry <[email protected]>
* lint
Signed-off-by: Ben Perry <[email protected]>
* go.mod
Signed-off-by: Ben Perry <[email protected]>
* Helper funcs for conditions
Signed-off-by: Ben Perry <[email protected]>
* Generic condition aggregation
Signed-off-by: Ben Perry <[email protected]>
* Support integration test args
Signed-off-by: Ben Perry <[email protected]>
* Remove work deletion from spoke, will be moved to hub GC
Signed-off-by: Ben Perry <[email protected]>
* Cleanup
Signed-off-by: Ben Perry <[email protected]>
* update api
Signed-off-by: Ben Perry <[email protected]>
* Wait for NS to exist before testing
Signed-off-by: Ben Perry <[email protected]>
---------
Signed-off-by: Ben Perry <[email protected]>
1 parent 7123675 commit c5e776c

File tree

17 files changed

+575
-167
lines changed

17 files changed

+575
-167
lines changed

go.mod

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ require (
4040
k8s.io/kubectl v0.32.2
4141
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
4242
open-cluster-management.io/addon-framework v1.0.0
43-
open-cluster-management.io/api v1.0.0
43+
open-cluster-management.io/api v1.0.1-0.20250703232537-f781272f812e
4444
open-cluster-management.io/sdk-go v1.0.1-0.20250708024404-422b23814b5d
4545
sigs.k8s.io/about-api v0.0.0-20250131010323-518069c31c03
4646
sigs.k8s.io/cluster-inventory-api v0.0.0-20240730014211-ef0154379848

go.sum

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -491,8 +491,8 @@ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6J
491491
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
492492
open-cluster-management.io/addon-framework v1.0.0 h1:ejTk4hPAJnwCSxQhY/tVDPg3SeH91lVfWqSaJcYiKwg=
493493
open-cluster-management.io/addon-framework v1.0.0/go.mod h1:Gw9zRGvuNJJ3XhTYanIuA7FFFw0EjtoE74l5OBZCZf8=
494-
open-cluster-management.io/api v1.0.0 h1:54QllH9DTudCk6VrGt0q8CDsE3MghqJeTaTN4RHZpE0=
495-
open-cluster-management.io/api v1.0.0/go.mod h1:/OeqXycNBZQoe3WG6ghuWsMgsKGuMZrK8ZpsU6gWL0Y=
494+
open-cluster-management.io/api v1.0.1-0.20250703232537-f781272f812e h1:CHPatj3lW7pLaI8wWNbXkiWdYScHusu+YV0cVawxMw4=
495+
open-cluster-management.io/api v1.0.1-0.20250703232537-f781272f812e/go.mod h1:KEj/4wbUjdbWktrKLL8+mWzAIzE6Ii3bcRr4CvnBNEg=
496496
open-cluster-management.io/sdk-go v1.0.1-0.20250708024404-422b23814b5d h1:sYgNfYyQ6O7sfiVOUaMuoK/CTeWnTNTfVKY8dWORBgw=
497497
open-cluster-management.io/sdk-go v1.0.1-0.20250708024404-422b23814b5d/go.mod h1:LYX48E3h96XGnm6o+GomV0DSf15w1i9crtggj2HeDvI=
498498
sigs.k8s.io/about-api v0.0.0-20250131010323-518069c31c03 h1:1ShFiMjGQOR/8jTBkmZrk1gORxnvMwm1nOy2/DbHg4U=

pkg/work/helper/helpers.go

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -397,6 +397,19 @@ func FindManifestConfiguration(resourceMeta workapiv1.ManifestResourceMeta,
397397
return rstOption
398398
}
399399

400+
// FindManifestCondition searches the slice for a manifest that matches the given resource metadata
401+
func FindManifestCondition(resourceMeta workapiv1.ManifestResourceMeta, manifestConditions []workapiv1.ManifestCondition) *workapiv1.ManifestCondition {
402+
resourceMeta = resetOrdinal(resourceMeta)
403+
for i := range manifestConditions {
404+
manifestCondition := &manifestConditions[i]
405+
if resourceMeta != resetOrdinal(manifestCondition.ResourceMeta) {
406+
continue
407+
}
408+
return manifestCondition
409+
}
410+
return nil
411+
}
412+
400413
func ApplyOwnerReferences(ctx context.Context, dynamicClient dynamic.Interface, gvr schema.GroupVersionResource,
401414
existing runtime.Object, requiredOwner metav1.OwnerReference) error {
402415
accessor, err := meta.Accessor(existing)

pkg/work/spoke/conditions/helpers.go

Lines changed: 155 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,155 @@
1+
package conditions
2+
3+
import (
4+
"fmt"
5+
"slices"
6+
"strings"
7+
8+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9+
10+
workapiv1 "open-cluster-management.io/api/work/v1"
11+
)
12+
13+
const (
14+
// conditionRuleReasonPrefix is used to identify conditions that were generated by condition rules
15+
conditionRuleReasonPrefix = "ConditionRule"
16+
)
17+
18+
// IsConditionGeneratedByConditionRule determines if a condition was created by condition rules
19+
func IsConditionGeneratedByConditionRule(condition metav1.Condition) bool {
20+
return strings.HasPrefix(condition.Reason, conditionRuleReasonPrefix)
21+
}
22+
23+
// PruneConditionsGeneratedByConditionRules removes old conditions that were generated by a ConditionRule
24+
// but have not been updated since that latest ManifestWork generation. Removing a ConditionRule will
25+
// always update the ManifestWork generation, so any conditions generated by conditions rules from a
26+
// previous generation must have been from a deleted rule.
27+
func PruneConditionsGeneratedByConditionRules(conditions *[]metav1.Condition, workGeneration int64) {
28+
*conditions = slices.DeleteFunc(*conditions, func(c metav1.Condition) bool {
29+
return IsConditionGeneratedByConditionRule(c) && c.ObservedGeneration != workGeneration
30+
})
31+
}
32+
33+
// AggregateManifestConditions collects conditions from manifests and combines them into top level
34+
// conditions for the ManifestWork. Only the Available condition and conditions generated by condition rules
35+
// are aggregated.
36+
func AggregateManifestConditions(generation int64, manifests []workapiv1.ManifestCondition) []metav1.Condition {
37+
aggregated := map[string]aggregateCondition{
38+
// Always set Available condition, even if there are no manifests
39+
workapiv1.ManifestAvailable: {},
40+
}
41+
42+
// Collect conditions by type
43+
for _, manifest := range manifests {
44+
for _, condition := range manifest.Conditions {
45+
if condition.Type == workapiv1.ManifestAvailable || IsConditionGeneratedByConditionRule(condition) {
46+
aggCondition := aggregated[condition.Type]
47+
switch condition.Status {
48+
case metav1.ConditionFalse:
49+
aggCondition.numFalse++
50+
case metav1.ConditionTrue:
51+
aggCondition.numTrue++
52+
case metav1.ConditionUnknown:
53+
aggCondition.numUnknown++
54+
}
55+
56+
if condition.Message != "" {
57+
aggCondition.message = condition.Message
58+
}
59+
60+
aggregated[condition.Type] = aggCondition
61+
}
62+
}
63+
}
64+
65+
// Create aggregate conditions
66+
conditions := make([]metav1.Condition, 0, len(aggregated))
67+
for conditionType, aggCondition := range aggregated {
68+
var condition metav1.Condition
69+
switch conditionType {
70+
case workapiv1.ManifestAvailable:
71+
condition = newAggregateAvailableCondition(aggCondition, generation, len(manifests))
72+
default:
73+
condition = newAggregateConditionFromRules(conditionType, generation, aggCondition)
74+
}
75+
76+
conditions = append(conditions, condition)
77+
}
78+
79+
// Ensure consistent ordering of aggregated conditions
80+
slices.SortFunc(conditions, func(a, b metav1.Condition) int {
81+
return strings.Compare(a.Type, b.Type)
82+
})
83+
return conditions
84+
}
85+
86+
// aggregateCondition accumulates the per-status tallies for one condition type
// across all manifests in a ManifestWork.
type aggregateCondition struct {
	// numTrue/numFalse/numUnknown count manifests reporting each status.
	numTrue, numFalse, numUnknown int
	// message is the last non-empty message seen for this condition type.
	message string
}
90+
91+
func newAggregateAvailableCondition(aggCondition aggregateCondition, generation int64, total int) metav1.Condition {
92+
switch {
93+
case aggCondition.numFalse > 0:
94+
return metav1.Condition{
95+
Type: workapiv1.WorkAvailable,
96+
Status: metav1.ConditionFalse,
97+
Reason: "ResourcesNotAvailable",
98+
ObservedGeneration: generation,
99+
Message: fmt.Sprintf("%d of %d resources are not available", aggCondition.numFalse, total),
100+
}
101+
case aggCondition.numUnknown > 0:
102+
return metav1.Condition{
103+
Type: workapiv1.WorkAvailable,
104+
Status: metav1.ConditionUnknown,
105+
Reason: "ResourcesStatusUnknown",
106+
ObservedGeneration: generation,
107+
Message: fmt.Sprintf("%d of %d resources have unknown status", aggCondition.numUnknown, total),
108+
}
109+
case aggCondition.numTrue == 0:
110+
return metav1.Condition{
111+
Type: workapiv1.WorkAvailable,
112+
Status: metav1.ConditionUnknown,
113+
Reason: "ResourcesStatusUnknown",
114+
ObservedGeneration: generation,
115+
Message: "cannot get any available resource ",
116+
}
117+
default:
118+
return metav1.Condition{
119+
Type: workapiv1.WorkAvailable,
120+
Status: metav1.ConditionTrue,
121+
Reason: "ResourcesAvailable",
122+
ObservedGeneration: generation,
123+
Message: "All resources are available",
124+
}
125+
}
126+
}
127+
128+
func newAggregateConditionFromRules(conditionType string, generation int64, agg aggregateCondition) metav1.Condition {
129+
var status metav1.ConditionStatus
130+
switch {
131+
case agg.numFalse > 0:
132+
status = metav1.ConditionFalse
133+
case agg.numUnknown > 0 || agg.numTrue == 0:
134+
status = metav1.ConditionUnknown
135+
default:
136+
status = metav1.ConditionTrue
137+
}
138+
139+
// Use manifest condition message if there is only one, otherwise use generic message
140+
numTotal := agg.numFalse + agg.numTrue + agg.numUnknown
141+
var message string
142+
if numTotal == 1 && agg.message != "" {
143+
message = agg.message
144+
} else {
145+
message = fmt.Sprintf("Aggregated from %d manifest conditions", numTotal)
146+
}
147+
148+
return metav1.Condition{
149+
Type: conditionType,
150+
Status: status,
151+
Reason: "ConditionRulesAggregated",
152+
ObservedGeneration: generation,
153+
Message: message,
154+
}
155+
}

pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller.go

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -133,6 +133,11 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac
133133
return nil
134134
}
135135

136+
// work that is completed does not receive any updates
137+
if meta.IsStatusConditionTrue(manifestWork.Status.Conditions, workapiv1.WorkComplete) {
138+
return nil
139+
}
140+
136141
// Apply appliedManifestWork
137142
appliedManifestWork, err := m.applyAppliedManifestWork(ctx, manifestWork.Name, m.hubHash, m.agentID)
138143
if err != nil {

pkg/work/spoke/controllers/manifestcontroller/manifestwork_reconciler.go

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ func (m *manifestworkReconciler) reconcile(
5252
resourceResults := make([]applyResult, len(manifestWork.Spec.Workload.Manifests))
5353
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
5454
resourceResults = m.applyManifests(
55-
ctx, manifestWork.Spec.Workload.Manifests, manifestWork.Spec, controllerContext.Recorder(), *owner, resourceResults)
55+
ctx, manifestWork.Spec.Workload.Manifests, manifestWork.Spec, manifestWork.Status, controllerContext.Recorder(), *owner, resourceResults)
5656

5757
for _, result := range resourceResults {
5858
if apierrors.IsConflict(result.Error) {
@@ -133,6 +133,7 @@ func (m *manifestworkReconciler) applyManifests(
133133
ctx context.Context,
134134
manifests []workapiv1.Manifest,
135135
workSpec workapiv1.ManifestWorkSpec,
136+
workStatus workapiv1.ManifestWorkStatus,
136137
recorder events.Recorder,
137138
owner metav1.OwnerReference,
138139
existingResults []applyResult) []applyResult {
@@ -141,10 +142,10 @@ func (m *manifestworkReconciler) applyManifests(
141142
switch {
142143
case existingResults[index].Result == nil:
143144
// Apply if there is no result.
144-
existingResults[index] = m.applyOneManifest(ctx, index, manifest, workSpec, recorder, owner)
145+
existingResults[index] = m.applyOneManifest(ctx, index, manifest, workSpec, workStatus, recorder, owner)
145146
case apierrors.IsConflict(existingResults[index].Error):
146147
// Apply if there is a resource conflict error.
147-
existingResults[index] = m.applyOneManifest(ctx, index, manifest, workSpec, recorder, owner)
148+
existingResults[index] = m.applyOneManifest(ctx, index, manifest, workSpec, workStatus, recorder, owner)
148149
}
149150
}
150151

@@ -156,6 +157,7 @@ func (m *manifestworkReconciler) applyOneManifest(
156157
index int,
157158
manifest workapiv1.Manifest,
158159
workSpec workapiv1.ManifestWorkSpec,
160+
workStatus workapiv1.ManifestWorkStatus,
159161
recorder events.Recorder,
160162
owner metav1.OwnerReference) applyResult {
161163

@@ -181,6 +183,15 @@ func (m *manifestworkReconciler) applyOneManifest(
181183
return result
182184
}
183185

186+
// manifests with the Complete condition are not updated
187+
manifestCondition := helper.FindManifestCondition(resMeta, workStatus.ResourceStatus.Manifests)
188+
if manifestCondition != nil {
189+
if meta.IsStatusConditionTrue(manifestCondition.Conditions, workapiv1.ManifestComplete) {
190+
result.Result = required
191+
return result
192+
}
193+
}
194+
184195
// check if the resource to be applied should be owned by the manifest work
185196
ownedByTheWork := helper.OwnedByTheWork(gvr, resMeta.Namespace, resMeta.Name, workSpec.DeleteOption)
186197

0 commit comments

Comments
 (0)