Skip to content

Commit aa45393

Browse files
committed
replace constant values to pass tests
Signed-off-by: sivchari <[email protected]>
1 parent 81daf58 commit aa45393

File tree

5 files changed

+177
-177
lines changed

5 files changed

+177
-177
lines changed

test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go

Lines changed: 29 additions & 29 deletions
Original file line number | Diff line number | Diff line change
@@ -61,23 +61,23 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster
6161
dockerCluster.Spec.Backend.Docker.LoadBalancer.ImageTag,
6262
strconv.Itoa(dockerCluster.Spec.ControlPlaneEndpoint.Port))
6363
if err != nil {
64-
v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
64+
v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableV1Beta1Condition, infrav1.LoadBalancerProvisioningFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
6565
conditions.Set(dockerCluster, metav1.Condition{
66-
Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition,
66+
Type: infrav1.DevClusterDockerLoadBalancerAvailableCondition,
6767
Status: metav1.ConditionFalse,
68-
Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableV1Beta2Reason,
68+
Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableReason,
6969
Message: fmt.Sprintf("Failed to create helper for managing the externalLoadBalancer: %v", err),
7070
})
7171
return ctrl.Result{}, errors.Wrapf(err, "failed to create helper for managing the externalLoadBalancer")
7272
}
7373

7474
// Create the docker container hosting the load balancer.
7575
if err := externalLoadBalancer.Create(ctx); err != nil {
76-
v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
76+
v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableV1Beta1Condition, infrav1.LoadBalancerProvisioningFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
7777
conditions.Set(dockerCluster, metav1.Condition{
78-
Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition,
78+
Type: infrav1.DevClusterDockerLoadBalancerAvailableCondition,
7979
Status: metav1.ConditionFalse,
80-
Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableV1Beta2Reason,
80+
Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableReason,
8181
Message: fmt.Sprintf("Failed to create load balancer: %v", err),
8282
})
8383
return ctrl.Result{}, errors.Wrap(err, "failed to create load balancer")
@@ -86,11 +86,11 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster
8686
// Set APIEndpoints with the load balancer IP so the Cluster API Cluster Controller can pull it
8787
lbIP, err := externalLoadBalancer.IP(ctx)
8888
if err != nil {
89-
v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
89+
v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableV1Beta1Condition, infrav1.LoadBalancerProvisioningFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
9090
conditions.Set(dockerCluster, metav1.Condition{
91-
Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition,
91+
Type: infrav1.DevClusterDockerLoadBalancerAvailableCondition,
9292
Status: metav1.ConditionFalse,
93-
Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableV1Beta2Reason,
93+
Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableReason,
9494
Message: fmt.Sprintf("Failed to get ip for the load balancer: %v", err),
9595
})
9696
return ctrl.Result{}, errors.Wrap(err, "failed to get ip for the load balancer")
@@ -104,11 +104,11 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster
104104

105105
// Mark the dockerCluster ready
106106
dockerCluster.Status.Ready = true
107-
v1beta1conditions.MarkTrue(dockerCluster, infrav1.LoadBalancerAvailableCondition)
107+
v1beta1conditions.MarkTrue(dockerCluster, infrav1.LoadBalancerAvailableV1Beta1Condition)
108108
conditions.Set(dockerCluster, metav1.Condition{
109-
Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition,
109+
Type: infrav1.DevClusterDockerLoadBalancerAvailableCondition,
110110
Status: metav1.ConditionTrue,
111-
Reason: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Reason,
111+
Reason: infrav1.DevClusterDockerLoadBalancerAvailableReason,
112112
})
113113

114114
return ctrl.Result{}, nil
@@ -126,11 +126,11 @@ func (r *ClusterBackEndReconciler) ReconcileDelete(ctx context.Context, cluster
126126
dockerCluster.Spec.Backend.Docker.LoadBalancer.ImageTag,
127127
strconv.Itoa(dockerCluster.Spec.ControlPlaneEndpoint.Port))
128128
if err != nil {
129-
v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
129+
v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableV1Beta1Condition, infrav1.LoadBalancerProvisioningFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
130130
conditions.Set(dockerCluster, metav1.Condition{
131-
Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition,
131+
Type: infrav1.DevClusterDockerLoadBalancerAvailableCondition,
132132
Status: metav1.ConditionFalse,
133-
Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableV1Beta2Reason,
133+
Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableReason,
134134
Message: fmt.Sprintf("Failed to create helper for managing the externalLoadBalancer: %v", err),
135135
})
136136

@@ -140,12 +140,12 @@ func (r *ClusterBackEndReconciler) ReconcileDelete(ctx context.Context, cluster
140140
// Set the LoadBalancerAvailableCondition reporting delete is started, and requeue in order to make
141141
// this visible to the users.
142142
// TODO (v1beta2): test for v1beta2 conditions
143-
if v1beta1conditions.GetReason(dockerCluster, infrav1.LoadBalancerAvailableCondition) != clusterv1.DeletingV1Beta1Reason {
144-
v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, clusterv1.DeletingV1Beta1Reason, clusterv1.ConditionSeverityInfo, "")
143+
if v1beta1conditions.GetReason(dockerCluster, infrav1.LoadBalancerAvailableV1Beta1Condition) != clusterv1.DeletingV1Beta1Reason {
144+
v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableV1Beta1Condition, clusterv1.DeletingV1Beta1Reason, clusterv1.ConditionSeverityInfo, "")
145145
conditions.Set(dockerCluster, metav1.Condition{
146-
Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition,
146+
Type: infrav1.DevClusterDockerLoadBalancerAvailableCondition,
147147
Status: metav1.ConditionFalse,
148-
Reason: infrav1.DevClusterDockerLoadBalancerDeletingV1Beta2Reason,
148+
Reason: infrav1.DevClusterDockerLoadBalancerDeletingReason,
149149
})
150150
return ctrl.Result{RequeueAfter: 1 * time.Second}, nil
151151
}
@@ -171,27 +171,27 @@ func (r *ClusterBackEndReconciler) PatchDevCluster(ctx context.Context, patchHel
171171
// A step counter is added to represent progress during the provisioning process (instead we are hiding it during the deletion process).
172172
v1beta1conditions.SetSummary(dockerCluster,
173173
v1beta1conditions.WithConditions(
174-
infrav1.LoadBalancerAvailableCondition,
174+
infrav1.LoadBalancerAvailableV1Beta1Condition,
175175
),
176176
v1beta1conditions.WithStepCounterIf(dockerCluster.DeletionTimestamp.IsZero()),
177177
)
178-
if err := conditions.SetSummaryCondition(dockerCluster, dockerCluster, infrav1.DevClusterReadyV1Beta2Condition,
178+
if err := conditions.SetSummaryCondition(dockerCluster, dockerCluster, infrav1.DevClusterReadyCondition,
179179
conditions.ForConditionTypes{
180-
infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition,
180+
infrav1.DevClusterDockerLoadBalancerAvailableCondition,
181181
},
182182
// Using a custom merge strategy to override reasons applied during merge.
183183
conditions.CustomMergeStrategy{
184184
MergeStrategy: conditions.DefaultMergeStrategy(
185185
// Use custom reasons.
186186
conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc(
187-
infrav1.DevClusterNotReadyV1Beta2Reason,
188-
infrav1.DevClusterReadyUnknownV1Beta2Reason,
189-
infrav1.DevClusterReadyV1Beta2Reason,
187+
infrav1.DevClusterNotReadyReason,
188+
infrav1.DevClusterReadyUnknownReason,
189+
infrav1.DevClusterReadyReason,
190190
)),
191191
),
192192
},
193193
); err != nil {
194-
return errors.Wrapf(err, "failed to set %s condition", infrav1.DevClusterReadyV1Beta2Condition)
194+
return errors.Wrapf(err, "failed to set %s condition", infrav1.DevClusterReadyCondition)
195195
}
196196

197197
// Patch the object, ignoring conflicts on the conditions owned by this controller.
@@ -200,12 +200,12 @@ func (r *ClusterBackEndReconciler) PatchDevCluster(ctx context.Context, patchHel
200200
dockerCluster,
201201
patch.WithOwnedV1Beta1Conditions{Conditions: []clusterv1.ConditionType{
202202
clusterv1.ReadyV1Beta1Condition,
203-
infrav1.LoadBalancerAvailableCondition,
203+
infrav1.LoadBalancerAvailableV1Beta1Condition,
204204
}},
205205
patch.WithOwnedConditions{Conditions: []string{
206206
clusterv1.PausedCondition,
207-
infrav1.DevClusterReadyV1Beta2Condition,
208-
infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition,
207+
infrav1.DevClusterReadyCondition,
208+
infrav1.DevClusterDockerLoadBalancerAvailableCondition,
209209
}},
210210
)
211211
}

0 commit comments

Comments
 (0)