Skip to content

Commit 983136c

Browse files
committed
test-integration: Add priority ordering verification for BPF links
Add integration tests to verify BPF program links are correctly ordered by priority across XDP, TC, and TCX program types. The new verification framework validates link ordering on each cluster node by comparing ClusterBpfApplicationState data against actual bpfman daemon state. Signed-off-by: Andreas Karis <[email protected]>
1 parent 6608677 commit 983136c

File tree

5 files changed

+395
-21
lines changed

5 files changed

+395
-21
lines changed

test/integration/common.go

Lines changed: 231 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,12 +5,23 @@ package integration
55

66
import (
77
"bytes"
8+
"fmt"
89
"regexp"
10+
"slices"
911
"strconv"
1012
"strings"
1113
"testing"
1214

15+
"github.com/bpfman/bpfman-operator/apis/v1alpha1"
1316
"github.com/stretchr/testify/require"
17+
"k8s.io/apimachinery/pkg/api/meta"
18+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
19+
)
20+
21+
const (
22+
bpfmanNamespace = "bpfman"
23+
bpfmanContainer = "bpfman"
24+
bpfmanDaemonSelector = "name=bpfman-daemon"
1425
)
1526

1627
func doKprobeCheck(t *testing.T, output *bytes.Buffer) bool {
@@ -131,3 +142,223 @@ func doProbeCommonCheck(t *testing.T, output *bytes.Buffer, str string) (bool, i
131142
}
132143
return false, 0
133144
}
145+
146+
// clusterBpfApplicationStateSuccess returns a function that checks if the expected number of
147+
// ClusterBpfApplications matching the label selector have reached a successful state.
148+
func clusterBpfApplicationStateSuccess(t *testing.T, labelSelector string, numExpected int) func() bool {
149+
return func() bool {
150+
// Fetch all ClusterBpfApplications matching the label selector.
151+
apps, err := bpfmanClient.BpfmanV1alpha1().ClusterBpfApplications().List(ctx, metav1.ListOptions{
152+
LabelSelector: labelSelector,
153+
})
154+
require.NoError(t, err)
155+
156+
// Count how many applications have reached success state.
157+
numMatches := 0
158+
for _, app := range apps.Items {
159+
c := meta.FindStatusCondition(app.Status.Conditions, string(v1alpha1.BpfAppStateCondSuccess))
160+
if c != nil && c.Status == metav1.ConditionTrue {
161+
numMatches++
162+
}
163+
}
164+
// Return true if the number of successful applications matches expected count.
165+
return numMatches == numExpected
166+
}
167+
}
168+
169+
// verifyClusterBpfApplicationPriority returns a function that verifies BPF program links are ordered
170+
// correctly according to their priority values on each node.
171+
func verifyClusterBpfApplicationPriority(t *testing.T, labelSelector string) func() bool {
172+
return func() bool {
173+
// Fetch all ClusterBpfApplications matching the label selector.
174+
apps, err := bpfmanClient.BpfmanV1alpha1().ClusterBpfApplications().List(ctx, metav1.ListOptions{
175+
LabelSelector: labelSelector,
176+
})
177+
require.NoError(t, err)
178+
179+
// Fetch all ClusterBpfApplicationStates to get per-node link information.
180+
appStates, err := bpfmanClient.BpfmanV1alpha1().ClusterBpfApplicationStates().List(ctx, metav1.ListOptions{})
181+
require.NoError(t, err)
182+
183+
// Build a map of node names to their associated links from ClusterBpfApplicationStates.
184+
nodeLinks := map[string][]link{}
185+
for _, app := range apps.Items {
186+
for _, appState := range appStates.Items {
187+
for _, ownerRef := range appState.OwnerReferences {
188+
// Skip if this appState is not controlled by the current app.
189+
if ownerRef.Controller == nil || !*ownerRef.Controller {
190+
continue
191+
}
192+
if ownerRef.UID != app.UID {
193+
continue
194+
}
195+
// Initialize the slice for this node if needed.
196+
if nodeLinks[appState.Status.Node] == nil {
197+
nodeLinks[appState.Status.Node] = []link{}
198+
}
199+
// Extract and append links from this appState.
200+
nodeLinks[appState.Status.Node] = append(
201+
nodeLinks[appState.Status.Node],
202+
getClusterBpfApplicationStateLinks(t, appState)...,
203+
)
204+
}
205+
}
206+
}
207+
// Verify link ordering on each node by directly querying bpfman daemon inside the pod.
208+
for node, appStateLinks := range nodeLinks {
209+
bpfmanLinks := []link{}
210+
// Find the bpfman daemon pod running on this node.
211+
pods, err := env.Cluster().Client().CoreV1().Pods(bpfmanNamespace).List(ctx, metav1.ListOptions{
212+
LabelSelector: bpfmanDaemonSelector,
213+
FieldSelector: fmt.Sprintf("spec.nodeName=%s", node),
214+
})
215+
require.NoError(t, err)
216+
require.Len(t, pods.Items, 1)
217+
// Query each link from bpfman and verify that bpfman get link matches the output from
218+
// ClusterBpfApplicationState.
219+
for _, appStateLink := range appStateLinks {
220+
cmd := []string{"./bpfman", "get", "link", fmt.Sprintf("%d", appStateLink.linkId)}
221+
var bpfmanOut, bpfmanErr bytes.Buffer
222+
err := podExec(ctx, t, pods.Items[0], bpfmanContainer, &bpfmanOut, &bpfmanErr, cmd)
223+
require.NoError(t, err)
224+
t.Logf("bpfman get link output:\n%s", bpfmanOut.String())
225+
// Parse the bpfman output and verify it matches.
226+
bpfmanLink := parseLink(bpfmanOut.String())
227+
require.True(t, linkOutputMatchesLink(t, bpfmanLink, appStateLink))
228+
bpfmanLinks = append(bpfmanLinks, bpfmanLink)
229+
}
230+
// Verify that links are ordered correctly by priority (match priority to expected position).
231+
require.True(t, verifyLinkOrder(bpfmanLinks), "position in slice should match priority", bpfmanLinks)
232+
}
233+
return true
234+
}
235+
}
236+
237+
// link represents a BPF program link with its metadata including link ID, network interface,
// namespace path, priority, and position in the link chain.
type link struct {
	linkId        uint32 // bpfman link identifier.
	interfaceName string // network interface the program is attached to.
	netnsPath     string // network namespace path; empty when bpfman reports "None".
	priority      int32  // requested attach priority.
	position      int32  // actual position in the attach chain, as reported by bpfman.
}

// parseLink parses the "Key: Value" lines of "bpfman get link" output into a
// link struct. Lines without a colon and unknown keys are ignored; numeric
// fields that fail to parse keep their zero value.
func parseLink(out string) link {
	l := link{}
	for _, line := range strings.Split(out, "\n") {
		key, value, found := strings.Cut(line, ":")
		if !found {
			continue
		}
		key = strings.TrimSpace(key)
		value = strings.TrimSpace(value)

		switch key {
		case "Link ID":
			if v, err := strconv.ParseUint(value, 10, 32); err == nil {
				l.linkId = uint32(v)
			}
		case "Interface":
			l.interfaceName = value
		case "Network Namespace":
			// bpfman prints "None" when the link has no network namespace.
			if value != "None" {
				l.netnsPath = value
			}
		case "Priority":
			if v, err := strconv.ParseInt(value, 10, 32); err == nil {
				l.priority = int32(v)
			}
		case "Position":
			if v, err := strconv.ParseInt(value, 10, 32); err == nil {
				l.position = int32(v)
			}
		}
	}

	return l
}
278+
279+
// getClusterBpfApplicationStateLinks extracts link information from a ClusterBpfApplicationState
280+
// for XDP, TC, and TCX program types.
281+
func getClusterBpfApplicationStateLinks(t *testing.T, appState v1alpha1.ClusterBpfApplicationState) []link {
282+
links := []link{}
283+
// Iterate through all programs in the application state.
284+
for _, program := range appState.Status.Programs {
285+
switch program.Type {
286+
case v1alpha1.ProgTypeXDP:
287+
// Extract XDP program links.
288+
for _, l := range program.XDP.Links {
289+
require.NotNil(t, l.LinkId)
290+
require.NotNil(t, l.Priority)
291+
links = append(links, link{
292+
linkId: *l.LinkId,
293+
interfaceName: l.InterfaceName,
294+
netnsPath: l.NetnsPath,
295+
priority: *l.Priority,
296+
})
297+
}
298+
case v1alpha1.ProgTypeTC:
299+
// Extract TC program links.
300+
for _, l := range program.TC.Links {
301+
require.NotNil(t, l.LinkId)
302+
require.NotNil(t, l.Priority)
303+
links = append(links, link{
304+
linkId: *l.LinkId,
305+
interfaceName: l.InterfaceName,
306+
netnsPath: l.NetnsPath,
307+
priority: *l.Priority,
308+
})
309+
}
310+
case v1alpha1.ProgTypeTCX:
311+
// Extract TCX program links.
312+
for _, l := range program.TCX.Links {
313+
require.NotNil(t, l.LinkId)
314+
require.NotNil(t, l.Priority)
315+
links = append(links, link{
316+
linkId: *l.LinkId,
317+
interfaceName: l.InterfaceName,
318+
netnsPath: l.NetnsPath,
319+
priority: *l.Priority,
320+
})
321+
}
322+
}
323+
}
324+
return links
325+
}
326+
327+
// linkOutputMatchesLink compares a link parsed from bpfman output with an expected link state.
328+
func linkOutputMatchesLink(t *testing.T, linkFromOutput, l link) bool {
329+
t.Logf("Comparing output and desired link state; got:\n%+v\nwanted:\n%+v", linkFromOutput, l)
330+
return l.linkId == linkFromOutput.linkId &&
331+
l.interfaceName == linkFromOutput.interfaceName &&
332+
l.netnsPath == linkFromOutput.netnsPath &&
333+
l.priority == linkFromOutput.priority
334+
}
335+
336+
// verifyLinkOrder verifies that links are ordered correctly by priority and their positions
337+
// match their index in the sorted slice.
338+
// Side-effect: this orders `links` in place.
339+
func verifyLinkOrder(links []link) bool {
340+
slices.SortFunc(links, func(a, b link) int {
341+
if a.priority < b.priority {
342+
return -1
343+
}
344+
if a.priority > b.priority {
345+
return 1
346+
}
347+
// Tiebreaker - below, we're comparing against position. It would hence make little sense ordering here
348+
// by position. However, if 2 priorities are the same, bpfman can order them either way. Therefore, just take
349+
// the sorting that bpfman chose.
350+
if a.position < b.position {
351+
return -1
352+
}
353+
if a.position > b.position {
354+
return 1
355+
}
356+
return 0
357+
})
358+
for k, v := range links {
359+
if int32(k) != v.position {
360+
return false
361+
}
362+
}
363+
return true
364+
}

test/integration/metrics_test.go

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,7 @@ func testMetricsProxySelfTest(ctx context.Context, t *testing.T, pod corev1.Pod,
179179
cmd := []string{"env", "TOKEN=" + token, "/metrics-proxy", "test"}
180180

181181
var stdout, stderr bytes.Buffer
182-
err := podExec(ctx, t, pod, &stdout, &stderr, cmd)
182+
err := podExec(ctx, t, pod, "", &stdout, &stderr, cmd)
183183

184184
// For self-test, we expect exit code 0 for success, non-zero
185185
// for failure. Parse the JSON regardless of exit code to get
@@ -244,7 +244,8 @@ func testMetricsProxySelfTest(ctx context.Context, t *testing.T, pod corev1.Pod,
244244
t.Logf("All self-tests passed successfully on pod %s", pod.Name)
245245
}
246246

247-
func podExec(ctx context.Context, t *testing.T, pod corev1.Pod, stdout, stderr *bytes.Buffer, cmd []string) error {
247+
// podExec executes a command in a pod's container and captures stdout/stderr output.
248+
func podExec(ctx context.Context, t *testing.T, pod corev1.Pod, container string, stdout, stderr *bytes.Buffer, cmd []string) error {
248249
t.Helper()
249250
kubeConfig, err := config.GetConfig()
250251
if err != nil {
@@ -269,6 +270,9 @@ func podExec(ctx context.Context, t *testing.T, pod corev1.Pod, stdout, stderr *
269270
Stderr: true,
270271
TTY: false,
271272
}
273+
if container != "" {
274+
execOptions.Container = container
275+
}
272276

273277
req.VersionedParams(execOptions, scheme.ParameterCodec)
274278

test/integration/tc_test.go

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@ package integration
66
import (
77
"bytes"
88
"context"
9+
"errors"
10+
"fmt"
911
"io"
1012
"testing"
1113
"time"
@@ -14,12 +16,15 @@ import (
1416
"github.com/stretchr/testify/require"
1517
corev1 "k8s.io/api/core/v1"
1618
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
19+
"k8s.io/utils/ptr"
1720
)
1821

1922
const (
2023
tcGoCounterKustomize = "https://github.com/bpfman/bpfman/examples/config/default/go-tc-counter/?timeout=120&ref=main"
2124
tcGoCounterUserspaceNs = "go-tc-counter"
2225
tcGoCounterUserspaceDsName = "go-tc-counter-ds"
26+
tcGoCounterBytecodeName = "go-tc-counter-example"
27+
tcByteCodeLabelSelector = "app.kubernetes.io/name=tcprogram"
2328
)
2429

2530
func TestTcGoCounter(t *testing.T) {
@@ -57,3 +62,74 @@ func TestTcGoCounter(t *testing.T) {
5762
return doTcCheck(t, output)
5863
}, 30*time.Second, time.Second)
5964
}
65+
66+
// TestTcGoCounterLinkPriority deploys the go-tc-counter example, clones its
// ClusterBpfApplication several times with differing link priorities, and then
// verifies that the per-node link ordering reported by bpfman matches those
// priorities before finally checking the userspace counter still works.
func TestTcGoCounterLinkPriority(t *testing.T) {
	// Priorities for the cloned applications. nil leaves Priority unset —
	// presumably exercising the server-side default; TODO confirm the default value.
	priorities := []*int32{
		nil,
		ptr.To(int32(0)),
		ptr.To(int32(500)),
		ptr.To(int32(1000)),
	}

	t.Log("deploying tc counter program")
	require.NoError(t, clusters.KustomizeDeployForCluster(ctx, env.Cluster(), tcGoCounterKustomize))
	addCleanup(func(context.Context) error {
		cleanupLog("cleaning up tc counter program")
		err1 := clusters.KustomizeDeleteForCluster(ctx, env.Cluster(), tcGoCounterKustomize)

		// Also delete the cloned ClusterBpfApplications created below; they are
		// matched by the shared label selector, not by the kustomize delete.
		cleanupLog("cleaning up tc counter bytecode")
		err2 := bpfmanClient.BpfmanV1alpha1().ClusterBpfApplications().DeleteCollection(ctx, metav1.DeleteOptions{},
			metav1.ListOptions{
				LabelSelector: tcByteCodeLabelSelector,
			})
		return errors.Join(err1, err2)
	})

	t.Log("creating copies of bytecode using the same link")
	cba, err := bpfmanClient.BpfmanV1alpha1().ClusterBpfApplications().Get(ctx, tcGoCounterBytecodeName, metav1.GetOptions{})
	require.NoError(t, err)
	name := cba.Name
	// Reset the ObjectMeta (keeping only labels) so server-populated fields such
	// as resourceVersion and UID do not block the Create calls below.
	cba.ObjectMeta = metav1.ObjectMeta{
		Labels: cba.Labels,
	}
	// Create one copy per priority, each with a unique name and its own priority
	// on the first TC link.
	for i, priority := range priorities {
		cba.Name = fmt.Sprintf("%s-%d", name, i)
		cba.Spec.Programs[0].TC.Links[0].Priority = priority
		_, err := bpfmanClient.BpfmanV1alpha1().ClusterBpfApplications().Create(ctx, cba, metav1.CreateOptions{})
		require.NoError(t, err)
	}
	// Add priority 55 from the kustomize deployment as well.
	priorities = append(priorities, ptr.To(int32(55)))

	t.Log("waiting for bytecode to be attached successfully")
	// First wait for all applications (clones + original) to reach success,
	// then verify the link ordering against the bpfman daemon on each node.
	require.Eventually(t, clusterBpfApplicationStateSuccess(t, tcByteCodeLabelSelector, len(priorities)), 2*time.Minute, 10*time.Second)
	require.Eventually(t, verifyClusterBpfApplicationPriority(t, tcByteCodeLabelSelector), 1*time.Minute, 10*time.Second)

	t.Log("waiting for go tc counter userspace daemon to be available")
	require.Eventually(t, func() bool {
		daemon, err := env.Cluster().Client().AppsV1().DaemonSets(tcGoCounterUserspaceNs).Get(ctx, tcGoCounterUserspaceDsName, metav1.GetOptions{})
		require.NoError(t, err)
		return daemon.Status.DesiredNumberScheduled == daemon.Status.NumberAvailable
	},
		// Wait 5 minutes since cosign is slow, https://github.com/bpfman/bpfman/issues/1043
		5*time.Minute, 10*time.Second)

	// Exactly one userspace counter pod is expected.
	pods, err := env.Cluster().Client().CoreV1().Pods(tcGoCounterUserspaceNs).List(ctx, metav1.ListOptions{LabelSelector: "name=go-tc-counter"})
	require.NoError(t, err)
	require.Len(t, pods.Items, 1)
	goTcCounterPod := pods.Items[0]

	req := env.Cluster().Client().CoreV1().Pods(tcGoCounterUserspaceNs).GetLogs(goTcCounterPod.Name, &corev1.PodLogOptions{})

	// Poll the pod logs until the counter output shows traffic being counted.
	require.Eventually(t, func() bool {
		logs, err := req.Stream(ctx)
		require.NoError(t, err)
		defer logs.Close()
		output := new(bytes.Buffer)
		_, err = io.Copy(output, logs)
		require.NoError(t, err)
		t.Logf("counter pod log %s", output.String())

		return doTcCheck(t, output)
	}, 30*time.Second, time.Second)
}

0 commit comments

Comments
 (0)