selftests/bpf: Add a basic fifo qdisc test

This selftest includes a bare-minimum fifo qdisc, which simply enqueues
sk_buffs at the back of a bpf list and dequeues them from the front of
the list.
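
Once CONFIG_NET_SCH_BPF is enabled, the new subtest can be run with the
usual test_progs runner, e.g.:

  ./test_progs -t bpf_qdisc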

Signed-off-by: Amery Hung <amery.hung@bytedance.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://patch.msgid.link/20250409214606.2000194-9-ameryhung@gmail.com
commit 11c701639b (parent 4b15121da7)
Author:    Amery Hung <amery.hung@bytedance.com>
Date:      2025-04-09 14:46:04 -07:00
Committer: Martin KaFai Lau <martin.lau@kernel.org>

4 files changed, 230 insertions(+), 0 deletions(-)

diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config

@@ -71,6 +71,7 @@ CONFIG_NET_IPGRE=y
CONFIG_NET_IPGRE_DEMUX=y
CONFIG_NET_IPIP=y
CONFIG_NET_MPLS_GSO=y
CONFIG_NET_SCH_BPF=y
CONFIG_NET_SCH_FQ=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_SCHED=y

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c b/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c

@@ -0,0 +1,81 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <test_progs.h>
#include "network_helpers.h"
#include "bpf_qdisc_fifo.skel.h"
#define LO_IFINDEX 1
static const unsigned int total_bytes = 10 * 1024 * 1024;
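/* Attach @qdisc as the root qdisc on loopback, then push total_bytes of
 * TCP traffic through it with a local server/client pair.
 */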
static void do_test(char *qdisc)
{
DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX,
.attach_point = BPF_TC_QDISC,
.parent = TC_H_ROOT,
.handle = 0x8000000,
.qdisc = qdisc);
int srv_fd = -1, cli_fd = -1;
int err;
err = bpf_tc_hook_create(&hook);
if (!ASSERT_OK(err, "attach qdisc"))
return;
srv_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_OK_FD(srv_fd, "start server"))
goto done;
cli_fd = connect_to_fd(srv_fd, 0);
if (!ASSERT_OK_FD(cli_fd, "connect to client"))
goto done;
err = send_recv_data(srv_fd, cli_fd, total_bytes);
ASSERT_OK(err, "send_recv_data");
done:
if (srv_fd != -1)
close(srv_fd);
if (cli_fd != -1)
close(cli_fd);
bpf_tc_hook_destroy(&hook);
}
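/* Load the fifo skeleton, register its Qdisc_ops through struct_ops, and
 * run traffic over the resulting "bpf_fifo" qdisc.
 */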
static void test_fifo(void)
{
struct bpf_qdisc_fifo *fifo_skel;
struct bpf_link *link;
fifo_skel = bpf_qdisc_fifo__open_and_load();
if (!ASSERT_OK_PTR(fifo_skel, "bpf_qdisc_fifo__open_and_load"))
return;
link = bpf_map__attach_struct_ops(fifo_skel->maps.fifo);
if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
bpf_qdisc_fifo__destroy(fifo_skel);
return;
}
do_test("bpf_fifo");
bpf_link__destroy(link);
bpf_qdisc_fifo__destroy(fifo_skel);
}
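/* Run each qdisc subtest inside a fresh network namespace. */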
void test_bpf_qdisc(void)
{
struct netns_obj *netns;
netns = netns_new("bpf_qdisc_ns", true);
if (!ASSERT_OK_PTR(netns, "netns_new"))
return;
if (test__start_subtest("fifo"))
test_fifo();
netns_free(netns);
}

diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h b/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h

@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_QDISC_COMMON_H
#define _BPF_QDISC_COMMON_H
#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP 0x01 /* skb dropped */
#define NET_XMIT_CN 0x02 /* congestion notification */
#define TC_PRIO_CONTROL 7
#define TC_PRIO_MAX 15
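/* Place a global into a hidden, named .data section so that related
 * globals (e.g. a bpf_spin_lock and the list head it protects) end up in
 * the same map and can be associated by the verifier.
 */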
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
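/* kfuncs available to bpf qdisc programs. */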
u32 bpf_skb_get_hash(struct sk_buff *p) __ksym;
void bpf_kfree_skb(struct sk_buff *p) __ksym;
void bpf_qdisc_skb_drop(struct sk_buff *p, struct bpf_sk_buff_ptr *to_free) __ksym;
void bpf_qdisc_watchdog_schedule(struct Qdisc *sch, u64 expire, u64 delta_ns) __ksym;
void bpf_qdisc_bstats_update(struct Qdisc *sch, const struct sk_buff *skb) __ksym;
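/* Minimal copies of the kernel's qdisc_skb_cb()/qdisc_pkt_len() helpers. */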
static struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
return (struct qdisc_skb_cb *)skb->cb;
}
static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
return qdisc_skb_cb(skb)->pkt_len;
}
#endif

diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c

@@ -0,0 +1,117 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include "bpf_experimental.h"
#include "bpf_qdisc_common.h"
char _license[] SEC("license") = "GPL";
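/* A list node that owns one queued skb through a kptr. */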
struct skb_node {
struct sk_buff __kptr * skb;
struct bpf_list_node node;
};
private(A) struct bpf_spin_lock q_fifo_lock;
private(A) struct bpf_list_head q_fifo __contains(skb_node, node);
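/* Enqueue: drop when the queue is full, otherwise wrap the skb in a
 * skb_node and append it to the tail of the fifo list.
 */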
SEC("struct_ops/bpf_fifo_enqueue")
int BPF_PROG(bpf_fifo_enqueue, struct sk_buff *skb, struct Qdisc *sch,
struct bpf_sk_buff_ptr *to_free)
{
struct skb_node *skbn;
u32 pkt_len;
if (sch->q.qlen == sch->limit)
goto drop;
skbn = bpf_obj_new(typeof(*skbn));
if (!skbn)
goto drop;
pkt_len = qdisc_pkt_len(skb);
sch->q.qlen++;
skb = bpf_kptr_xchg(&skbn->skb, skb);
if (skb)
bpf_qdisc_skb_drop(skb, to_free);
bpf_spin_lock(&q_fifo_lock);
bpf_list_push_back(&q_fifo, &skbn->node);
bpf_spin_unlock(&q_fifo_lock);
sch->qstats.backlog += pkt_len;
return NET_XMIT_SUCCESS;
drop:
bpf_qdisc_skb_drop(skb, to_free);
return NET_XMIT_DROP;
}
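/* Dequeue: pop the head of the fifo list, free the node, and return the
 * skb to the stack after updating queue statistics.
 */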
SEC("struct_ops/bpf_fifo_dequeue")
struct sk_buff *BPF_PROG(bpf_fifo_dequeue, struct Qdisc *sch)
{
struct bpf_list_node *node;
struct sk_buff *skb = NULL;
struct skb_node *skbn;
bpf_spin_lock(&q_fifo_lock);
node = bpf_list_pop_front(&q_fifo);
bpf_spin_unlock(&q_fifo_lock);
if (!node)
return NULL;
skbn = container_of(node, struct skb_node, node);
skb = bpf_kptr_xchg(&skbn->skb, skb);
bpf_obj_drop(skbn);
if (!skb)
return NULL;
sch->qstats.backlog -= qdisc_pkt_len(skb);
bpf_qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
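/* Init: hardcode a queue limit of 1000 packets. */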
SEC("struct_ops/bpf_fifo_init")
int BPF_PROG(bpf_fifo_init, struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
sch->limit = 1000;
return 0;
}
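/* Reset: free every queued skb and its node, then clear the queue length. */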
SEC("struct_ops/bpf_fifo_reset")
void BPF_PROG(bpf_fifo_reset, struct Qdisc *sch)
{
struct bpf_list_node *node;
struct skb_node *skbn;
int i;
bpf_for(i, 0, sch->q.qlen) {
struct sk_buff *skb = NULL;
bpf_spin_lock(&q_fifo_lock);
node = bpf_list_pop_front(&q_fifo);
bpf_spin_unlock(&q_fifo_lock);
if (!node)
break;
skbn = container_of(node, struct skb_node, node);
skb = bpf_kptr_xchg(&skbn->skb, skb);
if (skb)
bpf_kfree_skb(skb);
bpf_obj_drop(skbn);
}
sch->q.qlen = 0;
}
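/* Register the implementation as a Qdisc_ops; "bpf_fifo" is the qdisc id
 * the test attaches by name.
 */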
SEC(".struct_ops")
struct Qdisc_ops fifo = {
.enqueue = (void *)bpf_fifo_enqueue,
.dequeue = (void *)bpf_fifo_dequeue,
.init = (void *)bpf_fifo_init,
.reset = (void *)bpf_fifo_reset,
.id = "bpf_fifo",
};