/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/bpf.h>

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h> /* bpf_core_type_id_local */

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64); /* Assume netdev has no more than 64 queues */
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} xsks_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, __u32);
	__type(value, __u32);
	__uint(max_entries, 64);
} xdp_stats_map SEC(".maps");
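
/* How these maps are used:
 * - xsks_map: indexed by rx_queue_index; userspace stores the AF_XDP socket
 *   for each queue it binds to, and xdp_sock_prog() below redirects packets
 *   into it via bpf_redirect_map().
 * - xdp_stats_map: per-queue packet counters, also indexed by
 *   rx_queue_index. Being a PERCPU_ARRAY, each CPU gets its own slot, so
 *   the counter increment in xdp_sock_prog() needs no atomic operations.
 */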

/*
 * The xdp_hints_xxx structs are stored in the XDP 'data_meta' area,
 * which is located just in front of the raw packet payload data.
 *
 * Explaining the struct attributes:
 * ---------------------------------
 * The struct must be 4-byte aligned (kernel requirement), which here
 * is enforced by __attribute__((aligned(4))).
 *
 * To avoid any C-struct padding, the "packed" attribute is used.
 *
 * NOTICE: Do NOT add __attribute__((preserve_access_index)) here,
 * as libbpf will then try to find a matching kernel data structure,
 * which makes the BPF-prog loading step fail (with "invalid func
 * unknown#195896080", which is 0xbad2310 in hex for "bad relo").
 */
struct xdp_hints_mark {
	__u32 mark;
	__u32 btf_id;
} __attribute__((aligned(4))) __attribute__((packed));

struct xdp_hints_rx_time {
	__u64 rx_ktime;
	__u32 btf_id;
} __attribute__((aligned(4))) __attribute__((packed));
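
/* Userspace consumption (sketch only, not code from this BPF object):
 * with AF_XDP the data_meta area ends right where the packet data begins,
 * and btf_id is the last member of every xdp_hints_xxx struct, so a
 * receiver can peek at the __u32 stored just before the payload and
 * switch on it. Roughly:
 *
 *	__u8 *pkt    = xsk_umem__get_data(umem_area, desc->addr);
 *	__u32 btf_id = *(__u32 *)(pkt - sizeof(__u32));
 *
 *	if (btf_id == id_xdp_hints_rx_time) {
 *		struct xdp_hints_rx_time *h = (void *)(pkt - sizeof(*h));
 *		// ... use h->rx_ktime ...
 *	}
 *
 * Here id_xdp_hints_rx_time is a placeholder; the real value must be
 * resolved from this object's own BTF (e.g. btf__find_by_name() on the
 * loaded object), matching what bpf_core_type_id_local() stores below.
 */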

static __always_inline int meta_add_rx_time(struct xdp_md *ctx)
{
	struct xdp_hints_rx_time *meta;
	void *data;
	int err;

	/* Reserve space in front of the data pointer for our meta info.
	 * (Notice: drivers not supporting data_meta will fail here!)
	 */
	err = bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta));
	if (err)
		return -1;

	/* Notice: the kernel-side verifier requires that loading of
	 * ctx->data MUST happen _after_ the bpf_xdp_adjust_meta() helper,
	 * as packet-data pointers are invalidated by it. Helpers that
	 * invalidate them are marked via bpf_helper_changes_pkt_data().
	 */
	data = (void *)(unsigned long)ctx->data;

	meta = (void *)(unsigned long)ctx->data_meta;
	if ((void *)(meta + 1) > data) /* Verify meta area is accessible */
		return -2;

	meta->rx_ktime = bpf_ktime_get_ns();
	/* Userspace can identify which struct is used via its BTF id */
	meta->btf_id = bpf_core_type_id_local(struct xdp_hints_rx_time);

	return 0;
}

static __always_inline int meta_add_mark(struct xdp_md *ctx, __u32 mark)
{
	struct xdp_hints_mark *meta;
	void *data;
	int err;

	/* Reserve space in front of the data pointer for our meta info */
	err = bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta));
	if (err)
		return -1;

	data = (void *)(unsigned long)ctx->data;
	meta = (void *)(unsigned long)ctx->data_meta;
	if ((void *)(meta + 1) > data) /* Verify meta area is accessible */
		return -2;

	meta->mark = mark;
	meta->btf_id = bpf_core_type_id_local(struct xdp_hints_mark);

	return 0;
}

SEC("xdp_sock")
int xdp_sock_prog(struct xdp_md *ctx)
{
	int index = ctx->rx_queue_index;
	__u32 *pkt_count;
	int err;

	pkt_count = bpf_map_lookup_elem(&xdp_stats_map, &index);
	if (!pkt_count)
		return XDP_ABORTED;
	__u64 cnt = (*pkt_count)++;
	// if (cnt == 0) {
	//	if (bpf_ktime_get_ns() == 42)
	//		return XDP_ABORTED;
	//	cnt++;
	// }

	/* Notice how two different xdp_hints metadata structs are used */
	if ((cnt % 2) == 0) {
		err = meta_add_rx_time(ctx);
		if (err < 0)
			return XDP_ABORTED;
	} else {
		err = meta_add_mark(ctx, 42);
		if (err < 0)
			return XDP_DROP;
	}

	/* A set entry here means that the corresponding queue_id
	 * has an active AF_XDP socket bound to it.
	 */
	if (bpf_map_lookup_elem(&xsks_map, &index))
		return bpf_redirect_map(&xsks_map, index, 0); /* flags==0: XDP_ABORTED on lookup failure */

	return XDP_PASS;
}
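
/* Loading/wiring note (userspace side; a sketch under assumed libbpf/libxdp
 * tooling, not code from this file): after attaching this program, each
 * AF_XDP socket must be registered in xsks_map at key == its queue_id, e.g.:
 *
 *	// xsk created via xsk_socket__create(..., queue_id, ...)
 *	xsk_socket__update_xskmap(xsk, xsks_map_fd);
 *
 * where xsks_map_fd would come from bpf_map__fd() on the loaded object.
 */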
char _license[] SEC("license") = "GPL";