MTU-tests: Cycle through different MTU packet sizes

Encode this statically via a C/BPF-code switch statement, and
have a global counter cycle through these.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
This commit is contained in:
Jesper Dangaard Brouer
2020-10-13 22:10:28 +02:00
parent 7c2badedd4
commit f2a13d942f

View File

@@ -4,35 +4,47 @@
#include <xdp/parsing_helpers.h>
#include "encap.h"
/* MTU is defined as L3 size (usually 1500 for Ethernet),
* but remember TC (and XDP) operate at L2.
/* Cycle through different MTU packet sizes, encoded in BPF-code via switch
* statement. MTU is defined as L3 size (usually 1500 for Ethernet), but
* remember TC (and XDP) operate at L2 (adjusted later)
*/
//#define PKT_SIZE_L3 1500
#define PKT_SIZE_L3 1501
//#define PKT_SIZE_L3 1505
//#define PKT_SIZE_L3 1600
//#define PKT_SIZE_L3 20000
//#define PKT_SIZE_L3 65535
#define OFFSET sizeof(struct iphdr)
#define ENCAP_TYPE BPF_F_ADJ_ROOM_ENCAP_L3_IPV4
static __always_inline __u32 get_pkt_size_l3(__u64 cnt)
{
switch (cnt) {
case 0:
return 1024;
case 1:
return 1500;
case 2:
return 1504;
case 3:
return 1508;
case 4:
return 1600;
case 5:
return 4096 + 128;
case 6:
return 3520;
case 7:
return 3528;
case 8:
return 4096 - 14;
case 9:
return 4096;
case 10:
return 8192;
case 11:
return 16000;
default:
return 1500;
}
}
#define CNT_MAX 12
/* The tc tool (iproute2) uses another ELF map layout than libbpf, see
 * struct bpf_elf_map from iproute2, but bpf_map_def from libbpf has the
 * same binary layout until "flags" so use that.
struct bpf_elf_map {
__u32 type;
__u32 size_key;
__u32 size_value;
__u32 max_elem;
__u32 flags;
__u32 id;
__u32 pinning;
__u32 inner_id;
__u32 inner_idx;
};
*/
*/
struct bpf_map_def SEC("maps") cnt_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
@@ -48,6 +60,8 @@ struct bpf_map_def SEC("maps") cnt_map = {
#define lock_xadd(ptr, val) ((void) __sync_fetch_and_add(ptr, val))
#endif
#define ENCAP_TYPE BPF_F_ADJ_ROOM_ENCAP_L3_IPV4
SEC("classifier") int tc_inc_pkt_sz(struct __sk_buff *skb)
{
volatile void *data, *data_end;
@@ -65,7 +79,7 @@ SEC("classifier") int tc_inc_pkt_sz(struct __sk_buff *skb)
goto out;
/* Desired packet size at L2 */
int pkt_size_l2 = PKT_SIZE_L3 + sizeof(*eth) ;
int pkt_size_l2 = get_pkt_size_l3(*cnt) + sizeof(*eth) ;
data = (void *)(long)skb->data;
data_end = (void *)(long)skb->data_end;
@@ -84,15 +98,16 @@ SEC("classifier") int tc_inc_pkt_sz(struct __sk_buff *skb)
extra_len = pkt_size_l2 - len;
// extra_len= sizeof(*iph); /* Adj that does correct IPIP encap */
if (bpf_skb_adjust_room(skb, extra_len, BPF_ADJ_ROOM_MAC, ENCAP_TYPE))
goto out;
// TODO: Handle if bpf_skb_adjust_room() cannot increase size,
// as it's only my patched kernel that drops the MTU check
/* Multiple CPUs can access cnt_map, use an atomic operation */
/* Wrapping global counter */
lock_xadd(cnt, 1);
// *cnt = 42;
if (*cnt == CNT_MAX)
*cnt = 0;
if (bpf_skb_adjust_room(skb, extra_len, BPF_ADJ_ROOM_MAC, ENCAP_TYPE)) {
/* If adjust fails, then skip this packet length adjustment */
ret = BPF_OK;
goto out;
}
/* Must re-load after bpf_skb_adjust_room() */
data = (void *)(long)skb->data;
@@ -107,7 +122,6 @@ SEC("classifier") int tc_inc_pkt_sz(struct __sk_buff *skb)
goto out;
eth->h_proto = bpf_htons(ETH_P_IP);
iph->ttl = (*cnt & 0xFF);
ret = BPF_OK;
out: