2022-05-20 14:53:14 +02:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
|
|
#include <linux/bpf.h>
|
2022-06-14 16:52:04 +02:00
|
|
|
#include <asm/ptrace.h>
|
2022-05-20 14:53:14 +02:00
|
|
|
#include <bpf/bpf_helpers.h>
|
2022-06-14 16:52:04 +02:00
|
|
|
#include <bpf/bpf_tracing.h>
|
|
|
|
#include <bpf/bpf_core_read.h>
|
2022-05-20 14:53:14 +02:00
|
|
|
#include <xdp/parsing_helpers.h>
|
|
|
|
#include <linux/pkt_cls.h>
|
|
|
|
|
|
|
|
#include "pkt-loop-filter.h"
|
|
|
|
|
2022-06-14 16:52:04 +02:00
|
|
|
/* local partial kernel struct definitions with just the members we need */
|
|
|
|
/* Minimal CO-RE mirror of the kernel's struct net; only the member we
 * actually read is declared, relocated at load time via
 * preserve_access_index.
 */
struct net {
	__u64 net_cookie; /* per-netns cookie, read with BPF_CORE_READ() below */
} __attribute__((preserve_access_index));
|
|
|
|
|
|
|
|
/* Minimal CO-RE mirror of the kernel's struct net_device; only the
 * members read in handle_device_notify() are declared.
 */
struct net_device {
	int ifindex;      /* interface index within its netns */
	struct {
		struct net *net; /* owning network namespace */
	} nd_net;
} __attribute__((preserve_access_index));
|
|
|
|
|
|
|
|
/* Minimal CO-RE mirror of the kernel's struct netdev_notifier_info,
 * the payload passed to call_netdevice_notifiers_info().
 */
struct netdev_notifier_info {
	struct net_device *dev; /* device the notifier event refers to */
} __attribute__((preserve_access_index));
|
|
|
|
|
|
|
|
#define NETDEV_GOING_DOWN 10
|
|
|
|
|
|
|
|
/* cookie for init ns; hoping this is stable */
|
|
|
|
#define INIT_NS 1
|
|
|
|
|
2022-06-14 17:36:54 +02:00
|
|
|
#define PKT_TYPE_UNICAST 1
|
|
|
|
#define PKT_TYPE_MULTICAST 2
|
|
|
|
|
2022-05-20 14:53:14 +02:00
|
|
|
/* We use an LRU map to avoid having to do cleanup: We just rely on the LRU
|
|
|
|
* mechanism to evict old entries as the map fills up.
|
|
|
|
*/
|
|
|
|
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, struct pkt_loop_key);    /* src MAC + VLAN, filled by parse_pkt() */
	__type(value, struct pkt_loop_data); /* last egress ifindex, expiry time, drop count */
	__uint(max_entries, 16384);
} iface_state SEC(".maps");
|
|
|
|
|
2022-06-14 17:36:54 +02:00
|
|
|
/* Set of interfaces participating in the filter, indexed by
 * current_ifindex to pick the one allowed to receive multicast.
 * NOTE(review): presumably populated from userspace by the loader before
 * attach — confirm; nothing in this file writes active_ifindexes.
 */
int active_ifindexes[MAX_IFINDEXES] = {};
unsigned int current_ifindex = 0;
|
|
|
|
|
|
|
|
/* Compare two ETH_ALEN-byte MAC addresses byte by byte.
 * Returns 1 if they are identical, 0 otherwise.
 */
static int ethaddr_equal(__u8 *a, __u8 *b)
{
	int idx = 0;

	while (idx < ETH_ALEN) {
		if (a[idx] != b[idx])
			return 0;
		idx++;
	}

	return 1;
}
|
|
|
|
|
|
|
|
static int get_current_ifindex(void)
|
|
|
|
{
|
|
|
|
/* bounds check to placate the verifier */
|
|
|
|
if (current_ifindex > MAX_IFINDEXES)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return active_ifindexes[current_ifindex];
|
|
|
|
}
|
|
|
|
|
2022-05-20 14:53:14 +02:00
|
|
|
static int parse_pkt(struct __sk_buff *skb, struct pkt_loop_key *key)
|
|
|
|
{
|
2022-06-14 17:36:54 +02:00
|
|
|
static __u8 mcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
|
2022-05-20 14:53:14 +02:00
|
|
|
void *data_end = (void *)(unsigned long long)skb->data_end;
|
|
|
|
void *data = (void *)(unsigned long long)skb->data;
|
|
|
|
struct hdr_cursor nh = { .pos = data };
|
|
|
|
struct ethhdr *eth;
|
|
|
|
int eth_type;
|
|
|
|
|
|
|
|
/* Parse Ethernet and IP/IPv6 headers */
|
|
|
|
eth_type = parse_ethhdr(&nh, data_end, ð);
|
|
|
|
if (eth_type < 0)
|
|
|
|
return eth_type;
|
|
|
|
|
|
|
|
__builtin_memcpy(key->src_mac, eth->h_source, ETH_ALEN);
|
|
|
|
key->src_vlan = skb->vlan_tci;
|
|
|
|
|
2022-06-14 17:36:54 +02:00
|
|
|
return ethaddr_equal(eth->h_dest, mcast_addr) ? PKT_TYPE_MULTICAST : PKT_TYPE_UNICAST;
|
2022-05-20 14:53:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
SEC("tc")
|
|
|
|
int record_egress_pkt(struct __sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct pkt_loop_data value = { .ifindex = skb->ifindex }, *v;
|
|
|
|
struct pkt_loop_key key;
|
2022-06-14 17:36:54 +02:00
|
|
|
int pkt_type;
|
2022-05-20 14:53:14 +02:00
|
|
|
|
2022-06-14 17:36:54 +02:00
|
|
|
pkt_type = parse_pkt(skb, &key);
|
|
|
|
if (pkt_type < 0)
|
2022-05-20 14:53:14 +02:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
v = bpf_map_lookup_elem(&iface_state, &key);
|
|
|
|
if (!v) {
|
|
|
|
bpf_map_update_elem(&iface_state, &key, &value, BPF_NOEXIST);
|
|
|
|
v = bpf_map_lookup_elem(&iface_state, &key);
|
|
|
|
if (!v)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
v->expiry_time = bpf_ktime_get_coarse_ns() + STATE_LIFETIME;
|
|
|
|
v->ifindex = skb->ifindex;
|
|
|
|
out:
|
|
|
|
return TC_ACT_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
SEC("tc")
|
|
|
|
int filter_ingress_pkt(struct __sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct pkt_loop_data *value;
|
|
|
|
struct pkt_loop_key key;
|
2022-06-14 17:36:54 +02:00
|
|
|
int pkt_type;
|
2022-05-20 14:53:14 +02:00
|
|
|
|
2022-06-14 17:36:54 +02:00
|
|
|
pkt_type = parse_pkt(skb, &key);
|
|
|
|
if (pkt_type < 0)
|
2022-05-20 14:53:14 +02:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
value = bpf_map_lookup_elem(&iface_state, &key);
|
|
|
|
if (value && value->expiry_time > bpf_ktime_get_coarse_ns()) {
|
|
|
|
value->drops++;
|
|
|
|
return TC_ACT_SHOT;
|
|
|
|
}
|
|
|
|
|
2022-06-14 17:36:54 +02:00
|
|
|
/* Only allow multicast pkts on the currently active interface */
|
|
|
|
if (pkt_type == PKT_TYPE_MULTICAST &&
|
|
|
|
skb->ifindex != get_current_ifindex())
|
|
|
|
return TC_ACT_SHOT;
|
|
|
|
|
2022-05-20 14:53:14 +02:00
|
|
|
out:
|
|
|
|
return TC_ACT_OK;
|
|
|
|
}
|
|
|
|
|
2022-06-14 16:52:04 +02:00
|
|
|
SEC("kprobe/call_netdevice_notifiers_info")
|
|
|
|
/* Kprobe on call_netdevice_notifiers_info(): when the currently active
 * interface (in the init netns) is going down, advance current_ifindex
 * to the next populated slot, wrapping to 0 at the end of the list.
 */
int BPF_KPROBE(handle_device_notify, unsigned long val, struct netdev_notifier_info *info)
{
	int ifindex = BPF_CORE_READ(info, dev, ifindex);
	__u64 cookie = BPF_CORE_READ(info, dev, nd_net.net, net_cookie);

	if (val == NETDEV_GOING_DOWN && cookie == INIT_NS &&
	    ifindex == get_current_ifindex()) {
		/* Active interface going down, switch to next one; we currently
		 * don't check for ifup and switch back
		 */
		current_ifindex++;
		/* Must be >=, not >: with '>' the !active_ifindexes[] test
		 * would read one element past the end of the array when
		 * current_ifindex == MAX_IFINDEXES, before wrapping to 0.
		 */
		if (current_ifindex >= MAX_IFINDEXES || !active_ifindexes[current_ifindex])
			current_ifindex = 0;
	}

	return 0;
}
|
|
|
|
|
2022-05-20 14:53:14 +02:00
|
|
|
/* License declaration required by the kernel's BPF loader */
char _license[] SEC("license") = "GPL";
|