mirror of https://gitlab.labs.nic.cz/labs/bird.git synced 2024-05-11 16:54:54 +00:00

rewritten the announcement, now need to fix the double cleanup of journals

This commit is contained in:
Maria Matejka
2024-05-08 23:36:31 +02:00
parent f44f570356
commit 86eb00b822
3 changed files with 43 additions and 174 deletions

View File

@@ -272,7 +272,7 @@ static inline int rt_prefilter_net(const struct rt_prefilter *p, const net_addr
 }
 static inline _Bool
-rt_net_is_feeding(struct rt_export_feeder *ref, const net_addr *n)
+rt_net_is_feeding_feeder(struct rt_export_feeder *ref, const net_addr *n)
 {
   struct netindex *ni = NET_TO_INDEX(n);
   for (struct rt_feeding_request *rfr = ref->feeding; rfr; rfr = rfr->next)
@@ -282,6 +282,15 @@ rt_net_is_feeding(struct rt_export_feeder *ref, const net_addr *n)
   return 0;
 }
+static inline _Bool
+rt_net_is_feeding_request(struct rt_export_request *req, const net_addr *n)
+{ return rt_net_is_feeding_feeder(&req->feeder, n); }
+#define rt_net_is_feeding(h, n) _Generic((h), \
+	struct rt_export_feeder *: rt_net_is_feeding_feeder, \
+	struct rt_export_request *: rt_net_is_feeding_request, \
+	void *: bug)(h, n)
 /*
  * The original rtable
@@ -435,7 +444,7 @@ typedef struct network {
   struct rte_storage * _Atomic routes;		/* Available routes for this network */
   /* Uncleaned pending exports */
-  struct rt_net_pending_export _Atomic any;
+  struct rt_net_pending_export _Atomic all;
   struct rt_net_pending_export _Atomic best;
 } net;
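
The new header code dispatches rt_net_is_feeding() on the static type of the handle via C11 _Generic. Below is a minimal standalone sketch of the same dispatch pattern; the struct and function names are hypothetical stand-ins, not the BIRD structures:

#include <stdio.h>

/* Hypothetical stand-ins for the two handle types; the commit dispatches on
 * struct rt_export_feeder * and struct rt_export_request * instead. */
struct feeder  { int net_id; };
struct request { struct feeder feeder; };

static int is_feeding_feeder(struct feeder *f, int net)   { return f->net_id == net; }
static int is_feeding_request(struct request *r, int net) { return is_feeding_feeder(&r->feeder, net); }

/* _Generic selects the implementation from the pointer type of the first
 * argument; an extra "void *: ..." branch (as in the commit) can route
 * untyped callers to an error stub. */
#define is_feeding(h, n) _Generic((h), \
	struct feeder *:  is_feeding_feeder, \
	struct request *: is_feeding_request)(h, n)

int main(void)
{
  struct request req = { .feeder = { .net_id = 42 } };
  printf("%d %d\n", is_feeding(&req, 42), is_feeding(&req.feeder, 7));  /* prints "1 0" */
  return 0;
}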

View File

@@ -68,7 +68,7 @@ rt_export_get(struct rt_export_request *r)
     if (es == TES_READY)
       /* Fed up of feeding */
       return NULL;
-    else if (reu.feed = e->feed_next(e, r))
+    else if (reu.feed = e->feed_next(e, r)) /* TODO this must run prefilter */
       /* There is more food */
       return &reu;
     else
@@ -86,7 +86,14 @@ rt_export_get(struct rt_export_request *r)
       return rt_export_get(r);
   }
+  /* Is this update allowed by prefilter? */
+  const net_addr *n = (reu.update->new ?: reu.update->old)->net;
+  if (!rt_prefilter_net(&r->feeder.prefilter, n))
+  {
+    rt_export_release(&reu);
+    return rt_export_get(r);
+  }
   if ((es != TES_READY) && rt_net_is_feeding(&r->feeder, n))
   {
     /* But this net shall get a feed first! */
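
The rt_export_get() change checks each pulled update against the request's prefilter and, on a miss, releases it and fetches the next one. A simplified sketch of that pull-filter-retry shape, using a hypothetical queue rather than BIRD's export structures:

#include <stddef.h>

/* Hypothetical update queue; stands in for the exporter journal. */
struct update { int net_id; struct update *next; };

static struct update *queue_pop(struct update **head)
{
  struct update *u = *head;
  if (u)
    *head = u->next;
  return u;
}

/* Pop updates until one passes the prefilter; rejected updates are simply
 * dropped here, where BIRD would call rt_export_release() and retry. */
static struct update *get_allowed(struct update **head, int (*prefilter)(int net_id))
{
  struct update *u;
  while ((u = queue_pop(head)))
  {
    if (prefilter(u->net_id))
      return u;
    /* not allowed by prefilter: discard and try the next one */
  }
  return NULL;
}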

View File

@@ -1125,122 +1125,34 @@ channel_notify_basic(void *_channel)
   }
 }
-struct rt_pending_export *
-rpe_next(struct rt_pending_export *rpe, struct rte_src *src)
+static void
+rte_announce_to(struct rt_exporter *e, _Atomic struct rt_net_pending_export *npe, const rte *new, const rte *old)
 {
-  struct rt_pending_export *next = atomic_load_explicit(&rpe->next, memory_order_acquire);
+  if (new == old)
+    return;
-  if (!next)
-    return NULL;
+  struct rt_pending_export rpe = {
+    .it = {
+      .new = new,
+      .old = old,
+    },
+  };
-  if (!src)
-    return next;
+  SKIP_BACK_DECLARE(struct rt_pending_export, pushed, it, rt_exporter_push(e, &rpe.it));
+  struct rt_net_pending_export nloc = atomic_load_explicit(npe, memory_order_relaxed);
-  while (rpe = next)
-    if (src == (rpe->new ? rpe->new->src : rpe->old->src))
-      return rpe;
-    else
-      next = atomic_load_explicit(&rpe->next, memory_order_acquire);
+  if (nloc.last)
+    ASSERT_DIE(atomic_exchange_explicit(&nloc.last->next, pushed, memory_order_acq_rel) == NULL);
+  nloc.last = pushed;
+  if (!nloc.first)
+    nloc.first = pushed;
-  return NULL;
+  atomic_store_explicit(npe, nloc, memory_order_release);
 }
-static void
-rte_export(struct rt_export_hook *hook, struct rt_pending_export *rpe)
-{
-  /* Seen already? */
-  if (bmap_test(&hook->seq_map, rpe->seq))
-    return;
-  const net_addr *n = rpe->new_best ? rpe->new_best->net : rpe->old_best->net;
-  /* Check export eligibility of this net */
-  if (!rt_prefilter_net(&hook->req->prefilter, n))
-    return;
-  if (hook->req->prefilter.mode == TE_ADDR_FOR)
-    bug("Continuos export of best prefix match not implemented yet.");
-  if (rpe->new)
-    hook->stats.updates_received++;
-  else
-    hook->stats.withdraws_received++;
-  if (rpe->old)
-    ASSERT_DIE(rpe->old->flags & REF_OBSOLETE);
-  if (hook->req->export_one)
-    hook->req->export_one(hook->req, n, rpe);
-  else if (hook->req->export_bulk)
-  {
-    uint count = 0;
-    const rte **feed = NULL;
-    const SKIP_BACK_DECLARE(struct netindex, i, addr, (net_addr (*)[0]) n);
-    ASSERT_DIE(i->index < atomic_load_explicit(&hook->tab->routes_block_size, memory_order_relaxed));
-    struct rt_pending_export *last;
-    {
-      RT_READ(hook->tab, tr);
-      /* Get the route block. */
-      net *routes = atomic_load_explicit(&tr->t->routes, memory_order_acquire);
-      net *net = &routes[i->index];
-      /* Get the feed itself. It may change under our hands tho. */
-      last = atomic_load_explicit(&net->last, memory_order_acquire);
-      count = rte_feed_count(tr, net);
-      if (count)
-      {
-        feed = alloca(count * sizeof(rte *));
-        rte_feed_obtain(tr, net, feed, count);
-      }
-      /* Check that it indeed didn't change and the last export is still the same. */
-      if (last != atomic_load_explicit(&net->last, memory_order_acquire))
-        RT_READ_RETRY(tr);
-    }
-    hook->req->export_bulk(hook->req, n, rpe, last, feed, count);
-  }
-  else
-    bug("Export request must always provide an export method");
-}
-/**
- * rte_announce - announce a routing table change
- * @tab: table the route has been added to
- * @net: network in question
- * @new: the new or changed route
- * @old: the previous route replaced by the new one
- * @new_best: the new best route for the same network
- * @old_best: the previous best route for the same network
- *
- * This function gets a routing table update and announces it to all protocols
- * that are connected to the same table by their channels.
- *
- * There are two ways of how routing table changes are announced. First, there
- * is a change of just one route in @net (which may caused a change of the best
- * route of the network). In this case @new and @old describes the changed route
- * and @new_best and @old_best describes best routes. Other routes are not
- * affected, but in sorted table the order of other routes might change.
- *
- * The function announces the change to all associated channels. For each
- * channel, an appropriate preprocessing is done according to channel &ra_mode.
- * For example, %RA_OPTIMAL channels receive just changes of best routes.
- *
- * In general, we first call preexport() hook of a protocol, which performs
- * basic checks on the route (each protocol has a right to veto or force accept
- * of the route before any filter is asked). Then we consult an export filter
- * of the channel and verify the old route in an export map of the channel.
- * Finally, the rt_notify() hook of the protocol gets called.
- *
- * Note that there are also calls of rt_notify() hooks due to feed, but that is
- * done outside of scope of rte_announce().
- */
 static void
-rte_announce(struct rtable_private *tab, const struct netindex *i, net *net, const rte *new, const rte *old,
+rte_announce(struct rtable_private *tab, const struct netindex *i UNUSED, net *net, const rte *new, const rte *old,
 	     const rte *new_best, const rte *old_best)
 {
   /* Update network count */
@@ -1257,21 +1169,9 @@ rte_announce(struct rtable_private *tab, const struct netindex *i, net *net, con
   if (old_best_valid)
     old_best->sender->stats.pref--;
-  SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, lfjour_push_prepare(&tab->journal));
-  if (!rpe)
+  if (!lfjour_count_recipients(&tab->export_all.journal) &&
+      !lfjour_count_recipients(&tab->export_best.journal))
   {
-    rt_trace(tab, D_ROUTES, "Not announcing %N, "
-	"new=%p id %u from %s, "
-	"old=%p id %u from %s, "
-	"new_best=%p id %u, "
-	"old_best=%p id %u (no exporter present)",
-	i->addr,
-	new, new ? new->id : 0, new ? new->sender->req->name : NULL,
-	old, old ? old->id : 0, old ? old->sender->req->name : NULL,
-	new_best, new_best ? new_best->id : 0,
-	old_best, old_best ? old_best->id : 0);
     /* Not announcing, can free old route immediately */
     if (old)
     {
       hmap_clear(&tab->id_map, old->id);
@@ -1280,59 +1180,12 @@ rte_announce(struct rtable_private *tab, const struct netindex *i, net *net, con
       return;
   }
-  rt_trace(tab, D_ROUTES, "Announcing %N, "
-	"new=%p id %u from %s, "
-	"old=%p id %u from %s, "
-	"new_best=%p id %u, "
-	"old_best=%p id %u seq=%lu",
-	i->addr,
-	new, new ? new->id : 0, new ? new->sender->req->name : NULL,
-	old, old ? old->id : 0, old ? old->sender->req->name : NULL,
-	new_best, new_best ? new_best->id : 0,
-	old_best, old_best ? old_best->id : 0,
-	rpe->li.seq);
-  *rpe = (struct rt_pending_export) {
-    .li = rpe->li,	/* Keep the item's internal state */
-    .new = new,
-    .new_best = new_best,
-    .old = old,
-    .old_best = old_best,
-  };
-  lfjour_push_commit(&tab->journal);
-  /* Append to the same-network squasher list */
-  struct rt_pending_export *last = atomic_load_explicit(&net->last, memory_order_relaxed);
-  if (last)
-  {
-    struct rt_pending_export *rpenull = NULL;
-    ASSERT_DIE(atomic_compare_exchange_strong_explicit(
-	  &last->next, &rpenull, rpe,
-	  memory_order_release,
-	  memory_order_relaxed));
-  }
-  ASSERT_DIE(atomic_compare_exchange_strong_explicit(
-	&net->last, &last, rpe,
-	memory_order_release,
-	memory_order_relaxed));
-  struct rt_pending_export *rpenull = NULL;
-  atomic_compare_exchange_strong_explicit(
-	&net->first, &rpenull, rpe,
-	memory_order_release,
-	memory_order_relaxed);
+  rte_announce_to(&tab->export_all, &net->all, new, old);
+  rte_announce_to(&tab->export_best, &net->best, new_best, old_best);
   rt_check_cork_high(tab);
 }
-static inline void
-rt_send_export_event(struct rt_export_hook *hook)
-{
-  ev_send(hook->req->list, hook->event);
-}
 static void
 rt_cleanup_export(struct lfjour *j, struct lfjour_item *i)
 {
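
The heart of the rewritten announcement is rte_announce_to(): push the update into the exporter's journal, then chain the pushed item onto the per-net first/last list so later exports of the same net can be squashed. A simplified sketch of that append step, assuming a single writer (the table is locked while announcing) and hypothetical types rather than BIRD's real structures:

#include <stdatomic.h>
#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins; BIRD's real items carry journal state as well. */
struct pending { struct pending *_Atomic next; };
struct net_pending { struct pending *first, *last; };

/* Append one pushed export to a per-net list the way rte_announce_to() does:
 * copy the small aggregate out of the atomic, chain the new item behind the
 * previous tail, and publish the updated copy. Readers only ever load the
 * atomic snapshot; the single writer is serialized by the table lock. */
static void net_pending_append(_Atomic struct net_pending *npe, struct pending *pushed)
{
  struct net_pending loc = atomic_load_explicit(npe, memory_order_relaxed);

  if (loc.last)
  {
    /* The previous tail must not have had a successor yet,
     * mirroring the ASSERT_DIE on the exchanged value above. */
    struct pending *prev = atomic_exchange_explicit(&loc.last->next, pushed, memory_order_acq_rel);
    assert(prev == NULL);
  }

  loc.last = pushed;
  if (!loc.first)
    loc.first = pushed;

  atomic_store_explicit(npe, loc, memory_order_release);
}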