From 1bc5acd12047e27b02134f6ebfb56e9da0d48d08 Mon Sep 17 00:00:00 2001
From: Maria Matejka <mq@ucw.cz>
Date: Tue, 5 Mar 2024 13:57:11 +0100
Subject: [PATCH] Lockfree usecount: deferring unlocks to the metaloop

This allows for easy temporary locks without additional burden of
explicit cleanup.
---
 lib/io-loop.h         |  1 +
 lib/lockfree.c        | 12 ++++++++++++
 lib/lockfree.h        | 49 ++++++++++++++++++++++++++++++++++++++++---
 nest/mpls.c           |  4 ++--
 sysdep/unix/io-loop.c |  3 +++
 5 files changed, 64 insertions(+), 5 deletions(-)

diff --git a/lib/io-loop.h b/lib/io-loop.h
index 80cd2ea2..4a66ca6e 100644
--- a/lib/io-loop.h
+++ b/lib/io-loop.h
@@ -15,6 +15,7 @@
 #include "lib/socket.h"
 
 extern struct birdloop main_birdloop;
+extern _Thread_local struct birdloop *meta_birdloop;
 
 /* Start a new birdloop owned by given pool and domain */
 struct birdloop *birdloop_new(pool *p, uint order, btime max_latency, const char *fmt, ...);
diff --git a/lib/lockfree.c b/lib/lockfree.c
index e3fb33ac..bdb684ba 100644
--- a/lib/lockfree.c
+++ b/lib/lockfree.c
@@ -12,6 +12,18 @@
 
 #define LOCAL_DEBUG
 
+_Thread_local struct lfuc_unlock_queue *lfuc_unlock_queue;
+
+void lfuc_unlock_deferred(void *_q)
+{
+  struct lfuc_unlock_queue *q = _q;
+  for (u32 i = 0; i < q->pos; i++)
+    lfuc_unlock_immediately(q->block[i].c, q->block[i].el, q->block[i].ev);
+
+  free_page(q);
+  lfuc_unlock_queue = NULL;
+}
+
 #if 0
 #define lfjour_debug(...) log(L_TRACE __VA_ARGS__)
 #define lfjour_debug_detailed(...) log(L_TRACE __VA_ARGS__)
diff --git a/lib/lockfree.h b/lib/lockfree.h
index 87a6e1bb..e08608e5 100644
--- a/lib/lockfree.h
+++ b/lib/lockfree.h
@@ -14,6 +14,7 @@
 #include "lib/rcu.h"
 #include "lib/settle.h"
 #include "lib/tlists.h"
+#include "lib/io-loop.h"
 
 #include <stdatomic.h>
 
@@ -57,7 +58,7 @@ static inline u64 lfuc_lock_revive(struct lfuc *c)
 }
 
 /**
- * lfuc_unlock - decrease an atomic usecount
+ * lfuc_unlock_immediately - decrease an atomic usecount
  * @c: the usecount structure
  * @el: prune event list
  * @ev: prune event itself
@@ -65,7 +66,7 @@ static inline u64 lfuc_lock_revive(struct lfuc *c)
  * If the usecount reaches zero, a prune event is run to possibly free the object.
  * The prune event MUST use lfuc_finished() to check the object state.
  */
-static inline u64 lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
+static inline void lfuc_unlock_immediately(struct lfuc *c, event_list *el, event *ev)
 {
   /* Unlocking is tricky. We do it lockless so at the same time, the prune
    * event may be running, therefore if the unlock gets us to zero, it must be
@@ -112,7 +113,49 @@ static inline u64 lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
    * RCU synchronization instead of a busy loop.
    */
   rcu_read_unlock();
-  return uc - LFUC_IN_PROGRESS - 1;
+//  return uc - LFUC_IN_PROGRESS - 1;
+}
+
+extern _Thread_local struct lfuc_unlock_queue {
+  event e;
+  u32 pos;
+  struct lfuc_unlock_queue_block {
+    struct lfuc *c;
+    event_list *el;
+    event *ev;
+  } block[0];
+} *lfuc_unlock_queue;
+
+void lfuc_unlock_deferred(void *queue);
+
+static inline void lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
+{
+  static u32 queue_items = 0;
+  if (queue_items == 0)
+  {
+    ASSERT_DIE((u64) page_size > sizeof(struct lfuc_unlock_queue) + sizeof(struct lfuc_unlock_queue_block));
+    queue_items = (page_size - OFFSETOF(struct lfuc_unlock_queue, block))
+      / sizeof lfuc_unlock_queue->block[0];
+  }
+
+  if (!lfuc_unlock_queue || (lfuc_unlock_queue->pos >= queue_items))
+  {
+    lfuc_unlock_queue = alloc_page();
+    *lfuc_unlock_queue = (struct lfuc_unlock_queue) {
+      .e = {
+        .hook = lfuc_unlock_deferred,
+        .data = lfuc_unlock_queue,
+      },
+    };
+
+    ev_send_loop(meta_birdloop, &lfuc_unlock_queue->e);
+  }
+
+  lfuc_unlock_queue->block[lfuc_unlock_queue->pos++] = (struct lfuc_unlock_queue_block) {
+    .c = c,
+    .el = el,
+    .ev = ev,
+  };
 }
 
 /**
diff --git a/nest/mpls.c b/nest/mpls.c
index bf7caab7..5400bcba 100644
--- a/nest/mpls.c
+++ b/nest/mpls.c
@@ -1196,8 +1196,8 @@ inline void mpls_lock_fec(struct mpls_fec *fec)
 
 inline void mpls_unlock_fec(struct mpls_fec *fec)
 {
-  UNUSED u64 s = lfuc_unlock(&fec->uc, birdloop_event_list(fec->map->loop), fec->map->cleanup_event);
-  DBGL("Unlocked FEC %p %u, now %lu", fec, fec->label, s);
+  lfuc_unlock(&fec->uc, birdloop_event_list(fec->map->loop), fec->map->cleanup_event);
+  DBGL("Unlocked FEC %p %u (deferred)", fec, fec->label);
 }
 
 static inline void
diff --git a/sysdep/unix/io-loop.c b/sysdep/unix/io-loop.c
index 0a222ba0..d9a2e955 100644
--- a/sysdep/unix/io-loop.c
+++ b/sysdep/unix/io-loop.c
@@ -817,6 +817,7 @@ bird_thread_main(void *arg)
   account_to(&thr->overhead);
 
   birdloop_enter(thr->meta);
+  meta_birdloop = thr->meta;
 
   tmp_init(thr->pool, birdloop_domain(thr->meta));
   init_list(&thr->loops);
@@ -1369,6 +1370,7 @@ cmd_show_threads(int show_loops)
 
 static struct bird_thread main_thread;
 struct birdloop main_birdloop = { .thread = &main_thread, };
+_Thread_local struct birdloop *meta_birdloop;
 
 static void birdloop_enter_locked(struct birdloop *loop);
 
@@ -1396,6 +1398,7 @@ birdloop_init(void)
   timers_init(&main_birdloop.time, &root_pool);
 
   birdloop_enter_locked(&main_birdloop);
+  meta_birdloop = &main_birdloop;
 }
 
 static void