The FreeRADIUS server $Id: 15bac2a4c627c01d1aa2047687b3418955ac7f00 $
Thread-safe queues. More...
#include <stdint.h>
#include <stdalign.h>
#include <inttypes.h>
#include <stdlib.h>
#include <freeradius-devel/autoconf.h>
#include <freeradius-devel/io/atomic_queue.h>
#include <freeradius-devel/util/math.h>
Include dependency graph for atomic_queue.c.
Go to the source code of this file.
Data Structures | |
| struct | fr_atomic_queue_entry_t |
| Entry in the queue. More... | |
| struct | fr_atomic_queue_s |
| Structure to hold the atomic queue. More... | |
| struct | fr_atomic_ring_entry_s |
| struct | fr_atomic_ring_s |
Macros | |
| #define | acquire(_var) atomic_load_explicit(&_var, memory_order_acquire) |
| #define | atomic_int64_t _Atomic(int64_t) |
| #define | atomic_uint32_t _Atomic(uint32_t) |
| #define | atomic_uint64_t _Atomic(uint64_t) |
| #define | CACHE_LINE_SIZE 64 |
| #define | cas_decr(_store, _var) atomic_compare_exchange_strong_explicit(&_store, &_var, _var - 1, memory_order_release, memory_order_relaxed) |
| #define | cas_incr(_store, _var) atomic_compare_exchange_strong_explicit(&_store, &_var, _var + 1, memory_order_release, memory_order_relaxed) |
| #define | load(_var) atomic_load_explicit(&_var, memory_order_relaxed) |
| #define | store(_store, _var) atomic_store_explicit(&_store, _var, memory_order_release) |
Typedefs | |
| typedef struct fr_atomic_ring_entry_s | fr_atomic_ring_entry_t |
Functions | |
| static int | _atomic_ring_free (fr_atomic_ring_t *ring) |
| talloc destructor for fr_atomic_ring_t: walk the chain and free segments | |
| static void | atomic_queue_init (fr_atomic_queue_t *aq, size_t size) |
| Initialise the sequence numbers and head/tail on a fresh queue buffer. | |
| static fr_atomic_ring_entry_t * | atomic_ring_entry_alloc (size_t seg_size) |
| Allocate a fresh segment and its embedded queue. | |
| static void | atomic_ring_entry_free (fr_atomic_ring_entry_t *s) |
| void | fr_atomic_queue_debug (FILE *fp, fr_atomic_queue_t *aq) |
| Dump an atomic queue. | |
| void | fr_atomic_queue_free (fr_atomic_queue_t **aq) |
| Free an atomic queue if it's not freed by ctx. | |
| fr_atomic_queue_t * | fr_atomic_queue_malloc (size_t size) |
| Create fixed-size atomic queue outside any talloc hierarchy. | |
| bool | fr_atomic_queue_pop (fr_atomic_queue_t *aq, void **p_data) |
| Pop a pointer from the atomic queue. | |
| bool | fr_atomic_queue_push (fr_atomic_queue_t *aq, void *data) |
| Push a pointer into the atomic queue. | |
| size_t | fr_atomic_queue_size (fr_atomic_queue_t *aq) |
| fr_atomic_queue_t * | fr_atomic_queue_talloc (TALLOC_CTX *ctx, size_t size) |
| Create fixed-size atomic queue. | |
| fr_atomic_ring_t * | fr_atomic_ring_alloc (TALLOC_CTX *ctx, size_t seg_size) |
| Allocate an empty SPSC ring. | |
| void | fr_atomic_ring_free (fr_atomic_ring_t **ring_p) |
| Free the ring and all remaining segments. | |
| bool | fr_atomic_ring_pop (fr_atomic_ring_t *ring, void **p_data) |
| Pop a pointer from the ring, advancing past drained segments. | |
| bool | fr_atomic_ring_push (fr_atomic_ring_t *ring, void *data) |
| Push a pointer into the ring; allocate a new segment on overflow. | |
Thread-safe queues.
This is an implementation of a bounded MPMC ring buffer with per-slot sequence numbers, described by Dmitry Vyukov.
Definition in file atomic_queue.c.
| struct fr_atomic_queue_entry_t |
Entry in the queue.
Definition at line 62 of file atomic_queue.c.
| Data Fields | ||
|---|---|---|
| void * | data | |
| atomic_int64_t | seq | Must be seq then data to ensure seq is 64bit aligned for 32bit address spaces. |
| struct fr_atomic_queue_s |
Structure to hold the atomic queue.
Definition at line 76 of file atomic_queue.c.
Collaboration diagram for fr_atomic_queue_s.
| Data Fields | ||
|---|---|---|
| void * | chunk |
The start of the talloc chunk to pass to free, or NULL if this queue was allocated raw via fr_atomic_queue_malloc. We need to play tricks to get aligned memory with talloc. |
| fr_atomic_queue_entry_t | entry[] | The entry array, also aligned to ensure it's not in the same cache line as tail and size. |
| atomic_int64_t | head |
Position of the producer. Cache aligned bytes to ensure it's in a different cache line to tail to reduce memory contention. |
| size_t | size |
The length of the queue. This is static. Also needs to be cache aligned, otherwise it can end up directly after tail in memory and share a cache line. |
| atomic_int64_t | tail |
Position of the consumer. Cache aligned bytes to ensure it's in a different cache line to head to reduce memory contention. Reads may still need to occur from size whilst the producer is writing to head.
| struct fr_atomic_ring_entry_s |
Definition at line 436 of file atomic_queue.c.
Collaboration diagram for fr_atomic_ring_entry_s.
| Data Fields | ||
|---|---|---|
| fr_atomic_queue_t * | q | Per-segment MPMC ring (used SPSC here). |
| struct fr_atomic_ring_s |
Definition at line 443 of file atomic_queue.c.
| Data Fields | ||
|---|---|---|
| size_t | seg_size | Capacity of each segment. |
| #define acquire | ( | _var | ) | atomic_load_explicit(&_var, memory_order_acquire) |
Definition at line 51 of file atomic_queue.c.
Definition at line 44 of file atomic_queue.c.
Definition at line 45 of file atomic_queue.c.
Definition at line 46 of file atomic_queue.c.
| #define CACHE_LINE_SIZE 64 |
Definition at line 54 of file atomic_queue.c.
| #define cas_decr | ( | _store, | |
| _var | |||
| ) | atomic_compare_exchange_strong_explicit(&_store, &_var, _var - 1, memory_order_release, memory_order_relaxed) |
Definition at line 49 of file atomic_queue.c.
| #define cas_incr | ( | _store, | |
| _var | |||
| ) | atomic_compare_exchange_strong_explicit(&_store, &_var, _var + 1, memory_order_release, memory_order_relaxed) |
Definition at line 48 of file atomic_queue.c.
| #define load | ( | _var | ) | atomic_load_explicit(&_var, memory_order_relaxed) |
Definition at line 50 of file atomic_queue.c.
| #define store | ( | _store, | |
| _var | |||
| ) | atomic_store_explicit(&_store, _var, memory_order_release) |
Definition at line 52 of file atomic_queue.c.
| typedef struct fr_atomic_ring_entry_s fr_atomic_ring_entry_t |
Definition at line 434 of file atomic_queue.c.
|
static |
talloc destructor for fr_atomic_ring_t: walk the chain and free segments
Definition at line 480 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
|
static |
Initialise the sequence numbers and head/tail on a fresh queue buffer.
Shared between the talloc and raw allocators. The buffer must already be cache-line aligned and sized to hold size entries.
| [in] | aq | The queue buffer to initialise. |
| [in] | size | Entry count, already rounded up to a power of 2. |
Definition at line 112 of file atomic_queue.c.
Here is the caller graph for this function:
|
static |
Allocate a fresh segment and its embedded queue.
Uses the raw (non-talloc) allocator so this function is safe to call from the producer thread even when that thread cannot safely use talloc.
Definition at line 456 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
|
static |
Definition at line 473 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
| void fr_atomic_queue_debug | ( | FILE * | fp, |
| fr_atomic_queue_t * | aq | ||
| ) |
Dump an atomic queue.
Absolutely NOT thread-safe.
| [in] | aq | The atomic queue to debug. |
| [in] | fp | where the debugging information will be printed. |
Definition at line 661 of file atomic_queue.c.
Here is the caller graph for this function:
| void fr_atomic_queue_free | ( | fr_atomic_queue_t ** | aq | ) |
Free an atomic queue if it's not freed by ctx.
This function is needed because the atomic queue memory must be cache line aligned, and may live either in a talloc chunk or a raw posix_memalign allocation (aq->chunk == NULL).
Definition at line 212 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
| fr_atomic_queue_t * fr_atomic_queue_malloc | ( | size_t | size | ) |
Create fixed-size atomic queue outside any talloc hierarchy.
Backed by posix_memalign; the resulting queue is released with fr_atomic_queue_free (which detects the raw allocation via a NULL chunk field) or plain free() on the queue pointer.
Intended for callers that push or pop from threads where talloc is not safe (for example, library-owned callback threads).
| [in] | size | The number of entries in the queue. |
Definition at line 188 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
| bool fr_atomic_queue_pop | ( | fr_atomic_queue_t * | aq, |
| void ** | p_data | ||
| ) |
Pop a pointer from the atomic queue.
| [in] | aq | the atomic queue to retrieve data from. |
| [out] | p_data | where to write the data. |
Definition at line 355 of file atomic_queue.c.
Here is the caller graph for this function:
| bool fr_atomic_queue_push | ( | fr_atomic_queue_t * | aq, |
| void * | data | ||
| ) |
Push a pointer into the atomic queue.
| [in] | aq | The atomic queue to add data to. |
| [in] | data | to push. |
Definition at line 232 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
| size_t fr_atomic_queue_size | ( | fr_atomic_queue_t * | aq | ) |
| fr_atomic_queue_t * fr_atomic_queue_talloc | ( | TALLOC_CTX * | ctx, |
| size_t | size | ||
| ) |
Create fixed-size atomic queue.
| [in] | ctx | The talloc ctx to allocate the queue in. |
| [in] | size | The number of entries in the queue. |
Definition at line 143 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
| fr_atomic_ring_t * fr_atomic_ring_alloc | ( | TALLOC_CTX * | ctx, |
| size_t | seg_size | ||
| ) |
Allocate an empty SPSC ring.
| [in] | ctx | talloc ctx that owns the ring handle (segments live outside talloc; they are freed by the ring's destructor). |
| [in] | seg_size | Per-segment capacity. Rounded up to a power of 2. |
Definition at line 504 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
| void fr_atomic_ring_free | ( | fr_atomic_ring_t ** | ring_p | ) |
Free the ring and all remaining segments.
Equivalent to talloc_free() on the ring, but nulls the caller's handle in the style of fr_atomic_queue_free.
Definition at line 533 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
| bool fr_atomic_ring_pop | ( | fr_atomic_ring_t * | ring, |
| void ** | p_data | ||
| ) |
Pop a pointer from the ring, advancing past drained segments.
Single-consumer only. Must not be called concurrently with itself.
| [in] | ring | Ring to pop from. |
| [out] | p_data | Where to write the popped value on success. |
Definition at line 595 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
| bool fr_atomic_ring_push | ( | fr_atomic_ring_t * | ring, |
| void * | data | ||
| ) |
Push a pointer into the ring; allocate a new segment on overflow.
Single-producer only. Must not be called concurrently with itself.
| [in] | ring | Ring to push into. |
| [in] | data | Value to push (must be non-NULL). |
Definition at line 552 of file atomic_queue.c.
Here is the call graph for this function:
Here is the caller graph for this function:
1.9.8