#include <sys/queue.h>

#define RTE_TAILQ_RING_NAME "RTE_RING"

enum rte_ring_queue_behavior {
	RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
	RTE_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from ring */
};

#define RTE_RING_MZ_PREFIX "RG_"
/**< The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
			   sizeof(RTE_RING_MZ_PREFIX) + 1)

/* Pad the producer and consumer head/tail pairs out to two cache lines on
 * small-cache-line targets so the two pairs never share a line (avoiding
 * false sharing between producer and consumer cores). */
#if RTE_CACHE_LINE_SIZE < 128
#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
#else
#define PROD_ALIGN RTE_CACHE_LINE_SIZE
#define CONS_ALIGN RTE_CACHE_LINE_SIZE
#endif

/* Structure to hold a pair of head/tail values and other metadata. */
struct rte_ring_headtail {
	volatile uint32_t head;  /**< Producer/consumer head. */
	volatile uint32_t tail;  /**< Producer/consumer tail. */
	uint32_t single;         /**< True if single producer/consumer. */
};

/* The struct rte_ring definition itself is elided in this excerpt; it holds
 * the name, flags, memzone pointer, size, mask and capacity, plus one
 * rte_ring_headtail for the producer and one for the consumer, aligned to
 * PROD_ALIGN/CONS_ALIGN respectively. */

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
/**
 * Ring holds exactly the requested number of entries.
 * Without this flag set, the ring size must be a power of 2 and the usable
 * space is that size minus one.
 */
#define RING_F_EXACT_SZ 0x0004
#define RTE_RING_SZ_MASK (0x7fffffffU) /**< Ring size mask */

struct rte_ring *rte_ring_create(const char *name, unsigned count,
				 int socket_id, unsigned flags);
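/*
 * Usage sketch (illustrative, not part of this header): create a
 * single-producer/single-consumer ring holding exactly 1024 entries,
 * then find it again by name elsewhere in the program. The name
 * "sketch_ring" is invented for this example, and rte_eal_init() is
 * assumed to have run already.
 */
#include <rte_ring.h>

static struct rte_ring *
make_spsc_ring(void)
{
	/* Without RING_F_EXACT_SZ, count must be a power of two and the
	 * usable capacity would be count - 1. */
	return rte_ring_create("sketch_ring", 1024, SOCKET_ID_ANY,
			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
}

static struct rte_ring *
find_sketch_ring(void)
{
	return rte_ring_lookup("sketch_ring");	/* NULL if not found */
}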
/* The actual enqueue of pointers on the ring. Placed here since identical
 * code is needed in both the single- and multi-producer enqueue functions. */
#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
	unsigned int i; \
	const uint32_t size = (r)->size; \
	uint32_t idx = prod_head & (r)->mask; \
	obj_type *ring = (obj_type *)ring_start; \
	if (likely(idx + n < size)) { \
		/* contiguous fast path, unrolled four at a time */ \
		for (i = 0; i < (n & ((~(unsigned)0x3))); i += 4, idx += 4) { \
			ring[idx] = obj_table[i]; \
			ring[idx + 1] = obj_table[i + 1]; \
			ring[idx + 2] = obj_table[i + 2]; \
			ring[idx + 3] = obj_table[i + 3]; \
		} \
		switch (n & 0x3) { \
		case 3: \
			ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 2: \
			ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 1: \
			ring[idx++] = obj_table[i++]; \
		} \
	} else { \
		/* the copy wraps: fill to the end, then restart at slot 0 */ \
		for (i = 0; idx < size; i++, idx++) \
			ring[idx] = obj_table[i]; \
		for (idx = 0; i < n; i++, idx++) \
			ring[idx] = obj_table[i]; \
	} \
} while (0)

/* The actual copy of pointers from the ring to obj_table. Placed here since
 * identical code is needed in both the single- and multi-consumer dequeue
 * functions. */
#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
	unsigned int i; \
	uint32_t idx = cons_head & (r)->mask; \
	const uint32_t size = (r)->size; \
	obj_type *ring = (obj_type *)ring_start; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) { \
			obj_table[i] = ring[idx]; \
			obj_table[i + 1] = ring[idx + 1]; \
			obj_table[i + 2] = ring[idx + 2]; \
			obj_table[i + 3] = ring[idx + 3]; \
		} \
		switch (n & 0x3) { \
		case 3: \
			obj_table[i++] = ring[idx++]; /* fallthrough */ \
		case 2: \
			obj_table[i++] = ring[idx++]; /* fallthrough */ \
		case 1: \
			obj_table[i++] = ring[idx++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++) \
			obj_table[i] = ring[idx]; \
		for (idx = 0; i < n; i++, idx++) \
			obj_table[i] = ring[idx]; \
	} \
} while (0)

static __rte_always_inline void
update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
		uint32_t single)
{
	/*
	 * If there are other enqueues/dequeues in progress that preceded us,
	 * we need to wait for them to complete first: the tail must advance
	 * in the same order the heads were claimed.
	 */
	if (!single)
		while (unlikely(ht->tail != old_val))
			rte_pause();

	ht->tail = new_val;
}
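/*
 * A minimal standalone sketch (not DPDK code) of the wraparound copy
 * ENQUEUE_PTRS performs, without the unrolling: indices are reduced
 * modulo a power-of-two size via a mask, and a copy that would run past
 * the end is split into two segments. The names slots, SIZE and MASK
 * are invented for this sketch.
 */
#include <stdint.h>

#define SIZE 8u			/* power of two, like r->size */
#define MASK (SIZE - 1)		/* like r->mask */

static void *slots[SIZE];	/* like the ring storage at &r[1] */

static void
enqueue_ptrs_sketch(uint32_t prod_head, void *const *obj_table, uint32_t n)
{
	uint32_t idx = prod_head & MASK;
	uint32_t i;

	if (idx + n < SIZE) {			/* contiguous fast path */
		for (i = 0; i < n; i++, idx++)
			slots[idx] = obj_table[i];
	} else {				/* split copy on wraparound */
		for (i = 0; idx < SIZE; i++, idx++)
			slots[idx] = obj_table[i];
		for (idx = 0; i < n; i++, idx++)
			slots[idx] = obj_table[i];
	}
}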
static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *free_entries)
{
	const uint32_t capacity = r->capacity;
	unsigned int max = n;
	int success;

	do {
		/* Reset n to the initial burst count */
		n = max;

		*old_head = r->prod.head;

		/* add rmb barrier to avoid load/load reorder in weak
		 * memory model. It is noop on x86 */
		rte_smp_rmb();

		const uint32_t cons_tail = r->cons.tail;
		/*
		 * The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * *old_head > cons_tail), so 'free_entries' is always between
		 * 0 and capacity (which is < size).
		 */
		*free_entries = (capacity + cons_tail - *old_head);

		/* check that we have enough room in ring */
		if (unlikely(n > *free_entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *free_entries;

		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_sp)
			r->prod.head = *new_head, success = 1;
		else
			success = rte_atomic32_cmpset(&r->prod.head,
					*old_head, *new_head);
	} while (unlikely(success == 0));
	return n;
}
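/*
 * A worked example (illustrative, not part of the header) of the
 * free_entries arithmetic above: even after the 32-bit counters wrap
 * past UINT32_MAX, the modulo subtraction still yields the right count.
 * All numbers here are made up.
 */
#include <stdint.h>
#include <assert.h>

static void
free_entries_example(void)
{
	const uint32_t capacity = 1023;
	/* the producer head has wrapped, the consumer tail has not yet */
	uint32_t prod_head = 5;				/* 2^32 + 5 enqueues */
	uint32_t cons_tail = UINT32_C(0xfffffffe);	/* 2^32 - 2 dequeues */

	/* in-flight entries = prod_head - cons_tail = 7 (mod 2^32) */
	uint32_t free_entries = capacity + cons_tail - prod_head;

	assert(free_entries == 1023 - 7);
}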
static __rte_always_inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		int is_sp, unsigned int *free_space)
{
	uint32_t prod_head, prod_next;
	uint32_t free_entries;

	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n == 0)
		goto end;

	ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
	rte_smp_wmb();

	update_tail(&r->prod, prod_head, prod_next, is_sp);
end:
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}
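/*
 * A stripped-down C11 model (not DPDK code) of the three-step protocol
 * __rte_ring_do_enqueue follows: (1) claim a region by moving the head,
 * (2) copy into the claimed slots, (3) publish by moving the tail once
 * earlier claimants have published. The names model_ring and SLOTS are
 * invented, and the consumer tail is passed in to keep the sketch small;
 * the real ring reads it from the shared structure.
 */
#include <stdatomic.h>
#include <stdint.h>

#define SLOTS 256u	/* power of two; usable capacity is SLOTS - 1 */

struct model_ring {
	_Atomic uint32_t prod_head;	/* next slot to claim */
	_Atomic uint32_t prod_tail;	/* last published slot */
	void *slots[SLOTS];
};

static int
model_enqueue_one(struct model_ring *r, void *obj, uint32_t cons_tail)
{
	uint32_t head = atomic_load(&r->prod_head);

	/* step 1: claim slot 'head' via CAS, like __rte_ring_move_prod_head */
	do {
		if ((uint32_t)(head - cons_tail) >= SLOTS - 1)
			return -1;	/* ring full */
	} while (!atomic_compare_exchange_weak(&r->prod_head, &head, head + 1));

	/* step 2: copy into the claimed slot, like ENQUEUE_PTRS */
	r->slots[head & (SLOTS - 1)] = obj;

	/* step 3: publish in claim order, like update_tail */
	while (atomic_load(&r->prod_tail) != head)
		;	/* wait for earlier claimants to publish */
	atomic_store(&r->prod_tail, head + 1);

	return 0;
}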
static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *entries)
{
	unsigned int max = n;
	int success;

	/* move cons.head atomically */
	do {
		/* Restore n as it may change every loop */
		n = max;

		*old_head = r->cons.head;

		/* add rmb barrier to avoid load/load reorder in weak
		 * memory model. It is noop on x86 */
		rte_smp_rmb();

		const uint32_t prod_tail = r->prod.tail;
		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * cons_head > prod_tail), so 'entries' is always between 0
		 * and size(ring) - 1. */
		*entries = (prod_tail - *old_head);

		/* Set the actual entries for dequeue */
		if (n > *entries)
			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

		if (unlikely(n == 0))
			return 0;

		*new_head = *old_head + n;
		if (is_sc)
			r->cons.head = *new_head, success = 1;
		else
			success = rte_atomic32_cmpset(&r->cons.head,
					*old_head, *new_head);
	} while (unlikely(success == 0));
	return n;
}
static __rte_always_inline unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		int is_sc, unsigned int *available)
{
	uint32_t cons_head, cons_next;
	uint32_t entries;

	n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
			&cons_head, &cons_next, &entries);
	if (n == 0)
		goto end;

	DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
	rte_smp_rmb();

	update_tail(&r->cons, cons_head, cons_next, is_sc);
end:
	if (available != NULL)
		*available = entries - n;
	return n;
}
#define __IS_SP 1
#define __IS_MP 0
#define __IS_SC 1
#define __IS_MC 0

static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_MP, free_space);
}
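/*
 * Usage sketch for the fixed-count ("bulk") API (illustrative, not part
 * of the header), assuming a ring 'r' created elsewhere: the call either
 * enqueues all n pointers or none, so the return value is only ever 0 or n.
 */
static int
send_batch(struct rte_ring *r, void *const batch[4])
{
	unsigned int free_space;

	if (rte_ring_mp_enqueue_bulk(r, batch, 4, &free_space) == 0)
		return -1;	/* fewer than 4 slots free; nothing written */

	/* on success, free_space holds the room left after our 4 entries */
	return 0;
}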
static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_SP, free_space);
}
static __rte_always_inline unsigned int
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			r->prod.single, free_space);
}
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_MC, available);
}
static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_SC, available);
}
static __rte_always_inline unsigned int
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			r->cons.single, available);
}
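/*
 * Matching consumer-side sketch (illustrative, not part of the header),
 * again assuming a ring 'r' from elsewhere: with FIXED behavior the
 * dequeue is all-or-nothing, and 'available' reports what remains.
 */
static int
recv_batch(struct rte_ring *r, void *batch[4])
{
	unsigned int available;

	if (rte_ring_dequeue_bulk(r, batch, 4, &available) == 0)
		return -1;	/* fewer than 4 entries present; nothing read */

	return 0;	/* batch[0..3] now hold the dequeued pointers */
}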
static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	uint32_t count = (prod_tail - cons_tail) & r->mask;
	return (count > r->capacity) ? r->capacity : count;
}
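/*
 * Quick sketch of what the tail counters expose (illustrative, not part
 * of the header), assuming a ring 'r' from elsewhere: occupancy plus
 * free room always adds up to the usable capacity.
 */
#include <stdio.h>

static void
report_occupancy(const struct rte_ring *r)
{
	unsigned int used = rte_ring_count(r);
	/* same value rte_ring_free_count(), defined just below, returns */
	unsigned int free_room = r->capacity - used;

	printf("ring %s: %u used, %u free\n", r->name, used, free_room);
}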
static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
	return r->capacity - rte_ring_count(r);
}

static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
	return r->size;
}

static inline unsigned int
rte_ring_get_capacity(const struct rte_ring *r)
{
	return r->capacity;
}

static __rte_always_inline unsigned
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
}
static __rte_always_inline unsigned
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}
static __rte_always_inline unsigned
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
			r->prod.single, free_space);
}
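/*
 * Sketch of the burst ("variable") contract (illustrative, not part of
 * the header), assuming a ring 'r' and an array 'objs' from elsewhere:
 * unlike the bulk calls, a burst call may enqueue only a prefix, so the
 * caller advances past what was accepted and retries the remainder.
 */
static void
send_all(struct rte_ring *r, void **objs, unsigned int count)
{
	unsigned int sent = 0;

	while (sent < count) {
		sent += rte_ring_enqueue_burst(r, objs + sent,
				count - sent, NULL);
		/* a real application would back off or drop here
		 * instead of spinning on a full ring */
	}
}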
static __rte_always_inline unsigned
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
}
static __rte_always_inline unsigned
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
}
static __rte_always_inline unsigned
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE,
			r->cons.single, available);
}
/*
 * Index of the remaining symbols this header declares or depends on.
 *
 * Data fields of struct rte_ring:
 *   char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned;
 *   const struct rte_memzone *memzone;
 *
 * Macros pulled in from other EAL headers:
 *   #define __rte_always_inline
 *   #define RTE_MEMZONE_NAMESIZE
 *
 * Barrier/atomic dependencies (rte_atomic.h, rte_pause.h):
 *   static void rte_smp_rmb(void);
 *   static void rte_smp_wmb(void);
 *   static void rte_pause(void);
 *   static int rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp,
 *           uint32_t src);
 *
 * Ring lifecycle and debugging:
 *   ssize_t rte_ring_get_memsize(unsigned count);
 *   int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
 *           unsigned flags);
 *   struct rte_ring *rte_ring_create(const char *name, unsigned count,
 *           int socket_id, unsigned flags);
 *   void rte_ring_free(struct rte_ring *r);
 *   struct rte_ring *rte_ring_lookup(const char *name);
 *   void rte_ring_dump(FILE *f, const struct rte_ring *r);
 *   void rte_ring_list_dump(FILE *f);
 *
 * Single-object enqueue/dequeue (thin wrappers over the bulk calls):
 *   static __rte_always_inline int rte_ring_mp_enqueue(struct rte_ring *r, void *obj);
 *   static __rte_always_inline int rte_ring_sp_enqueue(struct rte_ring *r, void *obj);
 *   static __rte_always_inline int rte_ring_enqueue(struct rte_ring *r, void *obj);
 *   static __rte_always_inline int rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p);
 *   static __rte_always_inline int rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p);
 *   static __rte_always_inline int rte_ring_dequeue(struct rte_ring *r, void **obj_p);
 *
 * Fixed-count (bulk) enqueue/dequeue, defined above:
 *   rte_ring_mp_enqueue_bulk, rte_ring_sp_enqueue_bulk, rte_ring_enqueue_bulk,
 *   rte_ring_mc_dequeue_bulk, rte_ring_sc_dequeue_bulk, rte_ring_dequeue_bulk
 *
 * Variable-count (burst) enqueue/dequeue, defined above:
 *   rte_ring_mp_enqueue_burst, rte_ring_sp_enqueue_burst, rte_ring_enqueue_burst,
 *   rte_ring_mc_dequeue_burst, rte_ring_sc_dequeue_burst, rte_ring_dequeue_burst
 *
 * State queries:
 *   static unsigned rte_ring_count(const struct rte_ring *r);
 *   static unsigned rte_ring_free_count(const struct rte_ring *r);
 *   static int rte_ring_full(const struct rte_ring *r);
 *   static int rte_ring_empty(const struct rte_ring *r);
 *   static unsigned int rte_ring_get_size(const struct rte_ring *r);
 *   static unsigned int rte_ring_get_capacity(const struct rte_ring *r);
 */
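/*
 * End-to-end usage sketch tying the API together (illustrative, not part
 * of this header): one producer and one consumer lcore sharing a default
 * MP/MC ring. The ring name, the NULL stop marker and the burst size are
 * choices made up for this example; it assumes rte_eal_init() succeeds
 * and at least two lcores are available.
 */
#include <stdint.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_pause.h>
#include <rte_ring.h>

#define BURST 32

static int
consumer(void *arg)
{
	struct rte_ring *r = arg;
	void *objs[BURST];

	for (;;) {
		unsigned int nb = rte_ring_dequeue_burst(r, objs, BURST, NULL);

		if (nb == 0)
			continue;		/* ring momentarily empty */
		/* ... process objs[0..nb-1] here ... */
		if (objs[nb - 1] == NULL)	/* producer's stop marker */
			return 0;
	}
}

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* flags == 0: multi-producer enqueue, multi-consumer dequeue */
	struct rte_ring *r = rte_ring_create("demo_ring", 1024,
			rte_socket_id(), 0);
	if (r == NULL)
		return -1;

	rte_eal_remote_launch(consumer, r, rte_get_next_lcore(-1, 1, 0));

	for (uintptr_t v = 1; v <= 1000; v++)
		while (rte_ring_enqueue(r, (void *)v) != 0)
			rte_pause();		/* ring full, retry */
	while (rte_ring_enqueue(r, NULL) != 0)	/* stop marker */
		rte_pause();

	rte_eal_mp_wait_lcore();
	rte_ring_free(r);
	return 0;
}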