/*
 * Definitions for the 'struct ptr_ring' datastructure.
 *
 * Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This is a limited-size FIFO maintaining pointers in FIFO order, with
 * one CPU producing entries and another consuming entries from a FIFO.
 *
 * This implementation tries to minimize cache-contention when there is a
 * single producer and a single consumer CPU.
 */
#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/errno.h>
struct ptr_ring {
	int producer ____cacheline_aligned_in_smp;
	spinlock_t producer_lock;
	int consumer ____cacheline_aligned_in_smp;
	spinlock_t consumer_lock;
	/* Shared consumer/producer data */
	/* Read-only by both the producer and the consumer */
	int size ____cacheline_aligned_in_smp; /* max entries in queue */
	void **queue;
};
/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). If ring is ever resized, callers must hold
 * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
 * producer_lock, the next call to __ptr_ring_produce may fail.
 */
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
	return r->queue[r->producer];
}
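
/*
 * Design note: the ring keeps no explicit count. A slot is free iff it
 * holds NULL, so "full" simply means the slot the producer would write
 * next is still occupied, and "empty" means the slot the consumer would
 * read next is NULL. Each side therefore only touches its own index and
 * cache line in the common case.
 */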
static inline bool ptr_ring_full(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_full(r);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}
/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must hold producer_lock.
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	if (unlikely(!r->size) || r->queue[r->producer])
		return -ENOSPC;

	r->queue[r->producer++] = ptr;
	if (unlikely(r->producer >= r->size))
		r->producer = 0;

	return 0;
}
/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * consume in interrupt or BH context, you must disable interrupts/BH when
 * producing.
 */
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock(&r->producer_lock);

	return ret;
}
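
/*
 * Illustrative sketch, not part of the API: one way a sole producer might
 * push an entry, spinning until a slot opens up. The helper name is
 * hypothetical; barrier() comes from linux/compiler.h and is the compiler
 * barrier the notes above call for.
 */
static inline void ptr_ring_example_busy_produce(struct ptr_ring *r,
						 void *ptr)
{
	while (ptr_ring_produce(r, ptr))	/* -ENOSPC while full */
		barrier();
}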
static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}
/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
 * If ring is never resized, and if the pointer is merely
 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
 */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
	if (likely(r->size))
		return r->queue[r->consumer];
	return NULL;
}
/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if the ring is ever resized - see e.g. ptr_ring_empty.
 */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
	return !__ptr_ring_peek(r);
}
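
/*
 * Illustrative sketch, not part of the API: a sole consumer waiting for
 * work without taking consumer_lock, which the note above permits as long
 * as the ring is never resized. The helper name is hypothetical.
 */
static inline void ptr_ring_example_wait_nonempty(struct ptr_ring *r)
{
	while (__ptr_ring_empty(r))
		barrier();	/* compiler barrier, per the note above */
}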
static inline bool ptr_ring_empty(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_irq(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_empty(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ret;
}

static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_bh(&r->consumer_lock);

	return ret;
}
/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
	r->queue[r->consumer++] = NULL;
	if (unlikely(r->consumer >= r->size))
		r->consumer = 0;
}
static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	ptr = __ptr_ring_peek(r);
	if (ptr)
		__ptr_ring_discard_one(r);

	return ptr;
}
/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * call this in interrupt or BH context, you must disable interrupts/BH when
 * producing.
 */
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock(&r->consumer_lock);

	return ptr;
}
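
/*
 * Illustrative sketch, not part of the API: draining whatever is currently
 * queued into a caller-supplied handler. The helper name is hypothetical.
 */
static inline void ptr_ring_example_drain(struct ptr_ring *r,
					  void (*fn)(void *))
{
	void *ptr;

	while ((ptr = ptr_ring_consume(r)))
		fn(ptr);
}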
static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_irq(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irq(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ptr;
}

static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_bh(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_bh(&r->consumer_lock);

	return ptr;
}
/* Cast to structure type and call a function without discarding from FIFO.
 * Function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})
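
/*
 * Usage sketch (hypothetical callback, illustration only): given
 *
 *	int example_entry_len(struct example_entry *e);
 *
 * a caller can inspect the head-of-queue entry without consuming it:
 *
 *	int len = PTR_RING_PEEK_CALL(&ring, example_entry_len);
 *
 * Note the callback must tolerate a NULL argument: the macro passes
 * __ptr_ring_peek()'s result through unchanged, and typeof((f)(NULL))
 * requires that NULL be an acceptable argument.
 */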
#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	unsigned long __PTR_RING_PEEK_CALL_f; \
	\
	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v; \
})
static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
{
	return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
}
static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
	if (!r->queue)
		return -ENOMEM;

	r->size = size;
	r->producer = r->consumer = 0;
	spin_lock_init(&r->producer_lock);
	spin_lock_init(&r->consumer_lock);

	return 0;
}
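
/*
 * Illustrative sketch, not part of the API: typical setup, checking for
 * allocation failure. The helper name and 64-entry size are hypothetical.
 */
static inline int ptr_ring_example_setup(struct ptr_ring *r)
{
	return ptr_ring_init(r, 64, GFP_KERNEL);	/* -ENOMEM on failure */
}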
static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
					   int size, gfp_t gfp,
					   void (*destroy)(void *))
{
	int producer = 0;
	void **old;
	void *ptr;

	while ((ptr = __ptr_ring_consume(r)))
		if (producer < size)
			queue[producer++] = ptr;
		else if (destroy)
			destroy(ptr);

	r->size = size;
	r->producer = producer;
	r->consumer = 0;
	old = r->queue;
	r->queue = queue;

	return old;
}
/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
				  void (*destroy)(void *))
{
	unsigned long flags;
	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
	void **old;

	if (!queue)
		return -ENOMEM;

	spin_lock_irqsave(&(r)->consumer_lock, flags);
	spin_lock(&(r)->producer_lock);

	old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);

	spin_unlock(&(r)->producer_lock);
	spin_unlock_irqrestore(&(r)->consumer_lock, flags);

	kfree(old);

	return 0;
}
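
/*
 * Illustrative sketch, not part of the API: growing a ring and freeing any
 * entries that no longer fit. Both helper names are hypothetical; entries
 * are assumed to have been kmalloc'd by the caller.
 */
static inline void ptr_ring_example_free_entry(void *entry)
{
	kfree(entry);
}

static inline int ptr_ring_example_grow(struct ptr_ring *r, int new_size)
{
	return ptr_ring_resize(r, new_size, GFP_KERNEL,
			       ptr_ring_example_free_entry);
}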
/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
					   int size,
					   gfp_t gfp, void (*destroy)(void *))
{
	unsigned long flags;
	void ***queues;
	int i;

	queues = kmalloc(nrings * sizeof *queues, gfp);
	if (!queues)
		goto noqueues;

	for (i = 0; i < nrings; ++i) {
		queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
		if (!queues[i])
			goto nomem;
	}

	for (i = 0; i < nrings; ++i) {
		spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
		spin_lock(&(rings[i])->producer_lock);
		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
						  size, gfp, destroy);
		spin_unlock(&(rings[i])->producer_lock);
		spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
	}

	for (i = 0; i < nrings; ++i)
		kfree(queues[i]);

	kfree(queues);

	return 0;

nomem:
	while (--i >= 0)
		kfree(queues[i]);

	kfree(queues);

noqueues:
	return -ENOMEM;
}
static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
	void *ptr;

	if (destroy)
		while ((ptr = ptr_ring_consume(r)))
			destroy(ptr);
	kfree(r->queue);
}
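
/*
 * Illustrative teardown sketch, not part of the API: by this point the
 * caller must guarantee no producers or consumers are still running. This
 * reuses the hypothetical entry-freeing helper from the resize example.
 */
static inline void ptr_ring_example_teardown(struct ptr_ring *r)
{
	ptr_ring_cleanup(r, ptr_ring_example_free_entry);
}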
#endif /* _LINUX_PTR_RING_H */