/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper by Josh Triplett, Paul E. McKenney
 * and Jonathan Walpole:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
18 #ifndef _LINUX_RHASHTABLE_H
19 #define _LINUX_RHASHTABLE_H
21 #include <linux/compiler.h>
22 #include <linux/list_nulls.h>
23 #include <linux/workqueue.h>
24 #include <linux/mutex.h>
/*
 * The end of the chain is marked with a special nulls marks which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |  Hash                                               |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
/* Bit budget of the nulls marker (see diagram above): 4 base bits + 27
 * hash bits + the always-set low nulls bit.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
/* The base value sits above the hash bits in the marker. */
#define RHT_BASE_SHIFT		RHT_HASH_BITS
46 struct rhash_head __rcu *next;
50 * struct bucket_table - Table of hash buckets
51 * @size: Number of hash buckets
52 * @rehash: Current bucket being rehashed
53 * @hash_rnd: Random seed to fold into hash
54 * @locks_mask: Mask to apply before accessing locks[]
55 * @locks: Array of spinlocks protecting individual buckets
56 * @walkers: List of active walkers
57 * @rcu: RCU structure for freeing the table
58 * @future_tbl: Table under construction during rehashing
59 * @buckets: size * hash buckets
65 unsigned int locks_mask;
67 struct list_head walkers;
70 struct bucket_table __rcu *future_tbl;
72 struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
/* Hash a key of @len bytes with @seed folded in. */
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
/* Hash a whole object (used when the key cannot be hashed directly). */
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
81 * struct rhashtable_params - Hash table construction parameters
82 * @nelem_hint: Hint on number of elements, should be 75% of desired size
83 * @key_len: Length of key
84 * @key_offset: Offset of key in struct to be hashed
85 * @head_offset: Offset of rhash_head in struct to be hashed
86 * @max_shift: Maximum number of shifts while expanding
87 * @min_shift: Minimum number of shifts while shrinking
88 * @nulls_base: Base value to generate nulls marker
89 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
90 * @hashfn: Function to hash key
91 * @obj_hashfn: Function to hash object
93 struct rhashtable_params {
103 rht_obj_hashfn_t obj_hashfn;
107 * struct rhashtable - Hash table handle
109 * @nelems: Number of elements in table
110 * @p: Configuration parameters
111 * @run_work: Deferred worker to expand/shrink asynchronously
112 * @mutex: Mutex to protect current/future table swapping
113 * @being_destroyed: True if table is set up for destruction
116 struct bucket_table __rcu *tbl;
118 bool being_destroyed;
119 struct rhashtable_params p;
120 struct work_struct run_work;
125 * struct rhashtable_walker - Hash table walker
126 * @list: List entry on list of walkers
127 * @tbl: The table that we were walking over
129 struct rhashtable_walker {
130 struct list_head list;
131 struct bucket_table *tbl;
/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker *walker;
	/* NOTE(review): @slot and @skip are documented above but were
	 * missing from this truncated copy; restored with assumed
	 * unsigned int type -- confirm against the implementation.
	 */
	unsigned int slot;
	unsigned int skip;
};
150 static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
152 return NULLS_MARKER(ht->p.nulls_base + hash);
/* Initialize an empty bucket head @ptr to the table's nulls marker. */
#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))
158 static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
160 return ((unsigned long) ptr & 1);
/* Recover the value (base + hash) encoded in a nulls marker by dropping the
 * always-set low marker bit. Function braces were lost in this copy; restored.
 */
static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}
168 #ifdef CONFIG_PROVE_LOCKING
169 int lockdep_rht_mutex_is_held(struct rhashtable *ht);
170 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
172 static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
177 static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
182 #endif /* CONFIG_PROVE_LOCKING */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);

/* Insert/remove; @node is the rhash_head embedded in the hashed object. */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);

/* Explicitly grow/shrink the bucket table; return 0 or a negative errno. */
int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);

/* Lookup by key, or by key plus a caller-supplied compare callback that
 * receives @arg as context.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key);
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg);

/* Insert @obj only if no element with the same key exists; returns false
 * on a duplicate.
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
197 bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
198 struct rhash_head *obj,
199 bool (*compare)(void *, void *),
/* Iterator API: init/exit manage the walker; start enters an RCU read-side
 * critical section (see __acquires(RCU)) and stop leaves it; next returns
 * the following element or NULL at the end.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

void rhashtable_destroy(struct rhashtable *ht);
/* Dereference protected by the table mutex (update side). */
#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

/* RCU dereference, additionally valid when the table mutex is held. */
#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

/* Dereference protected by the bucket lock for @hash. */
#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

/* RCU dereference, additionally valid when the bucket lock is held. */
#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

/* Resolve @pos to its container @tpos; always evaluates to 1 so it can be
 * chained into loop conditions.
 */
#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * NOTE(review): the macro was truncated in this copy -- the final
 * continuation line forwarding "tbl, hash, member" to
 * rht_for_each_entry_continue() (which takes six arguments) is restored.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
				    tbl, hash, member)
/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 *
 * NOTE(review): the increment clause was truncated in this copy -- it only
 * recomputed @next and never advanced @pos.  The "pos = next," step that
 * makes the pre-fetched successor the new cursor (so @pos may be freed by
 * the loop body) is restored.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	    \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = next,						    \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_raw(pos->next))
/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 *
 * NOTE(review): the macro was truncated in this copy -- the final
 * continuation line forwarding "tbl, hash, member" to
 * rht_for_each_entry_rcu_continue() (which takes six arguments) is restored.
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)
358 #endif /* _LINUX_RHASHTABLE_H */