lib/rhashtable.c (karo-tx-linux.git)
rhashtable: involve rhashtable_lookup_insert routine
1 /*
2  * Resizable, Scalable, Concurrent Hash Table
3  *
4  * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
5  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6  *
7  * Based on the following paper:
8  * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9  *
10  * Code partially derived from nft_hash
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  */
16
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/log2.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/mm.h>
23 #include <linux/jhash.h>
24 #include <linux/random.h>
25 #include <linux/rhashtable.h>
26
27 #define HASH_DEFAULT_SIZE       64UL
28 #define HASH_MIN_SIZE           4UL
29 #define BUCKET_LOCKS_PER_CPU   128UL
30
31 /* Base bits plus 1 bit for nulls marker */
32 #define HASH_RESERVED_SPACE     (RHT_BASE_BITS + 1)
33
34 enum {
35         RHT_LOCK_NORMAL,
36         RHT_LOCK_NESTED,
37         RHT_LOCK_NESTED2,
38 };
39
40 /* The bucket lock is selected based on the hash and protects mutations
41  * on a group of hash buckets.
42  *
43  * IMPORTANT: When holding the bucket lock of both the old and new table
44  * during expansions and shrinking, the old bucket lock must always be
45  * acquired first.
46  */
47 static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
48 {
49         return &tbl->locks[hash & tbl->locks_mask];
50 }
51
52 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
53 #define ASSERT_BUCKET_LOCK(TBL, HASH) \
54         BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))
55
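/* Usage sketch (editor's illustration, not part of the original file): the
 * lock ordering documented above, as followed by rhashtable_expand() and
 * rhashtable_shrink() further down. The caller is assumed to have looked up
 * both tables and computed both hashes already.
 */
static void example_lock_old_then_new(struct bucket_table *old_tbl, u32 old_hash,
                                      struct bucket_table *new_tbl, u32 new_hash)
{
        spinlock_t *old_lock = bucket_lock(old_tbl, old_hash);
        spinlock_t *new_lock = bucket_lock(new_tbl, new_hash);

        spin_lock_bh(old_lock);                         /* old table first */
        spin_lock_bh_nested(new_lock, RHT_LOCK_NESTED); /* then the new table */

        /* ... relink entries between the two tables ... */

        spin_unlock_bh(new_lock);
        spin_unlock_bh(old_lock);
}
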
56 #ifdef CONFIG_PROVE_LOCKING
57 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
58 {
59         return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
60 }
61 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
62
63 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
64 {
65         spinlock_t *lock = bucket_lock(tbl, hash);
66
67         return (debug_locks) ? lockdep_is_held(lock) : 1;
68 }
69 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
70 #endif
71
72 static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
73 {
74         return (void *) he - ht->p.head_offset;
75 }
76
77 static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
78 {
79         return hash & (tbl->size - 1);
80 }
81
82 static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
83 {
84         u32 hash;
85
86         if (unlikely(!ht->p.key_len))
87                 hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
88         else
89                 hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
90                                     ht->p.hash_rnd);
91
92         return hash >> HASH_RESERVED_SPACE;
93 }
94
95 static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
96 {
97         struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
98         u32 hash;
99
100         hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
101         hash >>= HASH_RESERVED_SPACE;
102
103         return rht_bucket_index(tbl, hash);
104 }
105
106 static u32 head_hashfn(const struct rhashtable *ht,
107                        const struct bucket_table *tbl,
108                        const struct rhash_head *he)
109 {
110         return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
111 }
112
113 static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
114 {
115         struct rhash_head __rcu **pprev;
116
117         for (pprev = &tbl->buckets[n];
118              !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
119              pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
120                 ;
121
122         return pprev;
123 }
124
125 static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
126 {
127         unsigned int i, size;
128 #if defined(CONFIG_PROVE_LOCKING)
129         unsigned int nr_pcpus = 2;
130 #else
131         unsigned int nr_pcpus = num_possible_cpus();
132 #endif
133
134         nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
135         size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
136
137         /* Never allocate more than one lock per bucket */
138         size = min_t(unsigned int, size, tbl->size);
139
140         if (sizeof(spinlock_t) != 0) {
141 #ifdef CONFIG_NUMA
142                 if (size * sizeof(spinlock_t) > PAGE_SIZE)
143                         tbl->locks = vmalloc(size * sizeof(spinlock_t));
144                 else
145 #endif
146                 tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
147                                            GFP_KERNEL);
148                 if (!tbl->locks)
149                         return -ENOMEM;
150                 for (i = 0; i < size; i++)
151                         spin_lock_init(&tbl->locks[i]);
152         }
153         tbl->locks_mask = size - 1;
154
155         return 0;
156 }
157
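/* Worked example (editor's note, not part of the original file): with 4
 * possible CPUs and the default locks_mul of BUCKET_LOCKS_PER_CPU (128),
 * alloc_bucket_locks() above would size the lock array for a 64-bucket
 * table as:
 *
 *      nr_pcpus = min(4, 32)                   =   4
 *      size     = roundup_pow_of_two(4 * 128)  = 512
 *      size     = min(512, tbl->size = 64)     =  64  locks (locks_mask = 63)
 */
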
158 static void bucket_table_free(const struct bucket_table *tbl)
159 {
160         if (tbl)
161                 kvfree(tbl->locks);
162
163         kvfree(tbl);
164 }
165
166 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
167                                                size_t nbuckets)
168 {
169         struct bucket_table *tbl;
170         size_t size;
171         int i;
172
173         size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
174         tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
175         if (tbl == NULL)
176                 tbl = vzalloc(size);
177
178         if (tbl == NULL)
179                 return NULL;
180
181         tbl->size = nbuckets;
182
183         if (alloc_bucket_locks(ht, tbl) < 0) {
184                 bucket_table_free(tbl);
185                 return NULL;
186         }
187
188         for (i = 0; i < nbuckets; i++)
189                 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
190
191         return tbl;
192 }
193
194 /**
195  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
196  * @ht:         hash table
197  * @new_size:   new table size
198  */
199 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
200 {
201         /* Expand table when exceeding 75% load */
202         return atomic_read(&ht->nelems) > (new_size / 4 * 3);
203 }
204 EXPORT_SYMBOL_GPL(rht_grow_above_75);
205
206 /**
207  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
208  * @ht:         hash table
209  * @new_size:   new table size
210  */
211 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
212 {
213         /* Shrink table beneath 30% load */
214         return atomic_read(&ht->nelems) < (new_size * 3 / 10);
215 }
216 EXPORT_SYMBOL_GPL(rht_shrink_below_30);
217
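/* Usage sketch (editor's illustration, not part of the original file): a
 * caller may supply its own watermark with the same signature as the helpers
 * above, e.g. a hypothetical policy that already grows at 50% load, and wire
 * it up through rhashtable_params.grow_decision at init time.
 */
static bool example_grow_above_50(const struct rhashtable *ht, size_t new_size)
{
        return atomic_read(&ht->nelems) > (new_size / 2);
}
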
218 static void hashtable_chain_unzip(const struct rhashtable *ht,
219                                   const struct bucket_table *new_tbl,
220                                   struct bucket_table *old_tbl,
221                                   size_t old_hash)
222 {
223         struct rhash_head *he, *p, *next;
224         spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
225         unsigned int new_hash, new_hash2;
226
227         ASSERT_BUCKET_LOCK(old_tbl, old_hash);
228
229         /* Old bucket empty, no work needed. */
230         p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
231                                    old_hash);
232         if (rht_is_a_nulls(p))
233                 return;
234
235         new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
236         new_bucket_lock = bucket_lock(new_tbl, new_hash);
237
238         /* Advance the old bucket pointer one or more times until it
239          * reaches a node that doesn't hash to the same bucket in the new
240          * table as its predecessor; call that predecessor p.
241          */
242         rht_for_each_continue(he, p->next, old_tbl, old_hash) {
243                 new_hash2 = head_hashfn(ht, new_tbl, he);
244                 if (new_hash != new_hash2)
245                         break;
246                 p = he;
247         }
248         rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
249
250         spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
251
252         /* If we have encountered an entry that maps to a different bucket in
253          * the new table, lock down that bucket as well, since we might cut
254          * off the end of its chain.
255          */
256         new_bucket_lock2 = bucket_lock(new_tbl, new_hash2);
257         if (new_bucket_lock != new_bucket_lock2)
258                 spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);
259
260         /* Find the subsequent node which does hash to the same
261          * bucket as node P, or NULL if no such node exists.
262          */
263         INIT_RHT_NULLS_HEAD(next, ht, old_hash);
264         if (!rht_is_a_nulls(he)) {
265                 rht_for_each_continue(he, he->next, old_tbl, old_hash) {
266                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
267                                 next = he;
268                                 break;
269                         }
270                 }
271         }
272
273         /* Set p's next pointer to that subsequent node pointer,
274          * bypassing the nodes which do not hash to p's bucket
275          */
276         rcu_assign_pointer(p->next, next);
277
278         if (new_bucket_lock != new_bucket_lock2)
279                 spin_unlock_bh(new_bucket_lock2);
280         spin_unlock_bh(new_bucket_lock);
281 }
282
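/* Worked example (editor's illustration, not part of the original file):
 * because rht_bucket_index() is just "hash & (size - 1)" and the table always
 * doubles, every entry of one old bucket can only land in two new buckets,
 * old_hash and old_hash + old_size. That is why a single old chain only ever
 * "zips" together a small, known set of new chains.
 */
static void example_bucket_split(void)
{
        u32 old_size = 4, new_size = 8;
        u32 hash_a = 0x21, hash_b = 0x35;       /* both fall into old bucket 1 */

        pr_info("old buckets: %u %u, new buckets: %u %u\n",
                hash_a & (old_size - 1), hash_b & (old_size - 1),   /* 1, 1 */
                hash_a & (new_size - 1), hash_b & (new_size - 1));  /* 1, 5 */
}
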
283 static void link_old_to_new(struct bucket_table *new_tbl,
284                             unsigned int new_hash, struct rhash_head *entry)
285 {
286         spinlock_t *new_bucket_lock;
287
288         new_bucket_lock = bucket_lock(new_tbl, new_hash);
289
290         spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
291         rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
292         spin_unlock_bh(new_bucket_lock);
293 }
294
295 /**
296  * rhashtable_expand - Expand hash table while allowing concurrent lookups
297  * @ht:         the hash table to expand
298  *
299  * A secondary bucket array is allocated and the hash entries are migrated
300  * while keeping them on both lists until the end of the RCU grace period.
301  *
302  * This function may only be called in a context where it is safe to call
303  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
304  *
305  * The caller must ensure that no concurrent resizing occurs by holding
306  * ht->mutex.
307  *
308  * It is valid to have concurrent insertions and deletions protected by per
309  * bucket locks or concurrent RCU protected lookups and traversals.
310  */
311 int rhashtable_expand(struct rhashtable *ht)
312 {
313         struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
314         struct rhash_head *he;
315         spinlock_t *old_bucket_lock;
316         unsigned int new_hash, old_hash;
317         bool complete = false;
318
319         ASSERT_RHT_MUTEX(ht);
320
321         if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
322                 return 0;
323
324         new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
325         if (new_tbl == NULL)
326                 return -ENOMEM;
327
328         ht->shift++;
329
330         /* Make insertions go into the new, empty table right away. Deletions
331          * and lookups will be attempted in both tables until we synchronize.
332          * The synchronize_rcu() guarantees that the new table is visible to
333          * all readers, so no new additions go into the old table while we relink.
334          */
335         rcu_assign_pointer(ht->future_tbl, new_tbl);
336         synchronize_rcu();
337
338         /* For each new bucket, search the corresponding old bucket for the
339          * first entry that hashes to the new bucket, and link the end of the
340          * newly formed bucket chain (containing entries added to the future
341          * table) to that entry. Since all the entries which will end up in
342          * the new bucket appear in the same old bucket, this constructs an
343          * entirely valid new hash table, but with multiple buckets
344          * "zipped" together into a single imprecise chain.
345          */
346         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
347                 old_hash = rht_bucket_index(old_tbl, new_hash);
348                 old_bucket_lock = bucket_lock(old_tbl, old_hash);
349
350                 spin_lock_bh(old_bucket_lock);
351                 rht_for_each(he, old_tbl, old_hash) {
352                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
353                                 link_old_to_new(new_tbl, new_hash, he);
354                                 break;
355                         }
356                 }
357                 spin_unlock_bh(old_bucket_lock);
358         }
359
360         /* Publish the new table pointer. Lookups may now traverse
361          * the new table, but they will not benefit from any
362          * additional efficiency until later steps unzip the buckets.
363          */
364         rcu_assign_pointer(ht->tbl, new_tbl);
365
366         /* Unzip interleaved hash chains */
367         while (!complete && !ht->being_destroyed) {
368                 /* Wait for readers. All new readers will see the new
369                  * table, and thus no references to the old table will
370                  * remain.
371                  */
372                 synchronize_rcu();
373
374                 /* For each bucket in the old table (each of which
375                  * contains items from multiple buckets of the new
376                  * table): ...
377                  */
378                 complete = true;
379                 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
380                         struct rhash_head *head;
381
382                         old_bucket_lock = bucket_lock(old_tbl, old_hash);
383                         spin_lock_bh(old_bucket_lock);
384
385                         hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
386                         head = rht_dereference_bucket(old_tbl->buckets[old_hash],
387                                                       old_tbl, old_hash);
388                         if (!rht_is_a_nulls(head))
389                                 complete = false;
390
391                         spin_unlock_bh(old_bucket_lock);
392                 }
393         }
394
395         bucket_table_free(old_tbl);
396         return 0;
397 }
398 EXPORT_SYMBOL_GPL(rhashtable_expand);
399
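/* Usage sketch (editor's illustration, not part of the original file): forcing
 * one expansion by hand, the way the self test at the bottom of this file
 * does; normally the deferred worker calls this on behalf of the user.
 * rhashtable_shrink() is driven in exactly the same way.
 */
static int example_force_expand(struct rhashtable *ht)
{
        int err;

        mutex_lock(&ht->mutex);         /* serialize against other resizes */
        err = rhashtable_expand(ht);
        mutex_unlock(&ht->mutex);

        return err;
}
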
400 /**
401  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
402  * @ht:         the hash table to shrink
403  *
404  * This function may only be called in a context where it is safe to call
405  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
406  *
407  * The caller must ensure that no concurrent resizing occurs by holding
408  * ht->mutex.
409  *
410  * The caller must ensure that no concurrent table mutations take place.
411  * It is however valid to have concurrent lookups if they are RCU protected.
412  *
413  * It is valid to have concurrent insertions and deletions protected by per
414  * bucket locks or concurrent RCU protected lookups and traversals.
415  */
416 int rhashtable_shrink(struct rhashtable *ht)
417 {
418         struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
419         spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
420         unsigned int new_hash;
421
422         ASSERT_RHT_MUTEX(ht);
423
424         if (ht->shift <= ht->p.min_shift)
425                 return 0;
426
427         new_tbl = bucket_table_alloc(ht, tbl->size / 2);
428         if (new_tbl == NULL)
429                 return -ENOMEM;
430
431         rcu_assign_pointer(ht->future_tbl, new_tbl);
432         synchronize_rcu();
433
434         /* Link the first entry in the old bucket to the end of the
435          * bucket in the new table. As entries are concurrently being
436          * added to the new table, lock down the new bucket. As we
437          * always divide the size in half when shrinking, each bucket
438          * in the new table maps to exactly two buckets in the old
439          * table.
440          *
441          * As removals can occur concurrently on the old table, we need
442          * to lock down both matching buckets in the old table.
443          */
444         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
445                 old_bucket_lock1 = bucket_lock(tbl, new_hash);
446                 old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
447                 new_bucket_lock = bucket_lock(new_tbl, new_hash);
448
449                 spin_lock_bh(old_bucket_lock1);
450                 spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
451                 spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
452
453                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
454                                    tbl->buckets[new_hash]);
455                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
456                                    tbl->buckets[new_hash + new_tbl->size]);
457
458                 spin_unlock_bh(new_bucket_lock);
459                 spin_unlock_bh(old_bucket_lock2);
460                 spin_unlock_bh(old_bucket_lock1);
461         }
462
463         /* Publish the new, valid hash table */
464         rcu_assign_pointer(ht->tbl, new_tbl);
465         ht->shift--;
466
467         /* Wait for readers. No new readers will have references to the
468          * old hash table.
469          */
470         synchronize_rcu();
471
472         bucket_table_free(tbl);
473
474         return 0;
475 }
476 EXPORT_SYMBOL_GPL(rhashtable_shrink);
477
478 static void rht_deferred_worker(struct work_struct *work)
479 {
480         struct rhashtable *ht;
481         struct bucket_table *tbl;
482
483         ht = container_of(work, struct rhashtable, run_work.work);
484         mutex_lock(&ht->mutex);
485         tbl = rht_dereference(ht->tbl, ht);
486
487         if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
488                 rhashtable_expand(ht);
489         else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
490                 rhashtable_shrink(ht);
491
492         mutex_unlock(&ht->mutex);
493 }
494
495 static void rhashtable_wakeup_worker(struct rhashtable *ht)
496 {
497         struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
498         struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
499         size_t size = tbl->size;
500
501         /* Only adjust the table if no resizing is currently in progress. */
502         if (tbl == new_tbl &&
503             ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
504              (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
505                 schedule_delayed_work(&ht->run_work, 0);
506 }
507
508 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
509                                 struct bucket_table *tbl, u32 hash)
510 {
511         struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
512                                                          tbl, hash);
513
514         if (rht_is_a_nulls(head))
515                 INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
516         else
517                 RCU_INIT_POINTER(obj->next, head);
518
519         rcu_assign_pointer(tbl->buckets[hash], obj);
520
521         atomic_inc(&ht->nelems);
522
523         rhashtable_wakeup_worker(ht);
524 }
525
526 /**
527  * rhashtable_insert - insert object into hash table
528  * @ht:         hash table
529  * @obj:        pointer to hash head inside object
530  *
531  * Will take a per bucket spinlock to protect against concurrent mutations
532  * on the same bucket. Multiple insertions may occur in parallel unless
533  * they map to the same bucket lock.
534  *
535  * It is safe to call this function from atomic context.
536  *
537  * Will trigger an automatic deferred table resizing if the size grows
538  * beyond the watermark indicated by grow_decision() which can be passed
539  * to rhashtable_init().
540  */
541 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
542 {
543         struct bucket_table *tbl;
544         spinlock_t *lock;
545         unsigned hash;
546
547         rcu_read_lock();
548
549         tbl = rht_dereference_rcu(ht->future_tbl, ht);
550         hash = head_hashfn(ht, tbl, obj);
551         lock = bucket_lock(tbl, hash);
552
553         spin_lock_bh(lock);
554         __rhashtable_insert(ht, obj, tbl, hash);
555         spin_unlock_bh(lock);
556
557         rcu_read_unlock();
558 }
559 EXPORT_SYMBOL_GPL(rhashtable_insert);
560
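/* Usage sketch (editor's illustration, not part of the original file):
 * inserting a heap-allocated object, mirroring the self test at the bottom of
 * this file. "struct example_obj" is a made-up structure used by this and the
 * following sketches; it is not defined anywhere in the kernel.
 */
struct example_obj {
        u32                     key;
        int                     value;
        struct rhash_head       node;
};

static int example_add(struct rhashtable *ht, u32 key, int value)
{
        struct example_obj *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        obj->key = key;
        obj->value = value;
        rhashtable_insert(ht, &obj->node);      /* may schedule a deferred resize */

        return 0;
}
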
561 /**
562  * rhashtable_remove - remove object from hash table
563  * @ht:         hash table
564  * @obj:        pointer to hash head inside object
565  *
566  * Since the hash chain is singly linked, the removal operation needs to
567  * walk the bucket chain upon removal. The removal operation is thus
568  * considerably slow if the hash table is not correctly sized.
569  *
570  * Will automatically shrink the table via rhashtable_shrink() if the
571  * shrink_decision function specified at rhashtable_init() returns true.
572  *
573  * The caller must ensure that no concurrent table mutations occur. It is
574  * however valid to have concurrent lookups if they are RCU protected.
575  */
576 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
577 {
578         struct bucket_table *tbl;
579         struct rhash_head __rcu **pprev;
580         struct rhash_head *he;
581         spinlock_t *lock;
582         unsigned int hash;
583
584         rcu_read_lock();
585         tbl = rht_dereference_rcu(ht->tbl, ht);
586         hash = head_hashfn(ht, tbl, obj);
587
588         lock = bucket_lock(tbl, hash);
589         spin_lock_bh(lock);
590
591 restart:
592         pprev = &tbl->buckets[hash];
593         rht_for_each(he, tbl, hash) {
594                 if (he != obj) {
595                         pprev = &he->next;
596                         continue;
597                 }
598
599                 rcu_assign_pointer(*pprev, obj->next);
600                 atomic_dec(&ht->nelems);
601
602                 spin_unlock_bh(lock);
603
604                 rhashtable_wakeup_worker(ht);
605
606                 rcu_read_unlock();
607
608                 return true;
609         }
610
611         if (tbl != rht_dereference_rcu(ht->tbl, ht)) {
612                 spin_unlock_bh(lock);
613
614                 tbl = rht_dereference_rcu(ht->tbl, ht);
615                 hash = head_hashfn(ht, tbl, obj);
616
617                 lock = bucket_lock(tbl, hash);
618                 spin_lock_bh(lock);
619                 goto restart;
620         }
621
622         spin_unlock_bh(lock);
623         rcu_read_unlock();
624
625         return false;
626 }
627 EXPORT_SYMBOL_GPL(rhashtable_remove);
628
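/* Usage sketch (editor's illustration, not part of the original file):
 * delete-by-key using the example_obj structure from the sketch above.
 * Freeing immediately is only safe if no reader can still hold an RCU
 * reference to the object; deferring the free (e.g. via kfree_rcu(), which
 * needs an rcu_head in the object) would be the cautious alternative.
 */
static bool example_del(struct rhashtable *ht, u32 key)
{
        struct example_obj *obj;

        obj = rhashtable_lookup(ht, &key);
        if (!obj)
                return false;

        rhashtable_remove(ht, &obj->node);
        kfree(obj);                     /* see the note on RCU readers above */

        return true;
}
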
629 struct rhashtable_compare_arg {
630         struct rhashtable *ht;
631         const void *key;
632 };
633
634 static bool rhashtable_compare(void *ptr, void *arg)
635 {
636         struct rhashtable_compare_arg *x = arg;
637         struct rhashtable *ht = x->ht;
638
639         return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
640 }
641
642 /**
643  * rhashtable_lookup - lookup key in hash table
644  * @ht:         hash table
645  * @key:        pointer to key
646  *
647  * Computes the hash value for the key and traverses the bucket chain looking
648  * for an entry with an identical key. The first matching entry is returned.
649  *
650  * This lookup function may only be used for fixed-key hash tables (key_len
651  * parameter set). It will BUG() if used inappropriately.
652  *
653  * Lookups may occur in parallel with hashtable mutations and resizing.
654  */
655 void *rhashtable_lookup(struct rhashtable *ht, const void *key)
656 {
657         struct rhashtable_compare_arg arg = {
658                 .ht = ht,
659                 .key = key,
660         };
661
662         BUG_ON(!ht->p.key_len);
663
664         return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
665 }
666 EXPORT_SYMBOL_GPL(rhashtable_lookup);
667
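/* Usage sketch (editor's illustration, not part of the original file): a
 * read-side lookup. Holding rcu_read_lock() across both the lookup and the
 * use of the returned object keeps it valid against a concurrent remover
 * that defers its free until after the grace period.
 */
static int example_peek(struct rhashtable *ht, u32 key)
{
        struct example_obj *obj;
        int value = -1;

        rcu_read_lock();
        obj = rhashtable_lookup(ht, &key);
        if (obj)
                value = obj->value;
        rcu_read_unlock();

        return value;
}
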
668 /**
669  * rhashtable_lookup_compare - search hash table with compare function
670  * @ht:         hash table
671  * @key:        the pointer to the key
672  * @compare:    compare function, must return true on match
673  * @arg:        argument passed on to compare function
674  *
675  * Traverses the bucket chain behind the provided hash value and calls the
676  * specified compare function for each entry.
677  *
678  * Lookups may occur in parallel with hashtable mutations and resizing.
679  *
680  * Returns the first entry on which the compare function returned true.
681  */
682 void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
683                                 bool (*compare)(void *, void *), void *arg)
684 {
685         const struct bucket_table *tbl, *old_tbl;
686         struct rhash_head *he;
687         u32 hash;
688
689         rcu_read_lock();
690
691         old_tbl = rht_dereference_rcu(ht->tbl, ht);
692         tbl = rht_dereference_rcu(ht->future_tbl, ht);
693         hash = key_hashfn(ht, key, ht->p.key_len);
694 restart:
695         rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
696                 if (!compare(rht_obj(ht, he), arg))
697                         continue;
698                 rcu_read_unlock();
699                 return rht_obj(ht, he);
700         }
701
702         if (unlikely(tbl != old_tbl)) {
703                 tbl = old_tbl;
704                 goto restart;
705         }
706         rcu_read_unlock();
707
708         return NULL;
709 }
710 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
711
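/* Usage sketch (editor's illustration, not part of the original file): a
 * caller-supplied compare function in the style of rhashtable_compare()
 * above. The key still selects the bucket chain; the compare callback then
 * narrows the match, here to a specific key/value pair of example_obj.
 */
struct example_compare_arg {
        u32 key;
        int value;
};

static bool example_compare(void *ptr, void *arg)
{
        struct example_obj *obj = ptr;
        struct example_compare_arg *x = arg;

        return obj->key == x->key && obj->value == x->value;
}

static struct example_obj *example_find_exact(struct rhashtable *ht,
                                              u32 key, int value)
{
        struct example_compare_arg arg = { .key = key, .value = value };

        return rhashtable_lookup_compare(ht, &key, example_compare, &arg);
}
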
712 /**
713  * rhashtable_lookup_insert - lookup and insert object into hash table
714  * @ht:         hash table
715  * @obj:        pointer to hash head inside object
716  *
717  * Locks down the bucket chain in both the old and new table if a resize
718  * is in progress to ensure that writers can't remove from the old table
719  * and can't insert to the new table during the atomic operation of search
720  * and insertion. Searches for duplicates in both the old and new table if
721  * a resize is in progress.
722  *
723  * This lookup function may only be used for fixed-key hash tables (key_len
724  * parameter set). It will BUG() if used inappropriately.
725  *
726  * It is safe to call this function from atomic context.
727  *
728  * Will trigger an automatic deferred table resizing if the size grows
729  * beyond the watermark indicated by grow_decision() which can be passed
730  * to rhashtable_init().
731  */
732 bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
733 {
734         struct bucket_table *new_tbl, *old_tbl;
735         spinlock_t *new_bucket_lock, *old_bucket_lock;
736         u32 new_hash, old_hash;
737         bool success = true;
738
739         BUG_ON(!ht->p.key_len);
740
741         rcu_read_lock();
742
743         old_tbl = rht_dereference_rcu(ht->tbl, ht);
744         old_hash = head_hashfn(ht, old_tbl, obj);
745         old_bucket_lock = bucket_lock(old_tbl, old_hash);
746         spin_lock_bh(old_bucket_lock);
747
748         new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
749         new_hash = head_hashfn(ht, new_tbl, obj);
750         new_bucket_lock = bucket_lock(new_tbl, new_hash);
751         if (unlikely(old_tbl != new_tbl))
752                 spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
753
754         if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) {
755                 success = false;
756                 goto exit;
757         }
758
759         __rhashtable_insert(ht, obj, new_tbl, new_hash);
760
761 exit:
762         if (unlikely(old_tbl != new_tbl))
763                 spin_unlock_bh(new_bucket_lock);
764         spin_unlock_bh(old_bucket_lock);
765
766         rcu_read_unlock();
767
768         return success;
769 }
770 EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
771
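/* Usage sketch (editor's illustration, not part of the original file):
 * insert-if-absent with the routine introduced by this commit. Because the
 * lookup and the insertion happen under the bucket locks of both tables, no
 * duplicate key can slip in between the check and the insert.
 */
static int example_add_unique(struct rhashtable *ht, struct example_obj *obj)
{
        if (!rhashtable_lookup_insert(ht, &obj->node))
                return -EEXIST;

        return 0;
}
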
772 static size_t rounded_hashtable_size(struct rhashtable_params *params)
773 {
774         return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
775                    1UL << params->min_shift);
776 }
777
778 /**
779  * rhashtable_init - initialize a new hash table
780  * @ht:         hash table to be initialized
781  * @params:     configuration parameters
782  *
783  * Initializes a new hash table based on the provided configuration
784  * parameters. A table can be configured either with a variable or
785  * fixed length key:
786  *
787  * Configuration Example 1: Fixed length keys
788  * struct test_obj {
789  *      int                     key;
790  *      void *                  my_member;
791  *      struct rhash_head       node;
792  * };
793  *
794  * struct rhashtable_params params = {
795  *      .head_offset = offsetof(struct test_obj, node),
796  *      .key_offset = offsetof(struct test_obj, key),
797  *      .key_len = sizeof(int),
798  *      .hashfn = jhash,
799  *      .nulls_base = (1U << RHT_BASE_SHIFT),
800  * };
801  *
802  * Configuration Example 2: Variable length keys
803  * struct test_obj {
804  *      [...]
805  *      struct rhash_head       node;
806  * };
807  *
808  * u32 my_hash_fn(const void *data, u32 seed)
809  * {
810  *      struct test_obj *obj = data;
811  *
812  *      return [... hash ...];
813  * }
814  *
815  * struct rhashtable_params params = {
816  *      .head_offset = offsetof(struct test_obj, node),
817  *      .hashfn = jhash,
818  *      .obj_hashfn = my_hash_fn,
819  * };
820  */
821 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
822 {
823         struct bucket_table *tbl;
824         size_t size;
825
826         size = HASH_DEFAULT_SIZE;
827
828         if ((params->key_len && !params->hashfn) ||
829             (!params->key_len && !params->obj_hashfn))
830                 return -EINVAL;
831
832         if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
833                 return -EINVAL;
834
835         params->min_shift = max_t(size_t, params->min_shift,
836                                   ilog2(HASH_MIN_SIZE));
837
838         if (params->nelem_hint)
839                 size = rounded_hashtable_size(params);
840
841         memset(ht, 0, sizeof(*ht));
842         mutex_init(&ht->mutex);
843         memcpy(&ht->p, params, sizeof(*params));
844
845         if (params->locks_mul)
846                 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
847         else
848                 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
849
850         tbl = bucket_table_alloc(ht, size);
851         if (tbl == NULL)
852                 return -ENOMEM;
853
854         ht->shift = ilog2(tbl->size);
855         RCU_INIT_POINTER(ht->tbl, tbl);
856         RCU_INIT_POINTER(ht->future_tbl, tbl);
857
858         if (!ht->p.hash_rnd)
859                 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
860
861         if (ht->p.grow_decision || ht->p.shrink_decision)
862                 INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker);
863
864         return 0;
865 }
866 EXPORT_SYMBOL_GPL(rhashtable_init);
867
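/* Usage sketch (editor's illustration, not part of the original file):
 * initializing a table for the example_obj structure from the earlier
 * sketches, along the lines of Configuration Example 1 above and
 * test_rht_init() below. The nelem_hint of 64 is arbitrary.
 */
static int example_table_setup(struct rhashtable *ht)
{
        struct rhashtable_params params = {
                .nelem_hint      = 64,
                .head_offset     = offsetof(struct example_obj, node),
                .key_offset      = offsetof(struct example_obj, key),
                .key_len         = sizeof(u32),
                .hashfn          = jhash,
                .grow_decision   = rht_grow_above_75,
                .shrink_decision = rht_shrink_below_30,
        };

        return rhashtable_init(ht, &params);
}
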
868 /**
869  * rhashtable_destroy - destroy hash table
870  * @ht:         the hash table to destroy
871  *
872  * Frees the bucket array. This function is not RCU safe; the caller has to
873  * make sure that no resizing can happen by unpublishing the hash table and
874  * waiting for an RCU grace period before releasing the bucket array.
875  */
876 void rhashtable_destroy(struct rhashtable *ht)
877 {
878         ht->being_destroyed = true;
879
880         mutex_lock(&ht->mutex);
881
882         cancel_delayed_work(&ht->run_work);
883         bucket_table_free(rht_dereference(ht->tbl, ht));
884
885         mutex_unlock(&ht->mutex);
886 }
887 EXPORT_SYMBOL_GPL(rhashtable_destroy);
888
889 /**************************************************************************
890  * Self Test
891  **************************************************************************/
892
893 #ifdef CONFIG_TEST_RHASHTABLE
894
895 #define TEST_HT_SIZE    8
896 #define TEST_ENTRIES    2048
897 #define TEST_PTR        ((void *) 0xdeadbeef)
898 #define TEST_NEXPANDS   4
899
900 struct test_obj {
901         void                    *ptr;
902         int                     value;
903         struct rhash_head       node;
904 };
905
906 static int __init test_rht_lookup(struct rhashtable *ht)
907 {
908         unsigned int i;
909
910         for (i = 0; i < TEST_ENTRIES * 2; i++) {
911                 struct test_obj *obj;
912                 bool expected = !(i % 2);
913                 u32 key = i;
914
915                 obj = rhashtable_lookup(ht, &key);
916
917                 if (expected && !obj) {
918                         pr_warn("Test failed: Could not find key %u\n", key);
919                         return -ENOENT;
920                 } else if (!expected && obj) {
921                         pr_warn("Test failed: Unexpected entry found for key %u\n",
922                                 key);
923                         return -EEXIST;
924                 } else if (expected && obj) {
925                         if (obj->ptr != TEST_PTR || obj->value != i) {
926                                 pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
927                                         obj->ptr, TEST_PTR, obj->value, i);
928                                 return -EINVAL;
929                         }
930                 }
931         }
932
933         return 0;
934 }
935
936 static void test_bucket_stats(struct rhashtable *ht, bool quiet)
937 {
938         unsigned int cnt, rcu_cnt, i, total = 0;
939         struct rhash_head *pos;
940         struct test_obj *obj;
941         struct bucket_table *tbl;
942
943         tbl = rht_dereference_rcu(ht->tbl, ht);
944         for (i = 0; i < tbl->size; i++) {
945                 rcu_cnt = cnt = 0;
946
947                 if (!quiet)
948                         pr_info(" [%#4x/%zu]", i, tbl->size);
949
950                 rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
951                         cnt++;
952                         total++;
953                         if (!quiet)
954                                 pr_cont(" [%p],", obj);
955                 }
956
957                 rht_for_each_entry_rcu(obj, pos, tbl, i, node)
958                         rcu_cnt++;
959
960                 if (rcu_cnt != cnt)
961                         pr_warn("Test failed: Chain count mismatch %d != %d",
962                                 cnt, rcu_cnt);
963
964                 if (!quiet)
965                         pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
966                                 i, tbl->buckets[i], cnt);
967         }
968
969         pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d\n",
970                 total, atomic_read(&ht->nelems), TEST_ENTRIES);
971
972         if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
973                 pr_warn("Test failed: Total count mismatch ^^^");
974 }
975
976 static int __init test_rhashtable(struct rhashtable *ht)
977 {
978         struct bucket_table *tbl;
979         struct test_obj *obj;
980         struct rhash_head *pos, *next;
981         int err;
982         unsigned int i;
983
984         /*
985          * Insertion Test:
986          * Insert TEST_ENTRIES into table with all keys even numbers
987          */
988         pr_info("  Adding %d keys\n", TEST_ENTRIES);
989         for (i = 0; i < TEST_ENTRIES; i++) {
990                 struct test_obj *obj;
991
992                 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
993                 if (!obj) {
994                         err = -ENOMEM;
995                         goto error;
996                 }
997
998                 obj->ptr = TEST_PTR;
999                 obj->value = i * 2;
1000
1001                 rhashtable_insert(ht, &obj->node);
1002         }
1003
1004         rcu_read_lock();
1005         test_bucket_stats(ht, true);
1006         test_rht_lookup(ht);
1007         rcu_read_unlock();
1008
1009         for (i = 0; i < TEST_NEXPANDS; i++) {
1010                 pr_info("  Table expansion iteration %u...\n", i);
1011                 mutex_lock(&ht->mutex);
1012                 rhashtable_expand(ht);
1013                 mutex_unlock(&ht->mutex);
1014
1015                 rcu_read_lock();
1016                 pr_info("  Verifying lookups...\n");
1017                 test_rht_lookup(ht);
1018                 rcu_read_unlock();
1019         }
1020
1021         for (i = 0; i < TEST_NEXPANDS; i++) {
1022                 pr_info("  Table shrinkage iteration %u...\n", i);
1023                 mutex_lock(&ht->mutex);
1024                 rhashtable_shrink(ht);
1025                 mutex_unlock(&ht->mutex);
1026
1027                 rcu_read_lock();
1028                 pr_info("  Verifying lookups...\n");
1029                 test_rht_lookup(ht);
1030                 rcu_read_unlock();
1031         }
1032
1033         rcu_read_lock();
1034         test_bucket_stats(ht, true);
1035         rcu_read_unlock();
1036
1037         pr_info("  Deleting %d keys\n", TEST_ENTRIES);
1038         for (i = 0; i < TEST_ENTRIES; i++) {
1039                 u32 key = i * 2;
1040
1041                 obj = rhashtable_lookup(ht, &key);
1042                 BUG_ON(!obj);
1043
1044                 rhashtable_remove(ht, &obj->node);
1045                 kfree(obj);
1046         }
1047
1048         return 0;
1049
1050 error:
1051         tbl = rht_dereference_rcu(ht->tbl, ht);
1052         for (i = 0; i < tbl->size; i++)
1053                 rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
1054                         kfree(obj);
1055
1056         return err;
1057 }
1058
1059 static int __init test_rht_init(void)
1060 {
1061         struct rhashtable ht;
1062         struct rhashtable_params params = {
1063                 .nelem_hint = TEST_HT_SIZE,
1064                 .head_offset = offsetof(struct test_obj, node),
1065                 .key_offset = offsetof(struct test_obj, value),
1066                 .key_len = sizeof(int),
1067                 .hashfn = jhash,
1068                 .nulls_base = (3U << RHT_BASE_SHIFT),
1069                 .grow_decision = rht_grow_above_75,
1070                 .shrink_decision = rht_shrink_below_30,
1071         };
1072         int err;
1073
1074         pr_info("Running resizable hashtable tests...\n");
1075
1076         err = rhashtable_init(&ht, &params);
1077         if (err < 0) {
1078                 pr_warn("Test failed: Unable to initialize hashtable: %d\n",
1079                         err);
1080                 return err;
1081         }
1082
1083         err = test_rhashtable(&ht);
1084
1085         rhashtable_destroy(&ht);
1086
1087         return err;
1088 }
1089
1090 subsys_initcall(test_rht_init);
1091
1092 #endif /* CONFIG_TEST_RHASHTABLE */