/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE       64UL
#define HASH_MIN_SIZE           4UL
#define BUCKET_LOCKS_PER_CPU   128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE     (RHT_BASE_BITS + 1)

enum {
        RHT_LOCK_NORMAL,
        RHT_LOCK_NESTED,
        RHT_LOCK_NESTED2,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
        return &tbl->locks[hash & tbl->locks_mask];
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(TBL, HASH) \
        BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
        spinlock_t *lock = bucket_lock(tbl, hash);

        return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
        return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
        return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
        u32 hash;

        if (unlikely(!ht->p.key_len))
                hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
        else
                hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
                                    ht->p.hash_rnd);

        return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
        struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
        u32 hash;

        hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
        hash >>= HASH_RESERVED_SPACE;

        return rht_bucket_index(tbl, hash);
}

static u32 head_hashfn(const struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
{
        return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
        struct rhash_head __rcu **pprev;

        for (pprev = &tbl->buckets[n];
             !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
             pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
                ;

        return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
        unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif

        nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
        size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

        /* Never allocate more than one lock per bucket */
        size = min_t(unsigned int, size, tbl->size);

        if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
                if (size * sizeof(spinlock_t) > PAGE_SIZE)
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
                else
#endif
                tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
                                           GFP_KERNEL);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&tbl->locks[i]);
        }
        tbl->locks_mask = size - 1;

        return 0;
}
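
/* Worked sizing example (illustrative note, not part of the original file):
 * on a 4-CPU system with the default locks_mul of BUCKET_LOCKS_PER_CPU (128),
 * roundup_pow_of_two(4 * 128) yields 512 candidate locks. A 4096-bucket table
 * keeps all 512, so buckets whose index is equal modulo 512 share a spinlock;
 * the default 64-bucket table is capped at 64 locks, i.e. one per bucket.
 */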

static void bucket_table_free(const struct bucket_table *tbl)
{
        if (tbl)
                kvfree(tbl->locks);

        kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets)
{
        struct bucket_table *tbl;
        size_t size;
        int i;

        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (tbl == NULL)
                tbl = vzalloc(size);

        if (tbl == NULL)
                return NULL;

        tbl->size = nbuckets;

        if (alloc_bucket_locks(ht, tbl) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

        return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:         hash table
 * @new_size:   new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
        /* Expand table when exceeding 75% load */
        return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
               (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:         hash table
 * @new_size:   new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
        /* Shrink table beneath 30% load */
        return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
               (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
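
/* Worked example (illustrative note, not part of the original file): for a
 * 64-bucket table, rht_grow_above_75() fires once nelems exceeds
 * 64 / 4 * 3 = 48 entries (subject to max_shift), while
 * rht_shrink_below_30() fires once nelems drops below 64 * 3 / 10 = 19
 * entries (subject to min_shift). The deferred worker below then calls
 * rhashtable_expand() or rhashtable_shrink() accordingly.
 */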

static void hashtable_chain_unzip(const struct rhashtable *ht,
                                  const struct bucket_table *new_tbl,
                                  struct bucket_table *old_tbl,
                                  size_t old_hash)
{
        struct rhash_head *he, *p, *next;
        spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
        unsigned int new_hash, new_hash2;

        ASSERT_BUCKET_LOCK(old_tbl, old_hash);

        /* Old bucket empty, no work needed. */
        p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
                                   old_hash);
        if (rht_is_a_nulls(p))
                return;

        new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
        new_bucket_lock = bucket_lock(new_tbl, new_hash);

        /* Advance the old bucket pointer one or more times until it
         * reaches a node that doesn't hash to the same bucket as the
         * previous node. Call that previous node p.
         */
        rht_for_each_continue(he, p->next, old_tbl, old_hash) {
                new_hash2 = head_hashfn(ht, new_tbl, he);
                if (new_hash != new_hash2)
                        break;
                p = he;
        }
        rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

        spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

        /* If we have encountered an entry that maps to a different bucket in
         * the new table, lock down that bucket as well as we might cut off
         * the end of the chain.
         */
        new_bucket_lock2 = bucket_lock(new_tbl, new_hash2);
        if (new_bucket_lock != new_bucket_lock2)
                spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);

        /* Find the subsequent node which does hash to the same
         * bucket as node P, or NULL if no such node exists.
         */
        INIT_RHT_NULLS_HEAD(next, ht, old_hash);
        if (!rht_is_a_nulls(he)) {
                rht_for_each_continue(he, he->next, old_tbl, old_hash) {
                        if (head_hashfn(ht, new_tbl, he) == new_hash) {
                                next = he;
                                break;
                        }
                }
        }

        /* Set p's next pointer to that subsequent node pointer,
         * bypassing the nodes which do not hash to p's bucket
         */
        rcu_assign_pointer(p->next, next);

        if (new_bucket_lock != new_bucket_lock2)
                spin_unlock_bh(new_bucket_lock2);
        spin_unlock_bh(new_bucket_lock);
}

static void link_old_to_new(struct bucket_table *new_tbl,
                            unsigned int new_hash, struct rhash_head *entry)
{
        spinlock_t *new_bucket_lock;

        new_bucket_lock = bucket_lock(new_tbl, new_hash);

        spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
        rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
        spin_unlock_bh(new_bucket_lock);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:         the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        struct rhash_head *he;
        spinlock_t *old_bucket_lock;
        unsigned int new_hash, old_hash;
        bool complete = false;

        ASSERT_RHT_MUTEX(ht);

        new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
        if (new_tbl == NULL)
                return -ENOMEM;

        atomic_inc(&ht->shift);

        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         * The synchronize_rcu() guarantees that the new table is visible to
         * all writers, so no new additions go into the old table while we
         * relink.
         */
        rcu_assign_pointer(ht->future_tbl, new_tbl);
        synchronize_rcu();

        /* For each new bucket, search the corresponding old bucket for the
         * first entry that hashes to the new bucket, and link the end of
         * newly formed bucket chain (containing entries added to future
         * table) to that entry. Since all the entries which will end up in
         * the new bucket appear in the same old bucket, this constructs an
         * entirely valid new hash table, but with multiple buckets
         * "zipped" together into a single imprecise chain.
         */
        for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
                old_hash = rht_bucket_index(old_tbl, new_hash);
                old_bucket_lock = bucket_lock(old_tbl, old_hash);

                spin_lock_bh(old_bucket_lock);
                rht_for_each(he, old_tbl, old_hash) {
                        if (head_hashfn(ht, new_tbl, he) == new_hash) {
                                link_old_to_new(new_tbl, new_hash, he);
                                break;
                        }
                }
                spin_unlock_bh(old_bucket_lock);
        }

        /* Publish the new table pointer. Lookups may now traverse
         * the new table, but they will not benefit from any
         * additional efficiency until later steps unzip the buckets.
         */
        rcu_assign_pointer(ht->tbl, new_tbl);

        /* Unzip interleaved hash chains */
        while (!complete && !ht->being_destroyed) {
                /* Wait for readers. All new readers will see the new
                 * table, and thus no references to the old table will
                 * remain.
                 */
                synchronize_rcu();

                /* For each bucket in the old table (each of which
                 * contains items from multiple buckets of the new
                 * table): ...
                 */
                complete = true;
                for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
                        struct rhash_head *head;

                        old_bucket_lock = bucket_lock(old_tbl, old_hash);
                        spin_lock_bh(old_bucket_lock);

                        hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
                        head = rht_dereference_bucket(old_tbl->buckets[old_hash],
                                                      old_tbl, old_hash);
                        if (!rht_is_a_nulls(head))
                                complete = false;

                        spin_unlock_bh(old_bucket_lock);
                }
        }

        bucket_table_free(old_tbl);
        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
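
/* Usage sketch (illustrative, not part of the original file): callers that
 * resize manually instead of relying on the deferred worker must hold
 * ht->mutex themselves, e.g.:
 *
 *      mutex_lock(&ht->mutex);
 *      err = rhashtable_expand(ht);
 *      mutex_unlock(&ht->mutex);
 *
 * where "ht" is an already initialised table and "err" a local int.
 */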

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:         the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
        spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
        unsigned int new_hash;

        ASSERT_RHT_MUTEX(ht);

        new_tbl = bucket_table_alloc(ht, tbl->size / 2);
        if (new_tbl == NULL)
                return -ENOMEM;

        rcu_assign_pointer(ht->future_tbl, new_tbl);
        synchronize_rcu();

        /* Link the first entry in the old bucket to the end of the
         * bucket in the new table. As entries are concurrently being
         * added to the new table, lock down the new bucket. As we
         * always divide the size in half when shrinking, each bucket
         * in the new table maps to exactly two buckets in the old
         * table.
         *
         * As removals can occur concurrently on the old table, we need
         * to lock down both matching buckets in the old table.
         */
        for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
                old_bucket_lock1 = bucket_lock(tbl, new_hash);
                old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
                new_bucket_lock = bucket_lock(new_tbl, new_hash);

                spin_lock_bh(old_bucket_lock1);

                /* Depending on the bucket-to-lock mapping, the bucket in
                 * the lower and the upper region may map to the same lock.
                 */
                if (old_bucket_lock1 != old_bucket_lock2) {
                        spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
                        spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
                } else {
                        spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
                }

                rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
                                   tbl->buckets[new_hash]);
                rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
                                   tbl->buckets[new_hash + new_tbl->size]);

                spin_unlock_bh(new_bucket_lock);
                if (old_bucket_lock1 != old_bucket_lock2)
                        spin_unlock_bh(old_bucket_lock2);
                spin_unlock_bh(old_bucket_lock1);
        }

        /* Publish the new, valid hash table */
        rcu_assign_pointer(ht->tbl, new_tbl);
        atomic_dec(&ht->shift);

        /* Wait for readers. No new readers will have references to the
         * old hash table.
         */
        synchronize_rcu();

        bucket_table_free(tbl);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
        struct rhashtable *ht;
        struct bucket_table *tbl;
        struct rhashtable_walker *walker;

        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);
        if (ht->being_destroyed)
                goto unlock;

        tbl = rht_dereference(ht->tbl, ht);

        list_for_each_entry(walker, &ht->walkers, list)
                walker->resize = true;

        if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
                rhashtable_expand(ht);
        else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
                rhashtable_shrink(ht);

unlock:
        mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
        struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
        struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
        size_t size = tbl->size;

        /* Only adjust the table if no resizing is currently in progress. */
        if (tbl == new_tbl &&
            ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
             (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
                schedule_work(&ht->run_work);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
                                struct bucket_table *tbl, u32 hash)
{
        struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
                                                         tbl, hash);

        if (rht_is_a_nulls(head))
                INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
        else
                RCU_INIT_POINTER(obj->next, head);

        rcu_assign_pointer(tbl->buckets[hash], obj);

        atomic_inc(&ht->nelems);

        rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:         hash table
 * @obj:        pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
        struct bucket_table *tbl;
        spinlock_t *lock;
        unsigned hash;

        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->future_tbl, ht);
        hash = head_hashfn(ht, tbl, obj);
        lock = bucket_lock(tbl, hash);

        spin_lock_bh(lock);
        __rhashtable_insert(ht, obj, tbl, hash);
        spin_unlock_bh(lock);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
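
/* Usage sketch (illustrative, not part of the original file), assuming an
 * object laid out as in the fixed length key example of rhashtable_init()
 * below, and a table "my_ht" already set up with rhashtable_init():
 *
 *      struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *      if (!obj)
 *              return -ENOMEM;
 *      obj->key = 42;
 *      rhashtable_insert(&my_ht, &obj->node);
 */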

/**
 * rhashtable_remove - remove object from hash table
 * @ht:         hash table
 * @obj:        pointer to hash head inside object
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
        struct bucket_table *tbl;
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
        spinlock_t *lock;
        unsigned int hash;
        bool ret = false;

        rcu_read_lock();
        tbl = rht_dereference_rcu(ht->tbl, ht);
        hash = head_hashfn(ht, tbl, obj);

        lock = bucket_lock(tbl, hash);
        spin_lock_bh(lock);

restart:
        pprev = &tbl->buckets[hash];
        rht_for_each(he, tbl, hash) {
                if (he != obj) {
                        pprev = &he->next;
                        continue;
                }

                rcu_assign_pointer(*pprev, obj->next);

                ret = true;
                break;
        }

        /* The entry may be linked in either 'tbl', 'future_tbl', or both.
         * 'future_tbl' only exists for a short period of time during
         * resizing. Thus traversing both is fine and the added cost is
         * very rare.
         */
        if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
                spin_unlock_bh(lock);

                tbl = rht_dereference_rcu(ht->future_tbl, ht);
                hash = head_hashfn(ht, tbl, obj);

                lock = bucket_lock(tbl, hash);
                spin_lock_bh(lock);
                goto restart;
        }

        spin_unlock_bh(lock);

        if (ret) {
                atomic_dec(&ht->nelems);
                rhashtable_wakeup_worker(ht);
        }

        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
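
/* Usage sketch (illustrative, not part of the original file), continuing the
 * rhashtable_insert() sketch above; "rcu_head" is a hypothetical member used
 * to defer freeing until RCU protected lookups can no longer see the object:
 *
 *      if (rhashtable_remove(&my_ht, &obj->node))
 *              kfree_rcu(obj, rcu_head);
 */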

struct rhashtable_compare_arg {
        struct rhashtable *ht;
        const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
        struct rhashtable_compare_arg *x = arg;
        struct rhashtable *ht = x->ht;

        return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:         hash table
 * @key:        pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };

        BUG_ON(!ht->p.key_len);

        return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
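
/* Usage sketch (illustrative, not part of the original file), again based on
 * the fixed length key example of rhashtable_init() below. The returned
 * object is only guaranteed to stay valid while the caller holds the RCU
 * read lock:
 *
 *      int key = 42;
 *      struct test_obj *obj;
 *
 *      rcu_read_lock();
 *      obj = rhashtable_lookup(&my_ht, &key);
 *      if (obj)
 *              pr_info("found entry for key %d\n", obj->key);
 *      rcu_read_unlock();
 */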

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:         hash table
 * @key:        the pointer to the key
 * @compare:    compare function, must return true on match
 * @arg:        argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
                                bool (*compare)(void *, void *), void *arg)
{
        const struct bucket_table *tbl, *old_tbl;
        struct rhash_head *he;
        u32 hash;

        rcu_read_lock();

        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        tbl = rht_dereference_rcu(ht->future_tbl, ht);
        hash = key_hashfn(ht, key, ht->p.key_len);
restart:
        rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
                if (!compare(rht_obj(ht, he), arg))
                        continue;
                rcu_read_unlock();
                return rht_obj(ht, he);
        }

        if (unlikely(tbl != old_tbl)) {
                tbl = old_tbl;
                goto restart;
        }
        rcu_read_unlock();

        return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
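
/* Sketch of a caller supplied compare function (illustrative, not part of the
 * original file). The key passed to rhashtable_lookup_compare() still selects
 * the bucket; the callback decides which entry in that bucket matches. The
 * "generation" field is a made-up extension of the test_obj example:
 *
 *      struct my_arg {
 *              int key;
 *              u32 generation;
 *      };
 *
 *      static bool my_compare(void *ptr, void *arg)
 *      {
 *              struct test_obj *obj = ptr;
 *              struct my_arg *a = arg;
 *
 *              return obj->key == a->key && obj->generation == a->generation;
 *      }
 */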

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:         hash table
 * @obj:        pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = rht_obj(ht, obj) + ht->p.key_offset,
        };

        BUG_ON(!ht->p.key_len);

        return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
                                                &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
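
/* Usage sketch (illustrative, not part of the original file): insert only if
 * no entry with the same key is present yet. On false the object was not
 * linked into the table, so it may be freed immediately:
 *
 *      if (!rhashtable_lookup_insert(&my_ht, &obj->node)) {
 *              kfree(obj);
 *              return -EEXIST;
 *      }
 */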

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:         hash table
 * @obj:        pointer to hash head inside object
 * @compare:    compare function, must return true on match
 * @arg:        argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
                                      struct rhash_head *obj,
                                      bool (*compare)(void *, void *),
                                      void *arg)
{
        struct bucket_table *new_tbl, *old_tbl;
        spinlock_t *new_bucket_lock, *old_bucket_lock;
        u32 new_hash, old_hash;
        bool success = true;

        BUG_ON(!ht->p.key_len);

        rcu_read_lock();

        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        old_hash = head_hashfn(ht, old_tbl, obj);
        old_bucket_lock = bucket_lock(old_tbl, old_hash);
        spin_lock_bh(old_bucket_lock);

        new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
        new_hash = head_hashfn(ht, new_tbl, obj);
        new_bucket_lock = bucket_lock(new_tbl, new_hash);
        if (unlikely(old_tbl != new_tbl))
                spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

        if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
                                      compare, arg)) {
                success = false;
                goto exit;
        }

        __rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
        if (unlikely(old_tbl != new_tbl))
                spin_unlock_bh(new_bucket_lock);
        spin_unlock_bh(old_bucket_lock);

        rcu_read_unlock();

        return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:         Table to walk over
 * @iter:       Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;

        iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
        if (!iter->walker)
                return -ENOMEM;

        mutex_lock(&ht->mutex);
        list_add(&iter->walker->list, &ht->walkers);
        mutex_unlock(&ht->mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:       Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
        mutex_lock(&iter->ht->mutex);
        list_del(&iter->walker->list);
        mutex_unlock(&iter->ht->mutex);
        kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:       Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
        rcu_read_lock();

        if (iter->walker->resize) {
                iter->slot = 0;
                iter->skip = 0;
                iter->walker->resize = false;
                return -EAGAIN;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:       Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
        const struct bucket_table *tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        void *obj = NULL;

        tbl = rht_dereference_rcu(ht->tbl, ht);

        if (p) {
                p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
                goto next;
        }

        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;

                rht_for_each_rcu(p, tbl, iter->slot) {
                        if (!skip)
                                break;
                        skip--;
                }

next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        obj = rht_obj(ht, p);
                        goto out;
                }

                iter->skip = 0;
        }

        iter->p = NULL;

out:
        if (iter->walker->resize) {
                iter->p = NULL;
                iter->slot = 0;
                iter->skip = 0;
                iter->walker->resize = false;
                return ERR_PTR(-EAGAIN);
        }

        return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:       Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
        rcu_read_unlock();
        iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
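
/* Usage sketch of the walker API (illustrative, not part of the original
 * file), assuming the test_obj layout from the rhashtable_init() example
 * below. An -EAGAIN from rhashtable_walk_start() or an ERR_PTR(-EAGAIN) from
 * rhashtable_walk_next() only means a resize rewound the iterator:
 *
 *      struct rhashtable_iter iter;
 *      struct test_obj *obj;
 *      int err;
 *
 *      err = rhashtable_walk_init(&my_ht, &iter);
 *      if (err)
 *              return err;
 *
 *      rhashtable_walk_start(&iter);
 *
 *      while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *              if (IS_ERR(obj)) {
 *                      if (PTR_ERR(obj) == -EAGAIN)
 *                              continue;
 *                      break;
 *              }
 *              pr_info("key %d\n", obj->key);
 *      }
 *
 *      rhashtable_walk_stop(&iter);
 *      rhashtable_walk_exit(&iter);
 */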

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                   1UL << params->min_shift);
}
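
/* Worked example (illustrative note, not part of the original file): a
 * nelem_hint of 100 gives 100 * 4 / 3 = 133, which rounds up to 256 buckets,
 * so the table starts out below the 75% growth watermark.
 */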

/**
 * rhashtable_init - initialize a new hash table
 * @ht:         hash table to be initialized
 * @params:     configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *      int                     key;
 *      void *                  my_member;
 *      struct rhash_head       node;
 * };
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .key_offset = offsetof(struct test_obj, key),
 *      .key_len = sizeof(int),
 *      .hashfn = jhash,
 *      .nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *      [...]
 *      struct rhash_head       node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *      struct test_obj *obj = data;
 *
 *      return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .hashfn = jhash,
 *      .obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
        struct bucket_table *tbl;
        size_t size;

        size = HASH_DEFAULT_SIZE;

        if ((params->key_len && !params->hashfn) ||
            (!params->key_len && !params->obj_hashfn))
                return -EINVAL;

        if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
                return -EINVAL;

        params->min_shift = max_t(size_t, params->min_shift,
                                  ilog2(HASH_MIN_SIZE));

        if (params->nelem_hint)
                size = rounded_hashtable_size(params);

        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        memcpy(&ht->p, params, sizeof(*params));
        INIT_LIST_HEAD(&ht->walkers);

        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

        tbl = bucket_table_alloc(ht, size);
        if (tbl == NULL)
                return -ENOMEM;

        atomic_set(&ht->nelems, 0);
        atomic_set(&ht->shift, ilog2(tbl->size));
        RCU_INIT_POINTER(ht->tbl, tbl);
        RCU_INIT_POINTER(ht->future_tbl, tbl);

        if (!ht->p.hash_rnd)
                get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

        if (ht->p.grow_decision || ht->p.shrink_decision)
                INIT_WORK(&ht->run_work, rht_deferred_worker);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
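
/* Usage sketch (illustrative, not part of the original file) tying together
 * the fixed length key parameters from the kernel doc above; "my_ht" and
 * "my_init" are made-up names, and the grow/shrink callbacks enable the
 * deferred automatic resizing implemented above:
 *
 *      static struct rhashtable my_ht;
 *
 *      static int __init my_init(void)
 *      {
 *              struct rhashtable_params params = {
 *                      .head_offset = offsetof(struct test_obj, node),
 *                      .key_offset = offsetof(struct test_obj, key),
 *                      .key_len = sizeof(int),
 *                      .hashfn = jhash,
 *                      .nulls_base = (1U << RHT_BASE_SHIFT),
 *                      .grow_decision = rht_grow_above_75,
 *                      .shrink_decision = rht_shrink_below_30,
 *              };
 *
 *              return rhashtable_init(&my_ht, &params);
 *      }
 */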

/**
 * rhashtable_destroy - destroy hash table
 * @ht:         the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
        ht->being_destroyed = true;

        if (ht->p.grow_decision || ht->p.shrink_decision)
                cancel_work_sync(&ht->run_work);

        mutex_lock(&ht->mutex);
        bucket_table_free(rht_dereference(ht->tbl, ht));
        mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);