1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for hashing in the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move all stuff to libcfs
 * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
 * - ignore hs_rwlock if without CFS_HASH_REHASH setting
 * - buckets are allocated one by one (instead of as contiguous memory),
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - a "bucket" is now a group of hlist_heads; the user can specify the
 *   bucket size via bkt_bits of cfs_hash_create(), and all hlist_heads
 *   in a bucket share one lock to reduce memory overhead
 *
 * - support lockless hash; the caller takes care of locking:
 *   this avoids lock overhead for hash tables that are already
 *   protected by locking in the caller for another reason
 *
 * - support both spinlock and rwlock for buckets:
 *   the overhead of spinlock contention is lower than the read/write
 *   contention of an rwlock, so using a spinlock to serialize
 *   operations on a bucket is more reasonable for frequently changed
 *   hash tables
 *
 * - support single-lock mode:
 *   one lock protects all hash operations, to avoid the overhead of
 *   multiple locks when the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations in many use-cases, which
 *   are expensive
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking; in those cases any required rehash is scheduled on
 *   a different thread
 *
 * - safer rehash on large hash tables
 *   In the old implementation, the rehash function exclusively locked
 *   the hash table and finished the rehash in one batch; that is
 *   dangerous on an SMP system because rehashing millions of elements
 *   can take a long time. The new rehash implementation can release
 *   the lock and relax the CPU in the middle of a rehash, so it is
 *   safe for another thread to search/change the hash table even
 *   while it is rehashing.
 *
 * - support two different refcount modes
 *   . hash table holds a refcount on each element
 *   . hash table doesn't change refcounts on adding/removing elements
 *
 * - support long-name hash tables (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash tables;
 *   cfs_hash_rehash_key overwrites the hash key under lock by calling
 *   hs_keycpy.
 *
 * - better hash iteration:
 *   Now we support both locked and lockless iteration of the hash
 *   table. Also, the user can break out of the iteration by returning
 *   1 from the callback.
 */
109
110 #include "../../include/linux/libcfs/libcfs.h"
111 #include <linux/seq_file.h>
112
113 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
114 static unsigned int warn_on_depth = 8;
115 module_param(warn_on_depth, uint, 0644);
116 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
117 #endif
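
/*
 * Usage note (illustrative, standard module-parameter behaviour): the
 * threshold can be raised at load time with "warn_on_depth=16" on the
 * module command line, or at runtime (the parameter is mode 0644)
 * through /sys/module/<module>/parameters/warn_on_depth.
 */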
118
119 struct cfs_wi_sched *cfs_sched_rehash;
120
121 static inline void
122 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
123
124 static inline void
125 cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
126
127 static inline void
128 cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
129 {
130         spin_lock(&lock->spin);
131 }
132
133 static inline void
134 cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
135 {
136         spin_unlock(&lock->spin);
137 }
138
139 static inline void
140 cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
141 {
142         if (!exclusive)
143                 read_lock(&lock->rw);
144         else
145                 write_lock(&lock->rw);
146 }
147
148 static inline void
149 cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
150 {
151         if (!exclusive)
152                 read_unlock(&lock->rw);
153         else
154                 write_unlock(&lock->rw);
155 }
156
157 /** No lock hash */
158 static cfs_hash_lock_ops_t cfs_hash_nl_lops = {
159         .hs_lock        = cfs_hash_nl_lock,
160         .hs_unlock      = cfs_hash_nl_unlock,
161         .hs_bkt_lock    = cfs_hash_nl_lock,
162         .hs_bkt_unlock  = cfs_hash_nl_unlock,
163 };
164
165 /** no bucket lock, one spinlock to protect everything */
166 static cfs_hash_lock_ops_t cfs_hash_nbl_lops = {
167         .hs_lock        = cfs_hash_spin_lock,
168         .hs_unlock      = cfs_hash_spin_unlock,
169         .hs_bkt_lock    = cfs_hash_nl_lock,
170         .hs_bkt_unlock  = cfs_hash_nl_unlock,
171 };
172
173 /** spin bucket lock, rehash is enabled */
174 static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops = {
175         .hs_lock        = cfs_hash_rw_lock,
176         .hs_unlock      = cfs_hash_rw_unlock,
177         .hs_bkt_lock    = cfs_hash_spin_lock,
178         .hs_bkt_unlock  = cfs_hash_spin_unlock,
179 };
180
181 /** rw bucket lock, rehash is enabled */
182 static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops = {
183         .hs_lock        = cfs_hash_rw_lock,
184         .hs_unlock      = cfs_hash_rw_unlock,
185         .hs_bkt_lock    = cfs_hash_rw_lock,
186         .hs_bkt_unlock  = cfs_hash_rw_unlock,
187 };
188
189 /** spin bucket lock, rehash is disabled */
190 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops = {
191         .hs_lock        = cfs_hash_nl_lock,
192         .hs_unlock      = cfs_hash_nl_unlock,
193         .hs_bkt_lock    = cfs_hash_spin_lock,
194         .hs_bkt_unlock  = cfs_hash_spin_unlock,
195 };
196
197 /** rw bucket lock, rehash is disabled */
198 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops = {
199         .hs_lock        = cfs_hash_nl_lock,
200         .hs_unlock      = cfs_hash_nl_unlock,
201         .hs_bkt_lock    = cfs_hash_rw_lock,
202         .hs_bkt_unlock  = cfs_hash_rw_unlock,
203 };
204
205 static void
206 cfs_hash_lock_setup(struct cfs_hash *hs)
207 {
208         if (cfs_hash_with_no_lock(hs)) {
209                 hs->hs_lops = &cfs_hash_nl_lops;
210
211         } else if (cfs_hash_with_no_bktlock(hs)) {
212                 hs->hs_lops = &cfs_hash_nbl_lops;
213                 spin_lock_init(&hs->hs_lock.spin);
214
215         } else if (cfs_hash_with_rehash(hs)) {
216                 rwlock_init(&hs->hs_lock.rw);
217
218                 if (cfs_hash_with_rw_bktlock(hs))
219                         hs->hs_lops = &cfs_hash_bkt_rw_lops;
220                 else if (cfs_hash_with_spin_bktlock(hs))
221                         hs->hs_lops = &cfs_hash_bkt_spin_lops;
222                 else
223                         LBUG();
224         } else {
225                 if (cfs_hash_with_rw_bktlock(hs))
226                         hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
227                 else if (cfs_hash_with_spin_bktlock(hs))
228                         hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
229                 else
230                         LBUG();
231         }
232 }
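
/*
 * Summary of the mapping implemented by cfs_hash_lock_setup() above
 * (a reference table derived from the code paths, nothing new):
 *
 *   CFS_HASH_NO_LOCK                  -> no-op table lock, no-op bucket lock
 *   CFS_HASH_NO_BKTLOCK               -> one spinlock, no-op bucket lock
 *   rehash + CFS_HASH_RW_BKTLOCK      -> rwlock table lock, rwlock buckets
 *   rehash + CFS_HASH_SPIN_BKTLOCK    -> rwlock table lock, spinlock buckets
 *   no rehash + CFS_HASH_RW_BKTLOCK   -> no-op table lock, rwlock buckets
 *   no rehash + CFS_HASH_SPIN_BKTLOCK -> no-op table lock, spinlock buckets
 */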
233
234 /**
235  * Simple hash head without depth tracking
236  * new element is always added to head of hlist
237  */
238 typedef struct {
239         struct hlist_head       hh_head;        /**< entries list */
240 } cfs_hash_head_t;
241
242 static int
243 cfs_hash_hh_hhead_size(struct cfs_hash *hs)
244 {
245         return sizeof(cfs_hash_head_t);
246 }
247
248 static struct hlist_head *
249 cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
250 {
251         cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
252
253         return &head[bd->bd_offset].hh_head;
254 }
255
256 static int
257 cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
258                       struct hlist_node *hnode)
259 {
260         hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
261         return -1; /* unknown depth */
262 }
263
264 static int
265 cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
266                       struct hlist_node *hnode)
267 {
268         hlist_del_init(hnode);
269         return -1; /* unknown depth */
270 }
271
272 /**
273  * Simple hash head with depth tracking
274  * new element is always added to head of hlist
275  */
276 typedef struct {
277         struct hlist_head       hd_head;        /**< entries list */
278         unsigned int        hd_depth;       /**< list length */
279 } cfs_hash_head_dep_t;
280
281 static int
282 cfs_hash_hd_hhead_size(struct cfs_hash *hs)
283 {
284         return sizeof(cfs_hash_head_dep_t);
285 }
286
287 static struct hlist_head *
288 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
289 {
290         cfs_hash_head_dep_t   *head;
291
292         head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
293         return &head[bd->bd_offset].hd_head;
294 }
295
296 static int
297 cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
298                       struct hlist_node *hnode)
299 {
300         cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
301                                                cfs_hash_head_dep_t, hd_head);
302         hlist_add_head(hnode, &hh->hd_head);
303         return ++hh->hd_depth;
304 }
305
306 static int
307 cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
308                       struct hlist_node *hnode)
309 {
310         cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
311                                                cfs_hash_head_dep_t, hd_head);
312         hlist_del_init(hnode);
313         return --hh->hd_depth;
314 }
315
316 /**
317  * double links hash head without depth tracking
318  * new element is always added to tail of hlist
319  */
320 typedef struct {
321         struct hlist_head       dh_head;        /**< entries list */
322         struct hlist_node       *dh_tail;       /**< the last entry */
323 } cfs_hash_dhead_t;
324
325 static int
326 cfs_hash_dh_hhead_size(struct cfs_hash *hs)
327 {
328         return sizeof(cfs_hash_dhead_t);
329 }
330
331 static struct hlist_head *
332 cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
333 {
334         cfs_hash_dhead_t *head;
335
336         head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
337         return &head[bd->bd_offset].dh_head;
338 }
339
340 static int
341 cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
342                       struct hlist_node *hnode)
343 {
344         cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
345                                             cfs_hash_dhead_t, dh_head);
346
347         if (dh->dh_tail != NULL) /* not empty */
348                 hlist_add_behind(hnode, dh->dh_tail);
349         else /* empty list */
350                 hlist_add_head(hnode, &dh->dh_head);
351         dh->dh_tail = hnode;
352         return -1; /* unknown depth */
353 }
354
355 static int
356 cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
357                       struct hlist_node *hnd)
358 {
359         cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
360                                             cfs_hash_dhead_t, dh_head);
361
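        /*
         * NB: hnd->pprev points at the previous node's ->next field, or
         * at dh_head.first when @hnd is the first entry, so when @hnd
         * is the tail the previous node (the new tail) can be recovered
         * from it with container_of() below.
         */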
362         if (hnd->next == NULL) { /* it's the tail */
363                 dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
364                               container_of(hnd->pprev, struct hlist_node, next);
365         }
366         hlist_del_init(hnd);
367         return -1; /* unknown depth */
368 }
369
370 /**
371  * double links hash head with depth tracking
372  * new element is always added to tail of hlist
373  */
374 typedef struct {
375         struct hlist_head       dd_head;        /**< entries list */
376         struct hlist_node       *dd_tail;       /**< the last entry */
377         unsigned int        dd_depth;       /**< list length */
378 } cfs_hash_dhead_dep_t;
379
380 static int
381 cfs_hash_dd_hhead_size(struct cfs_hash *hs)
382 {
383         return sizeof(cfs_hash_dhead_dep_t);
384 }
385
386 static struct hlist_head *
387 cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
388 {
389         cfs_hash_dhead_dep_t *head;
390
391         head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
392         return &head[bd->bd_offset].dd_head;
393 }
394
395 static int
396 cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
397                       struct hlist_node *hnode)
398 {
399         cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
400                                                 cfs_hash_dhead_dep_t, dd_head);
401
402         if (dh->dd_tail != NULL) /* not empty */
403                 hlist_add_behind(hnode, dh->dd_tail);
404         else /* empty list */
405                 hlist_add_head(hnode, &dh->dd_head);
406         dh->dd_tail = hnode;
407         return ++dh->dd_depth;
408 }
409
410 static int
411 cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
412                       struct hlist_node *hnd)
413 {
414         cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
415                                                 cfs_hash_dhead_dep_t, dd_head);
416
417         if (hnd->next == NULL) { /* it's the tail */
418                 dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
419                               container_of(hnd->pprev, struct hlist_node, next);
420         }
421         hlist_del_init(hnd);
422         return --dh->dd_depth;
423 }
424
425 static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
426        .hop_hhead      = cfs_hash_hh_hhead,
427        .hop_hhead_size = cfs_hash_hh_hhead_size,
428        .hop_hnode_add  = cfs_hash_hh_hnode_add,
429        .hop_hnode_del  = cfs_hash_hh_hnode_del,
430 };
431
432 static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
433        .hop_hhead      = cfs_hash_hd_hhead,
434        .hop_hhead_size = cfs_hash_hd_hhead_size,
435        .hop_hnode_add  = cfs_hash_hd_hnode_add,
436        .hop_hnode_del  = cfs_hash_hd_hnode_del,
437 };
438
439 static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
440        .hop_hhead      = cfs_hash_dh_hhead,
441        .hop_hhead_size = cfs_hash_dh_hhead_size,
442        .hop_hnode_add  = cfs_hash_dh_hnode_add,
443        .hop_hnode_del  = cfs_hash_dh_hnode_del,
444 };
445
446 static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
447        .hop_hhead      = cfs_hash_dd_hhead,
448        .hop_hhead_size = cfs_hash_dd_hhead_size,
449        .hop_hnode_add  = cfs_hash_dd_hnode_add,
450        .hop_hnode_del  = cfs_hash_dd_hnode_del,
451 };
452
453 static void
454 cfs_hash_hlist_setup(struct cfs_hash *hs)
455 {
456         if (cfs_hash_with_add_tail(hs)) {
457                 hs->hs_hops = cfs_hash_with_depth(hs) ?
458                               &cfs_hash_dd_hops : &cfs_hash_dh_hops;
459         } else {
460                 hs->hs_hops = cfs_hash_with_depth(hs) ?
461                               &cfs_hash_hd_hops : &cfs_hash_hh_hops;
462         }
463 }
464
465 static void
466 cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
467                      unsigned int bits, const void *key, struct cfs_hash_bd *bd)
468 {
469         unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
470
471         LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
472
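        /*
         * The low (bits - hs_bkt_bits) bits of @index select the bucket
         * and the high bits select the hlist head inside it. Worked
         * example: with bits = 10 and hs_bkt_bits = 3 there are
         * 2^7 = 128 buckets of 2^3 = 8 hlist heads each; the low 7 bits
         * pick the bucket and the top 3 bits pick the offset.
         */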
473         bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
474         bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
475 }
476
477 void
478 cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
479 {
480         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
481         if (likely(hs->hs_rehash_buckets == NULL)) {
482                 cfs_hash_bd_from_key(hs, hs->hs_buckets,
483                                      hs->hs_cur_bits, key, bd);
484         } else {
485                 LASSERT(hs->hs_rehash_bits != 0);
486                 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
487                                      hs->hs_rehash_bits, key, bd);
488         }
489 }
490 EXPORT_SYMBOL(cfs_hash_bd_get);
491
492 static inline void
493 cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
494 {
495         if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
496                 return;
497
498         bd->bd_bucket->hsb_depmax = dep_cur;
499 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
500         if (likely(warn_on_depth == 0 ||
501                    max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
502                 return;
503
504         spin_lock(&hs->hs_dep_lock);
505         hs->hs_dep_max  = dep_cur;
506         hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
507         hs->hs_dep_off  = bd->bd_offset;
508         hs->hs_dep_bits = hs->hs_cur_bits;
509         spin_unlock(&hs->hs_dep_lock);
510
511         cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
512 # endif
513 }
514
515 void
516 cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
517                        struct hlist_node *hnode)
518 {
519         int             rc;
520
521         rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
522         cfs_hash_bd_dep_record(hs, bd, rc);
523         bd->bd_bucket->hsb_version++;
524         if (unlikely(bd->bd_bucket->hsb_version == 0))
525                 bd->bd_bucket->hsb_version++;
526         bd->bd_bucket->hsb_count++;
527
528         if (cfs_hash_with_counter(hs))
529                 atomic_inc(&hs->hs_count);
530         if (!cfs_hash_with_no_itemref(hs))
531                 cfs_hash_get(hs, hnode);
532 }
533 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
534
535 void
536 cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
537                        struct hlist_node *hnode)
538 {
539         hs->hs_hops->hop_hnode_del(hs, bd, hnode);
540
541         LASSERT(bd->bd_bucket->hsb_count > 0);
542         bd->bd_bucket->hsb_count--;
543         bd->bd_bucket->hsb_version++;
544         if (unlikely(bd->bd_bucket->hsb_version == 0))
545                 bd->bd_bucket->hsb_version++;
546
547         if (cfs_hash_with_counter(hs)) {
548                 LASSERT(atomic_read(&hs->hs_count) > 0);
549                 atomic_dec(&hs->hs_count);
550         }
551         if (!cfs_hash_with_no_itemref(hs))
552                 cfs_hash_put_locked(hs, hnode);
553 }
554 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
555
556 void
557 cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
558                         struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
559 {
560         struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
561         struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
562         int             rc;
563
564         if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
565                 return;
566
567         /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
568          * in cfs_hash_bd_del/add_locked */
569         hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
570         rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
571         cfs_hash_bd_dep_record(hs, bd_new, rc);
572
573         LASSERT(obkt->hsb_count > 0);
574         obkt->hsb_count--;
575         obkt->hsb_version++;
576         if (unlikely(obkt->hsb_version == 0))
577                 obkt->hsb_version++;
578         nbkt->hsb_count++;
579         nbkt->hsb_version++;
580         if (unlikely(nbkt->hsb_version == 0))
581                 nbkt->hsb_version++;
582 }
583 EXPORT_SYMBOL(cfs_hash_bd_move_locked);
584
585 enum {
586         /** always set, for sanity (avoid ZERO intent) */
587         CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
588         /** return entry with a ref */
589         CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
590         /** add entry if not existing */
591         CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
592         /** delete entry, ignore other masks */
593         CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
594 };
595
596 typedef enum cfs_hash_lookup_intent {
597         /** return item w/o refcount */
598         CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
599         /** return item with refcount */
600         CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
601                                        CFS_HS_LOOKUP_MASK_REF),
602         /** return item w/o refcount if existed, otherwise add */
603         CFS_HS_LOOKUP_IT_ADD    = (CFS_HS_LOOKUP_MASK_FIND |
604                                        CFS_HS_LOOKUP_MASK_ADD),
605         /** return item with refcount if existed, otherwise add */
606         CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
607                                        CFS_HS_LOOKUP_MASK_ADD),
608         /** delete if existed */
609         CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
610                                        CFS_HS_LOOKUP_MASK_DEL)
611 } cfs_hash_lookup_intent_t;
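
/*
 * Example composition (see cfs_hash_bd_findadd_locked() below): an
 * intent is built as CFS_HS_LOOKUP_IT_ADD optionally OR'd with
 * CFS_HS_LOOKUP_MASK_REF, i.e. FIND | ADD [| REF].
 */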
612
613 static struct hlist_node *
614 cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
615                           const void *key, struct hlist_node *hnode,
616                           cfs_hash_lookup_intent_t intent)
617
618 {
619         struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
620         struct hlist_node  *ehnode;
621         struct hlist_node  *match;
622         int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
623
        /* with this function, we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time */
626         match = intent_add ? NULL : hnode;
627         hlist_for_each(ehnode, hhead) {
628                 if (!cfs_hash_keycmp(hs, key, ehnode))
629                         continue;
630
631                 if (match != NULL && match != ehnode) /* can't match */
632                         continue;
633
634                 /* match and ... */
635                 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
636                         cfs_hash_bd_del_locked(hs, bd, ehnode);
637                         return ehnode;
638                 }
639
640                 /* caller wants refcount? */
641                 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
642                         cfs_hash_get(hs, ehnode);
643                 return ehnode;
644         }
645         /* no match item */
646         if (!intent_add)
647                 return NULL;
648
649         LASSERT(hnode != NULL);
650         cfs_hash_bd_add_locked(hs, bd, hnode);
651         return hnode;
652 }
653
654 struct hlist_node *
655 cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
656 {
657         return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
658                                          CFS_HS_LOOKUP_IT_FIND);
659 }
660 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
661
662 struct hlist_node *
663 cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
664 {
665         return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
666                                          CFS_HS_LOOKUP_IT_PEEK);
667 }
668 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
669
670 struct hlist_node *
671 cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
672                            const void *key, struct hlist_node *hnode,
673                            int noref)
674 {
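        /* branchless intent build: !noref evaluates to 0 or 1, so
         * CFS_HS_LOOKUP_MASK_REF is added exactly when the caller
         * wants a reference taken on the found item */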
675         return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
676                                          CFS_HS_LOOKUP_IT_ADD |
677                                          (!noref * CFS_HS_LOOKUP_MASK_REF));
678 }
679 EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
680
681 struct hlist_node *
682 cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
683                            const void *key, struct hlist_node *hnode)
684 {
685         /* hnode can be NULL, we find the first item with @key */
686         return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
687                                          CFS_HS_LOOKUP_IT_FINDDEL);
688 }
689 EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
690
691 static void
692 cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
693                        unsigned n, int excl)
694 {
695         struct cfs_hash_bucket *prev = NULL;
696         int             i;
697
        /**
         * bds must be ordered by ascending bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket
         * but have different bd::bd_offset, so we must take care to
         * avoid deadlock.
         */
703         cfs_hash_for_each_bd(bds, n, i) {
704                 if (prev == bds[i].bd_bucket)
705                         continue;
706
707                 LASSERT(prev == NULL ||
708                         prev->hsb_index < bds[i].bd_bucket->hsb_index);
709                 cfs_hash_bd_lock(hs, &bds[i], excl);
710                 prev = bds[i].bd_bucket;
711         }
712 }
713
714 static void
715 cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
716                          unsigned n, int excl)
717 {
718         struct cfs_hash_bucket *prev = NULL;
719         int             i;
720
721         cfs_hash_for_each_bd(bds, n, i) {
722                 if (prev != bds[i].bd_bucket) {
723                         cfs_hash_bd_unlock(hs, &bds[i], excl);
724                         prev = bds[i].bd_bucket;
725                 }
726         }
727 }
728
729 static struct hlist_node *
730 cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
731                                 unsigned n, const void *key)
732 {
733         struct hlist_node  *ehnode;
734         unsigned           i;
735
736         cfs_hash_for_each_bd(bds, n, i) {
737                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
738                                                    CFS_HS_LOOKUP_IT_FIND);
739                 if (ehnode != NULL)
740                         return ehnode;
741         }
742         return NULL;
743 }
744
745 static struct hlist_node *
746 cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
747                                  struct cfs_hash_bd *bds, unsigned n, const void *key,
748                                  struct hlist_node *hnode, int noref)
749 {
750         struct hlist_node  *ehnode;
751         int             intent;
752         unsigned           i;
753
754         LASSERT(hnode != NULL);
755         intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
756
757         cfs_hash_for_each_bd(bds, n, i) {
758                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
759                                                    NULL, intent);
760                 if (ehnode != NULL)
761                         return ehnode;
762         }
763
764         if (i == 1) { /* only one bucket */
765                 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
766         } else {
767                 struct cfs_hash_bd      mybd;
768
769                 cfs_hash_bd_get(hs, key, &mybd);
770                 cfs_hash_bd_add_locked(hs, &mybd, hnode);
771         }
772
773         return hnode;
774 }
775
776 static struct hlist_node *
777 cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
778                                  unsigned n, const void *key,
779                                  struct hlist_node *hnode)
780 {
781         struct hlist_node  *ehnode;
782         unsigned           i;
783
784         cfs_hash_for_each_bd(bds, n, i) {
785                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
786                                                    CFS_HS_LOOKUP_IT_FINDDEL);
787                 if (ehnode != NULL)
788                         return ehnode;
789         }
790         return NULL;
791 }
792
793 static void
794 cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
795 {
796         int     rc;
797
798         if (bd2->bd_bucket == NULL)
799                 return;
800
801         if (bd1->bd_bucket == NULL) {
802                 *bd1 = *bd2;
803                 bd2->bd_bucket = NULL;
804                 return;
805         }
806
807         rc = cfs_hash_bd_compare(bd1, bd2);
808         if (rc == 0) {
809                 bd2->bd_bucket = NULL;
810
        } else if (rc > 0) { /* swap bd1 and bd2 */
812                 struct cfs_hash_bd tmp;
813
814                 tmp = *bd2;
815                 *bd2 = *bd1;
816                 *bd1 = tmp;
817         }
818 }
819
820 void
821 cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds)
822 {
823         /* NB: caller should hold hs_lock.rw if REHASH is set */
824         cfs_hash_bd_from_key(hs, hs->hs_buckets,
825                              hs->hs_cur_bits, key, &bds[0]);
826         if (likely(hs->hs_rehash_buckets == NULL)) {
827                 /* no rehash or not rehashing */
828                 bds[1].bd_bucket = NULL;
829                 return;
830         }
831
832         LASSERT(hs->hs_rehash_bits != 0);
833         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
834                              hs->hs_rehash_bits, key, &bds[1]);
835
836         cfs_hash_bd_order(&bds[0], &bds[1]);
837 }
838 EXPORT_SYMBOL(cfs_hash_dual_bd_get);
839
840 void
841 cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
842 {
843         cfs_hash_multi_bd_lock(hs, bds, 2, excl);
844 }
845 EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
846
847 void
848 cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
849 {
850         cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
851 }
852 EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
853
854 struct hlist_node *
855 cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
856                                const void *key)
857 {
858         return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
859 }
860 EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
861
862 struct hlist_node *
863 cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
864                                 const void *key, struct hlist_node *hnode,
865                                 int noref)
866 {
867         return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
868                                                 hnode, noref);
869 }
870 EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
871
872 struct hlist_node *
873 cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
874                                 const void *key, struct hlist_node *hnode)
875 {
876         return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
877 }
878 EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
879
880 static void
881 cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
882                       int bkt_size, int prev_size, int size)
883 {
884         int     i;
885
886         for (i = prev_size; i < size; i++) {
887                 if (buckets[i] != NULL)
888                         LIBCFS_FREE(buckets[i], bkt_size);
889         }
890
891         LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
892 }
893
894 /*
895  * Create or grow bucket memory. Return old_buckets if no allocation was
896  * needed, the newly allocated buckets if allocation was needed and
897  * successful, and NULL on error.
898  */
899 static struct cfs_hash_bucket **
900 cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
901                          unsigned int old_size, unsigned int new_size)
902 {
903         struct cfs_hash_bucket **new_bkts;
904         int              i;
905
906         LASSERT(old_size == 0 || old_bkts != NULL);
907
908         if (old_bkts != NULL && old_size == new_size)
909                 return old_bkts;
910
911         LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
912         if (new_bkts == NULL)
913                 return NULL;
914
915         if (old_bkts != NULL) {
916                 memcpy(new_bkts, old_bkts,
917                        min(old_size, new_size) * sizeof(*old_bkts));
918         }
919
920         for (i = old_size; i < new_size; i++) {
921                 struct hlist_head *hhead;
922                 struct cfs_hash_bd     bd;
923
924                 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
925                 if (new_bkts[i] == NULL) {
926                         cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
927                                               old_size, new_size);
928                         return NULL;
929                 }
930
931                 new_bkts[i]->hsb_index   = i;
932                 new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
933                 new_bkts[i]->hsb_depmax  = -1; /* unknown */
934                 bd.bd_bucket = new_bkts[i];
935                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
936                         INIT_HLIST_HEAD(hhead);
937
938                 if (cfs_hash_with_no_lock(hs) ||
939                     cfs_hash_with_no_bktlock(hs))
940                         continue;
941
942                 if (cfs_hash_with_rw_bktlock(hs))
943                         rwlock_init(&new_bkts[i]->hsb_lock.rw);
944                 else if (cfs_hash_with_spin_bktlock(hs))
945                         spin_lock_init(&new_bkts[i]->hsb_lock.spin);
946                 else
947                         LBUG(); /* invalid use-case */
948         }
949         return new_bkts;
950 }
951
/**
 * Initialize new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
 *           - CFS_HASH_SORT enable chained hash sort
 */
961 static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
962
963 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
964 static int cfs_hash_dep_print(cfs_workitem_t *wi)
965 {
966         struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
967         int      dep;
968         int      bkt;
969         int      off;
970         int      bits;
971
972         spin_lock(&hs->hs_dep_lock);
973         dep  = hs->hs_dep_max;
974         bkt  = hs->hs_dep_bkt;
975         off  = hs->hs_dep_off;
976         bits = hs->hs_dep_bits;
977         spin_unlock(&hs->hs_dep_lock);
978
979         LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
980                       hs->hs_name, bits, dep, bkt, off);
981         spin_lock(&hs->hs_dep_lock);
982         hs->hs_dep_bits = 0; /* mark as workitem done */
983         spin_unlock(&hs->hs_dep_lock);
984         return 0;
985 }
986
987 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
988 {
989         spin_lock_init(&hs->hs_dep_lock);
990         cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
991 }
992
993 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
994 {
995         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
996                 return;
997
998         spin_lock(&hs->hs_dep_lock);
999         while (hs->hs_dep_bits != 0) {
1000                 spin_unlock(&hs->hs_dep_lock);
1001                 cond_resched();
1002                 spin_lock(&hs->hs_dep_lock);
1003         }
1004         spin_unlock(&hs->hs_dep_lock);
1005 }
1006
1007 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
1008
1009 static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
1010 static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
1011
1012 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1013
1014 struct cfs_hash *
1015 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1016                 unsigned bkt_bits, unsigned extra_bytes,
1017                 unsigned min_theta, unsigned max_theta,
1018                 cfs_hash_ops_t *ops, unsigned flags)
1019 {
1020         struct cfs_hash *hs;
1021         int      len;
1022
1023         CLASSERT(CFS_HASH_THETA_BITS < 15);
1024
1025         LASSERT(name != NULL);
1026         LASSERT(ops != NULL);
1027         LASSERT(ops->hs_key);
1028         LASSERT(ops->hs_hash);
1029         LASSERT(ops->hs_object);
1030         LASSERT(ops->hs_keycmp);
1031         LASSERT(ops->hs_get != NULL);
1032         LASSERT(ops->hs_put_locked != NULL);
1033
1034         if ((flags & CFS_HASH_REHASH) != 0)
1035                 flags |= CFS_HASH_COUNTER; /* must have counter */
1036
1037         LASSERT(cur_bits > 0);
1038         LASSERT(cur_bits >= bkt_bits);
1039         LASSERT(max_bits >= cur_bits && max_bits < 31);
1040         LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1041         LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1042                      (flags & CFS_HASH_NO_LOCK) == 0));
1043         LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1044                       ops->hs_keycpy != NULL));
1045
1046         len = (flags & CFS_HASH_BIGNAME) == 0 ?
1047               CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1048         LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
1049         if (hs == NULL)
1050                 return NULL;
1051
1052         strncpy(hs->hs_name, name, len);
1053         hs->hs_name[len - 1] = '\0';
1054         hs->hs_flags = flags;
1055
1056         atomic_set(&hs->hs_refcount, 1);
1057         atomic_set(&hs->hs_count, 0);
1058
1059         cfs_hash_lock_setup(hs);
1060         cfs_hash_hlist_setup(hs);
1061
1062         hs->hs_cur_bits = (__u8)cur_bits;
1063         hs->hs_min_bits = (__u8)cur_bits;
1064         hs->hs_max_bits = (__u8)max_bits;
1065         hs->hs_bkt_bits = (__u8)bkt_bits;
1066
1067         hs->hs_ops       = ops;
1068         hs->hs_extra_bytes = extra_bytes;
1069         hs->hs_rehash_bits = 0;
1070         cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1071         cfs_hash_depth_wi_init(hs);
1072
1073         if (cfs_hash_with_rehash(hs))
1074                 __cfs_hash_set_theta(hs, min_theta, max_theta);
1075
1076         hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1077                                                   CFS_HASH_NBKT(hs));
1078         if (hs->hs_buckets != NULL)
1079                 return hs;
1080
1081         LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
1082         return NULL;
1083 }
1084 EXPORT_SYMBOL(cfs_hash_create);
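
/*
 * Minimal usage sketch for cfs_hash_create() (kept under "#if 0"; it
 * is illustrative only). The example object, the callbacks, and the
 * exact cfs_hash_ops_t callback signatures are assumptions based on
 * the libcfs hash header, not part of this file.
 */
#if 0
struct my_obj {
        __u64                   mo_key;
        atomic_t                mo_ref;
        struct hlist_node       mo_hnode;
};

static unsigned my_hop_hash(struct cfs_hash *hs, const void *key,
                            unsigned mask)
{
        return cfs_hash_u64_hash(*(__u64 *)key, mask);
}

static void *my_hop_key(struct hlist_node *hnode)
{
        return &container_of(hnode, struct my_obj, mo_hnode)->mo_key;
}

static int my_hop_keycmp(const void *key, struct hlist_node *hnode)
{
        /* nonzero means "matches" (see cfs_hash_bd_lookup_intent) */
        return container_of(hnode, struct my_obj, mo_hnode)->mo_key ==
               *(__u64 *)key;
}

static void *my_hop_object(struct hlist_node *hnode)
{
        return container_of(hnode, struct my_obj, mo_hnode);
}

static void my_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
        atomic_inc(&container_of(hnode, struct my_obj, mo_hnode)->mo_ref);
}

static void my_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
        atomic_dec(&container_of(hnode, struct my_obj, mo_hnode)->mo_ref);
}

static cfs_hash_ops_t my_hops = {
        .hs_hash        = my_hop_hash,
        .hs_key         = my_hop_key,
        .hs_keycmp      = my_hop_keycmp,
        .hs_object      = my_hop_object,
        .hs_get         = my_hop_get,
        .hs_put_locked  = my_hop_put_locked,
};

static struct cfs_hash *my_hash_init(void)
{
        /* start at 2^7 slots, grow up to 2^12, 2^3 hlist heads/bucket */
        return cfs_hash_create("my_hash", 7, 12, 3, 0,
                               CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
                               &my_hops, CFS_HASH_DEFAULT);
}
#endif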
1085
1086 /**
1087  * Cleanup libcfs hash @hs.
1088  */
1089 static void
1090 cfs_hash_destroy(struct cfs_hash *hs)
1091 {
1092         struct hlist_node     *hnode;
1093         struct hlist_node     *pos;
1094         struct cfs_hash_bd       bd;
1095         int                i;
1096
1097         LASSERT(hs != NULL);
1098         LASSERT(!cfs_hash_is_exiting(hs) &&
1099                 !cfs_hash_is_iterating(hs));
1100
        /**
         * prohibit further rehashes; no lock is needed because
         * we are the only (last) one who can change it.
         */
1105         hs->hs_exiting = 1;
1106         if (cfs_hash_with_rehash(hs))
1107                 cfs_hash_rehash_cancel(hs);
1108
1109         cfs_hash_depth_wi_cancel(hs);
1110         /* rehash should be done/canceled */
1111         LASSERT(hs->hs_buckets != NULL &&
1112                 hs->hs_rehash_buckets == NULL);
1113
1114         cfs_hash_for_each_bucket(hs, &bd, i) {
1115                 struct hlist_head *hhead;
1116
1117                 LASSERT(bd.bd_bucket != NULL);
                /* no lock is really needed here; taken only for
                 * consistency of the code */
1119                 cfs_hash_bd_lock(hs, &bd, 1);
1120
1121                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1122                         hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't validate the key here, because
                                 * rehash might have been interrupted */
1130                                 cfs_hash_bd_del_locked(hs, &bd, hnode);
1131                                 cfs_hash_exit(hs, hnode);
1132                         }
1133                 }
1134                 LASSERT(bd.bd_bucket->hsb_count == 0);
1135                 cfs_hash_bd_unlock(hs, &bd, 1);
1136                 cond_resched();
1137         }
1138
1139         LASSERT(atomic_read(&hs->hs_count) == 0);
1140
1141         cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1142                               0, CFS_HASH_NBKT(hs));
1143         i = cfs_hash_with_bigname(hs) ?
1144             CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1145         LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
1146 }
1147
1148 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
1149 {
1150         if (atomic_inc_not_zero(&hs->hs_refcount))
1151                 return hs;
1152         return NULL;
1153 }
1154 EXPORT_SYMBOL(cfs_hash_getref);
1155
1156 void cfs_hash_putref(struct cfs_hash *hs)
1157 {
1158         if (atomic_dec_and_test(&hs->hs_refcount))
1159                 cfs_hash_destroy(hs);
1160 }
1161 EXPORT_SYMBOL(cfs_hash_putref);
1162
1163 static inline int
1164 cfs_hash_rehash_bits(struct cfs_hash *hs)
1165 {
1166         if (cfs_hash_with_no_lock(hs) ||
1167             !cfs_hash_with_rehash(hs))
1168                 return -EOPNOTSUPP;
1169
1170         if (unlikely(cfs_hash_is_exiting(hs)))
1171                 return -ESRCH;
1172
1173         if (unlikely(cfs_hash_is_rehashing(hs)))
1174                 return -EALREADY;
1175
1176         if (unlikely(cfs_hash_is_iterating(hs)))
1177                 return -EAGAIN;
1178
1179         /* XXX: need to handle case with max_theta != 2.0
1180          *      and the case with min_theta != 0.5 */
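        /*
         * theta is the hash load factor (average items per hash slot);
         * assuming the usual fixed-point encoding with
         * CFS_HASH_THETA_BITS fractional bits, grow by one bit when the
         * table is denser than hs_max_theta and (if shrinking is
         * enabled) shrink by one bit when sparser than hs_min_theta.
         */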
1181         if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1182             (__cfs_hash_theta(hs) > hs->hs_max_theta))
1183                 return hs->hs_cur_bits + 1;
1184
1185         if (!cfs_hash_with_shrink(hs))
1186                 return 0;
1187
1188         if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1189             (__cfs_hash_theta(hs) < hs->hs_min_theta))
1190                 return hs->hs_cur_bits - 1;
1191
1192         return 0;
1193 }
1194
1195 /**
1196  * don't allow inline rehash if:
1197  * - user wants non-blocking change (add/del) on hash table
1198  * - too many elements
1199  */
1200 static inline int
1201 cfs_hash_rehash_inline(struct cfs_hash *hs)
1202 {
1203         return !cfs_hash_with_nblk_change(hs) &&
1204                atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1205 }
1206
1207 /**
1208  * Add item @hnode to libcfs hash @hs using @key.  The registered
1209  * ops->hs_get function will be called when the item is added.
1210  */
1211 void
1212 cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1213 {
1214         struct cfs_hash_bd   bd;
1215         int          bits;
1216
1217         LASSERT(hlist_unhashed(hnode));
1218
1219         cfs_hash_lock(hs, 0);
1220         cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1221
1222         cfs_hash_key_validate(hs, key, hnode);
1223         cfs_hash_bd_add_locked(hs, &bd, hnode);
1224
1225         cfs_hash_bd_unlock(hs, &bd, 1);
1226
1227         bits = cfs_hash_rehash_bits(hs);
1228         cfs_hash_unlock(hs, 0);
1229         if (bits > 0)
1230                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1231 }
1232 EXPORT_SYMBOL(cfs_hash_add);
1233
1234 static struct hlist_node *
1235 cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1236                      struct hlist_node *hnode, int noref)
1237 {
1238         struct hlist_node *ehnode;
1239         struct cfs_hash_bd     bds[2];
1240         int            bits = 0;
1241
1242         LASSERT(hlist_unhashed(hnode));
1243
1244         cfs_hash_lock(hs, 0);
1245         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1246
1247         cfs_hash_key_validate(hs, key, hnode);
1248         ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1249                                                  hnode, noref);
1250         cfs_hash_dual_bd_unlock(hs, bds, 1);
1251
1252         if (ehnode == hnode) /* new item added */
1253                 bits = cfs_hash_rehash_bits(hs);
1254         cfs_hash_unlock(hs, 0);
1255         if (bits > 0)
1256                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1257
1258         return ehnode;
1259 }
1260
1261 /**
1262  * Add item @hnode to libcfs hash @hs using @key.  The registered
1263  * ops->hs_get function will be called if the item was added.
1264  * Returns 0 on success or -EALREADY on key collisions.
1265  */
1266 int
1267 cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1268 {
1269         return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1270                -EALREADY : 0;
1271 }
1272 EXPORT_SYMBOL(cfs_hash_add_unique);
1273
1274 /**
1275  * Add item @hnode to libcfs hash @hs using @key.  If this @key
1276  * already exists in the hash then ops->hs_get will be called on the
1277  * conflicting entry and that entry will be returned to the caller.
1278  * Otherwise ops->hs_get is called on the item which was added.
1279  */
1280 void *
1281 cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
1282                         struct hlist_node *hnode)
1283 {
1284         hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1285
1286         return cfs_hash_object(hs, hnode);
1287 }
1288 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1289
/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
1297 void *
1298 cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1299 {
1300         void       *obj  = NULL;
1301         int          bits = 0;
1302         struct cfs_hash_bd   bds[2];
1303
1304         cfs_hash_lock(hs, 0);
1305         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1306
1307         /* NB: do nothing if @hnode is not in hash table */
1308         if (hnode == NULL || !hlist_unhashed(hnode)) {
1309                 if (bds[1].bd_bucket == NULL && hnode != NULL) {
1310                         cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1311                 } else {
1312                         hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1313                                                                 key, hnode);
1314                 }
1315         }
1316
1317         if (hnode != NULL) {
1318                 obj  = cfs_hash_object(hs, hnode);
1319                 bits = cfs_hash_rehash_bits(hs);
1320         }
1321
1322         cfs_hash_dual_bd_unlock(hs, bds, 1);
1323         cfs_hash_unlock(hs, 0);
1324         if (bits > 0)
1325                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1326
1327         return obj;
1328 }
1329 EXPORT_SYMBOL(cfs_hash_del);
1330
/**
 * Delete item given @key in libcfs hash @hs.  The first @key found in
 * the hash will be removed; if the key exists multiple times in the
 * hash @hs, this function must be called once per key.  The removed
 * object will be returned and ops->hs_put is called on it.
 */
1337 void *
1338 cfs_hash_del_key(struct cfs_hash *hs, const void *key)
1339 {
1340         return cfs_hash_del(hs, key, NULL);
1341 }
1342 EXPORT_SYMBOL(cfs_hash_del_key);
1343
/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash hs->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs NULL is returned.
 */
1352 void *
1353 cfs_hash_lookup(struct cfs_hash *hs, const void *key)
1354 {
1355         void             *obj = NULL;
1356         struct hlist_node     *hnode;
1357         struct cfs_hash_bd       bds[2];
1358
1359         cfs_hash_lock(hs, 0);
1360         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1361
1362         hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1363         if (hnode != NULL)
1364                 obj = cfs_hash_object(hs, hnode);
1365
1366         cfs_hash_dual_bd_unlock(hs, bds, 0);
1367         cfs_hash_unlock(hs, 0);
1368
1369         return obj;
1370 }
1371 EXPORT_SYMBOL(cfs_hash_lookup);
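
/*
 * Lifecycle sketch for the add/lookup/put contract documented above
 * (illustrative only; struct my_obj, h, and my_obj_alloc() are the
 * hypothetical examples from the cfs_hash_create() sketch earlier):
 */
#if 0
        struct my_obj *obj = my_obj_alloc();            /* hypothetical */
        __u64 key = 42;

        obj->mo_key = key;
        cfs_hash_add(h, &obj->mo_key, &obj->mo_hnode);  /* takes a ref */

        obj = cfs_hash_lookup(h, &key);                 /* ref via hs_get */
        if (obj != NULL)
                cfs_hash_put(h, &obj->mo_hnode);        /* drop that ref */
#endif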
1372
static void
cfs_hash_for_each_enter(struct cfs_hash *hs)
{
1375         LASSERT(!cfs_hash_is_exiting(hs));
1376
1377         if (!cfs_hash_with_rehash(hs))
1378                 return;
        /*
         * NB: there is a race on cfs_hash::hs_iterating, but it doesn't
         * matter because it's just an unreliable signal to the rehash
         * thread; the rehash thread will try to finish the rehash ASAP
         * when it sees this.
         */
1384         hs->hs_iterating = 1;
1385
1386         cfs_hash_lock(hs, 1);
1387         hs->hs_iterators++;
1388
        /* NB: iteration is mostly called by service threads; we tend
         * to cancel pending rehash requests instead of blocking the
         * service thread, and relaunch the rehash request after the
         * iteration */
1393         if (cfs_hash_is_rehashing(hs))
1394                 cfs_hash_rehash_cancel_locked(hs);
1395         cfs_hash_unlock(hs, 1);
1396 }
1397
static void
cfs_hash_for_each_exit(struct cfs_hash *hs)
{
1400         int remained;
1401         int bits;
1402
1403         if (!cfs_hash_with_rehash(hs))
1404                 return;
1405         cfs_hash_lock(hs, 1);
1406         remained = --hs->hs_iterators;
1407         bits = cfs_hash_rehash_bits(hs);
1408         cfs_hash_unlock(hs, 1);
        /* NB: there is a race on cfs_hash::hs_iterating, see above */
1410         if (remained == 0)
1411                 hs->hs_iterating = 0;
1412         if (bits > 0) {
1413                 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1414                                     CFS_HASH_LOOP_HOG);
1415         }
1416 }
1417
/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the function may sleep!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item
 *      with cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
1431         struct hlist_node     *hnode;
1432         struct hlist_node     *pos;
1433         struct cfs_hash_bd       bd;
1434         __u64            count = 0;
1435         int                excl  = !!remove_safe;
1436         int                loop  = 0;
1437         int                i;
1438
1439         cfs_hash_for_each_enter(hs);
1440
1441         cfs_hash_lock(hs, 0);
1442         LASSERT(!cfs_hash_is_rehashing(hs));
1443
1444         cfs_hash_for_each_bucket(hs, &bd, i) {
1445                 struct hlist_head *hhead;
1446
1447                 cfs_hash_bd_lock(hs, &bd, excl);
1448                 if (func == NULL) { /* only glimpse size */
1449                         count += bd.bd_bucket->hsb_count;
1450                         cfs_hash_bd_unlock(hs, &bd, excl);
1451                         continue;
1452                 }
1453
1454                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1455                         hlist_for_each_safe(hnode, pos, hhead) {
1456                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1457                                 count++;
1458                                 loop++;
1459                                 if (func(hs, &bd, hnode, data)) {
1460                                         cfs_hash_bd_unlock(hs, &bd, excl);
1461                                         goto out;
1462                                 }
1463                         }
1464                 }
1465                 cfs_hash_bd_unlock(hs, &bd, excl);
1466                 if (loop < CFS_HASH_LOOP_HOG)
1467                         continue;
1468                 loop = 0;
1469                 cfs_hash_unlock(hs, 0);
1470                 cond_resched();
1471                 cfs_hash_lock(hs, 0);
1472         }
1473  out:
1474         cfs_hash_unlock(hs, 0);
1475
1476         cfs_hash_for_each_exit(hs);
1477         return count;
1478 }
1479
1480 typedef struct {
1481         cfs_hash_cond_opt_cb_t  func;
1482         void               *arg;
1483 } cfs_hash_cond_arg_t;
1484
1485 static int
1486 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1487                          struct hlist_node *hnode, void *data)
1488 {
1489         cfs_hash_cond_arg_t *cond = data;
1490
1491         if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1492                 cfs_hash_bd_del_locked(hs, bd, hnode);
1493         return 0;
1494 }
1495
/**
 * Delete items from the libcfs hash @hs when @func returns true.
 * The write lock is held during the loop over each bucket to prevent
 * any object from being referenced while it is removed.
 */
1501 void
1502 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1503 {
1504         cfs_hash_cond_arg_t arg = {
1505                 .func   = func,
1506                 .arg    = data,
1507         };
1508
1509         cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1510 }
1511 EXPORT_SYMBOL(cfs_hash_cond_del);
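
/*
 * Callback sketch for cfs_hash_cond_del() (illustrative only; my_obj
 * is the hypothetical example type above and mo_stale a hypothetical
 * field):
 */
#if 0
static int my_obj_is_stale(void *obj, void *arg)
{
        return ((struct my_obj *)obj)->mo_stale;
}

        /* drop every stale object; runs with bucket write locks held */
        cfs_hash_cond_del(h, my_obj_is_stale, NULL);
#endif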
1512
1513 void
1514 cfs_hash_for_each(struct cfs_hash *hs,
1515                   cfs_hash_for_each_cb_t func, void *data)
1516 {
1517         cfs_hash_for_each_tight(hs, func, data, 0);
1518 }
1519 EXPORT_SYMBOL(cfs_hash_for_each);
1520
1521 void
1522 cfs_hash_for_each_safe(struct cfs_hash *hs,
1523                        cfs_hash_for_each_cb_t func, void *data)
{
1524         cfs_hash_for_each_tight(hs, func, data, 1);
1525 }
1526 EXPORT_SYMBOL(cfs_hash_for_each_safe);
1527
1528 static int
1529 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1530               struct hlist_node *hnode, void *data)
1531 {
1532         *(int *)data = 0;
1533         return 1; /* return 1 to break the loop */
1534 }
1535
1536 int
1537 cfs_hash_is_empty(struct cfs_hash *hs)
1538 {
1539         int empty = 1;
1540
1541         cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1542         return empty;
1543 }
1544 EXPORT_SYMBOL(cfs_hash_is_empty);
1545
1546 __u64
1547 cfs_hash_size_get(struct cfs_hash *hs)
1548 {
1549         return cfs_hash_with_counter(hs) ?
1550                atomic_read(&hs->hs_count) :
1551                cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1552 }
1553 EXPORT_SYMBOL(cfs_hash_size_get);
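
/*
 * Note: cfs_hash_size_get() is O(1) only for hashes created with the
 * counter feature (cfs_hash_with_counter()); otherwise it falls back to
 * a full tight walk as above.  Sketch (not built; "my_hash" is a
 * hypothetical hash table):
 */
#if 0
        if (!cfs_hash_is_empty(my_hash))
                CDEBUG(D_INFO, "%llu items still hashed\n",
                       (unsigned long long)cfs_hash_size_get(my_hash));
#endif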
1554
1555 /*
1556  * cfs_hash_for_each_relax:
1557  * Iterate over the hash table and call @func on each item without
1558  * holding any lock. This function cannot guarantee that the iteration
1559  * will finish if either of these features is enabled:
1560  *
1561  *  a. if rehash_key is enabled, an item can be moved from
1562  *     one bucket to another while we iterate;
1563  *  b. the user can remove an item with a non-zero refcount from
1564  *     the hash table, and, even worse, may change its key and
1565  *     insert it into another hash bucket.
1566  * There is no way for us to finish the iteration correctly in
1567  * those two cases, so the iteration has to be stopped on any
1568  * change.
1569  */
1570 static int
1571 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1572                         void *data)
{
1573         struct hlist_node *hnode;
1574         struct hlist_node *tmp;
1575         struct cfs_hash_bd     bd;
1576         __u32        version;
1577         int            count = 0;
1578         int            stop_on_change;
1579         int            rc;
1580         int            i;
1581
1582         stop_on_change = cfs_hash_with_rehash_key(hs) ||
1583                          !cfs_hash_with_no_itemref(hs) ||
1584                          CFS_HOP(hs, put_locked) == NULL;
1585         cfs_hash_lock(hs, 0);
1586         LASSERT(!cfs_hash_is_rehashing(hs));
1587
1588         cfs_hash_for_each_bucket(hs, &bd, i) {
1589                 struct hlist_head *hhead;
1590
1591                 cfs_hash_bd_lock(hs, &bd, 0);
1592                 version = cfs_hash_bd_version_get(&bd);
1593
1594                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1595                         for (hnode = hhead->first; hnode != NULL;) {
1596                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1597                                 cfs_hash_get(hs, hnode);
1598                                 cfs_hash_bd_unlock(hs, &bd, 0);
1599                                 cfs_hash_unlock(hs, 0);
1600
1601                                 rc = func(hs, &bd, hnode, data);
1602                                 if (stop_on_change)
1603                                         cfs_hash_put(hs, hnode);
1604                                 cond_resched();
1605                                 count++;
1606
1607                                 cfs_hash_lock(hs, 0);
1608                                 cfs_hash_bd_lock(hs, &bd, 0);
1609                                 if (!stop_on_change) {
1610                                         tmp = hnode->next;
1611                                         cfs_hash_put_locked(hs, hnode);
1612                                         hnode = tmp;
1613                                 } else { /* bucket changed? */
1614                                         if (version !=
1615                                             cfs_hash_bd_version_get(&bd))
1616                                                 break;
1617                                         /* safe to continue because no change */
1618                                         hnode = hnode->next;
1619                                 }
1620                                 if (rc) /* callback wants to break iteration */
1621                                         break;
1622                         }
1623                 }
1624                 cfs_hash_bd_unlock(hs, &bd, 0);
1625         }
1626         cfs_hash_unlock(hs, 0);
1627
1628         return count;
1629 }
1630
1631 int
1632 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1633                          cfs_hash_for_each_cb_t func, void *data)
{
1634         if (cfs_hash_with_no_lock(hs) ||
1635             cfs_hash_with_rehash_key(hs) ||
1636             !cfs_hash_with_no_itemref(hs))
1637                 return -EOPNOTSUPP;
1638
1639         if (CFS_HOP(hs, get) == NULL ||
1640             (CFS_HOP(hs, put) == NULL &&
1641              CFS_HOP(hs, put_locked) == NULL))
1642                 return -EOPNOTSUPP;
1643
1644         cfs_hash_for_each_enter(hs);
1645         cfs_hash_for_each_relax(hs, func, data);
1646         cfs_hash_for_each_exit(hs);
1647
1648         return 0;
1649 }
1650 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
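
/*
 * The checks above mean a lockless walk is refused unless the hash has
 * both locking and item refcount ops: cfs_hash_for_each_relax() must pin
 * each item with CFS_HOP(hs, get) before dropping the locks around the
 * callback.  Sketch (not built; "my_sleepy_cb" is a hypothetical
 * callback that may sleep, e.g. take a mutex):
 */
#if 0
        if (cfs_hash_for_each_nolock(my_hash, my_sleepy_cb, NULL) ==
            -EOPNOTSUPP)
                CDEBUG(D_INFO, "hash lacks get/put ops for lockless walk\n");
#endif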
1651
1652 /**
1653  * For each hash bucket in the libcfs hash @hs call the passed callback
1654  * @func until all the hash buckets are empty.  The passed callback @func
1655  * or the previously registered callback hs->hs_put must remove the item
1656  * from the hash.  You may either use the cfs_hash_del() or hlist_del()
1657  * functions.  No rwlocks will be held during the callback @func, so it
1658  * is safe to sleep if needed.  This function will not terminate until
1659  * the hash is empty.  Note that it is still possible to concurrently add
1660  * new items into the hash.  It is the caller's responsibility to ensure
1661  * the required locking is in place to prevent concurrent insertions.
1662  */
1663 int
1664 cfs_hash_for_each_empty(struct cfs_hash *hs,
1665                         cfs_hash_for_each_cb_t func, void *data)
{
1666         unsigned  i = 0;
1667
1668         if (cfs_hash_with_no_lock(hs))
1669                 return -EOPNOTSUPP;
1670
1671         if (CFS_HOP(hs, get) == NULL ||
1672             (CFS_HOP(hs, put) == NULL &&
1673              CFS_HOP(hs, put_locked) == NULL))
1674                 return -EOPNOTSUPP;
1675
1676         cfs_hash_for_each_enter(hs);
1677         while (cfs_hash_for_each_relax(hs, func, data)) {
1678                 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1679                        hs->hs_name, i++);
1680         }
1681         cfs_hash_for_each_exit(hs);
1682         return 0;
1683 }
1684 EXPORT_SYMBOL(cfs_hash_for_each_empty);
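
/*
 * Teardown sketch built on cfs_hash_for_each_empty() (not built;
 * "my_release_cb", "my_obj", "mo_key" and my_obj_free() are hypothetical,
 * and cfs_hash_del() is assumed to be the unlink helper declared earlier
 * in this file).  The callback must unlink the item, or the walk above
 * never terminates.
 */
#if 0
static int my_release_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
{
        struct my_obj *mo = cfs_hash_object(hs, hnode);

        /* no bucket lock is held here, so cfs_hash_del() may relock */
        cfs_hash_del(hs, &mo->mo_key, hnode);
        my_obj_free(mo);
        return 0;
}
#endif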
1685
1686 void
1687 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1688                         cfs_hash_for_each_cb_t func, void *data)
1689 {
1690         struct hlist_head   *hhead;
1691         struct hlist_node   *hnode;
1692         struct cfs_hash_bd       bd;
1693
1694         cfs_hash_for_each_enter(hs);
1695         cfs_hash_lock(hs, 0);
1696         if (hindex >= CFS_HASH_NHLIST(hs))
1697                 goto out;
1698
1699         cfs_hash_bd_index_set(hs, hindex, &bd);
1700
1701         cfs_hash_bd_lock(hs, &bd, 0);
1702         hhead = cfs_hash_bd_hhead(hs, &bd);
1703         hlist_for_each(hnode, hhead) {
1704                 if (func(hs, &bd, hnode, data))
1705                         break;
1706         }
1707         cfs_hash_bd_unlock(hs, &bd, 0);
1708  out:
1709         cfs_hash_unlock(hs, 0);
1710         cfs_hash_for_each_exit(hs);
1711 }
1712 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
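
/*
 * Sketch: dump a single hash chain, e.g. from a debugfs handler, when
 * only one hlist index is of interest (not built; "my_hash", "hindex"
 * and "my_dump_cb" are hypothetical; the callback runs under the bucket
 * lock and must not sleep):
 */
#if 0
        cfs_hash_hlist_for_each(my_hash, hindex, my_dump_cb, NULL);
#endif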
1714
1715 /**
1716  * For each item in the libcfs hash @hs which matches the @key call
1717  * the passed callback @func and pass to it as an argument each hash
1718  * item and the private @data. During the callback the bucket lock
1719  * is held, so the callback must never sleep.
1720  */
1721 void
1722 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1723                       cfs_hash_for_each_cb_t func, void *data)
{
1724         struct hlist_node   *hnode;
1725         struct cfs_hash_bd       bds[2];
1726         unsigned            i;
1727
1728         cfs_hash_lock(hs, 0);
1729
1730         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1731
1732         cfs_hash_for_each_bd(bds, 2, i) {
1733                 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1734
1735                 hlist_for_each(hnode, hlist) {
1736                         cfs_hash_bucket_validate(hs, &bds[i], hnode);
1737
1738                         if (cfs_hash_keycmp(hs, key, hnode)) {
1739                                 if (func(hs, &bds[i], hnode, data))
1740                                         break;
1741                         }
1742                 }
1743         }
1744
1745         cfs_hash_dual_bd_unlock(hs, bds, 0);
1746         cfs_hash_unlock(hs, 0);
1747 }
1748 EXPORT_SYMBOL(cfs_hash_for_each_key);
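
/*
 * Sketch: visiting every item that shares one key, which is how
 * duplicate keys are enumerated (not built; "my_hash", "key" and
 * "my_match_cb" are hypothetical; a non-zero return from the callback
 * stops the scan of the current bucket):
 */
#if 0
        cfs_hash_for_each_key(my_hash, &key, my_match_cb, NULL);
#endif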
1749
1750 /**
1751  * Rehash the libcfs hash @hs to the given @bits.  This can be used
1752  * to grow the hash size when excessive chaining is detected, or to
1753  * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
1754  * flag is set in @hs the libcfs hash may be dynamically rehashed
1755  * during addition or removal if the hash's theta value exceeds
1756  * either the hs->hs_min_theta or hs->hs_max_theta values.  By default
1757  * these values are tuned to keep the chained hash depth small, and
1758  * this approach assumes a reasonably uniform hashing function.  The
1759  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1760  */
1761 void
1762 cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1763 {
1764         int     i;
1765
1766         /* caller must hold cfs_hash_lock(hs, 1) */
1767         LASSERT(cfs_hash_with_rehash(hs) &&
1768                 !cfs_hash_with_no_lock(hs));
1769
1770         if (!cfs_hash_is_rehashing(hs))
1771                 return;
1772
1773         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1774                 hs->hs_rehash_bits = 0;
1775                 return;
1776         }
1777
1778         for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1779                 cfs_hash_unlock(hs, 1);
1780                 /* raise a console warning when waiting too long */
1781                 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1782                        "hash %s is still rehashing, rescheduled %d\n",
1783                        hs->hs_name, i - 1);
1784                 cond_resched();
1785                 cfs_hash_lock(hs, 1);
1786         }
1787 }
1788 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1789
1790 void
1791 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1792 {
1793         cfs_hash_lock(hs, 1);
1794         cfs_hash_rehash_cancel_locked(hs);
1795         cfs_hash_unlock(hs, 1);
1796 }
1797 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1798
1799 int
1800 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1801 {
1802         int     rc;
1803
1804         LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1805
1806         cfs_hash_lock(hs, 1);
1807
1808         rc = cfs_hash_rehash_bits(hs);
1809         if (rc <= 0) {
1810                 cfs_hash_unlock(hs, 1);
1811                 return rc;
1812         }
1813
1814         hs->hs_rehash_bits = rc;
1815         if (!do_rehash) {
1816                 /* launch and return */
1817                 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1818                 cfs_hash_unlock(hs, 1);
1819                 return 0;
1820         }
1821
1822         /* rehash right now */
1823         cfs_hash_unlock(hs, 1);
1824
1825         return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1826 }
1827 EXPORT_SYMBOL(cfs_hash_rehash);
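
/*
 * Sketch: trigger an explicit rehash check, e.g. after a bulk insert
 * (not built; "my_hash" is hypothetical).  With do_rehash == 0 the work
 * is merely queued on cfs_sched_rehash; with do_rehash != 0 it runs
 * inline via cfs_hash_rehash_worker().
 */
#if 0
        int rc = cfs_hash_rehash(my_hash, 1);   /* rehash right now */

        if (rc < 0)
                CDEBUG(D_INFO, "rehash failed: %d\n", rc);
#endif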
1828
1829 static int
1830 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1831 {
1832         struct cfs_hash_bd      new;
1833         struct hlist_head  *hhead;
1834         struct hlist_node  *hnode;
1835         struct hlist_node  *pos;
1836         void          *key;
1837         int             c = 0;
1838
1839         /* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
1840         cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1841                 hlist_for_each_safe(hnode, pos, hhead) {
1842                         key = cfs_hash_key(hs, hnode);
1843                         LASSERT(key != NULL);
1844                         /* Validate hnode is in the correct bucket. */
1845                         cfs_hash_bucket_validate(hs, old, hnode);
1846                         /*
1847                          * Delete from old hash bucket; move to new bucket.
1848                          * ops->hs_key must be defined.
1849                          */
1850                         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1851                                              hs->hs_rehash_bits, key, &new);
1852                         cfs_hash_bd_move_locked(hs, old, &new, hnode);
1853                         c++;
1854                 }
1855         }
1856
1857         return c;
1858 }
1859
1860 static int
1861 cfs_hash_rehash_worker(cfs_workitem_t *wi)
1862 {
1863         struct cfs_hash  *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
1864         struct cfs_hash_bucket **bkts;
1865         struct cfs_hash_bd       bd;
1866         unsigned int    old_size;
1867         unsigned int    new_size;
1868         int              bsize;
1869         int              count = 0;
1870         int              rc = 0;
1871         int              i;
1872
1873         LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1874
1875         cfs_hash_lock(hs, 0);
1876         LASSERT(cfs_hash_is_rehashing(hs));
1877
1878         old_size = CFS_HASH_NBKT(hs);
1879         new_size = CFS_HASH_RH_NBKT(hs);
1880
1881         cfs_hash_unlock(hs, 0);
1882
1883         /*
1884          * don't need hs::hs_rwlock for hs::hs_buckets,
1885          * because nobody can change bkt-table except me.
1886          */
1887         bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1888                                         old_size, new_size);
1889         cfs_hash_lock(hs, 1);
1890         if (bkts == NULL) {
1891                 rc = -ENOMEM;
1892                 goto out;
1893         }
1894
1895         if (bkts == hs->hs_buckets) {
1896                 bkts = NULL; /* do nothing */
1897                 goto out;
1898         }
1899
1900         rc = __cfs_hash_theta(hs);
1901         if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1902                 /* free the newly allocated bkt-table */
1903                 old_size = new_size;
1904                 new_size = CFS_HASH_NBKT(hs);
1905                 rc = -EALREADY;
1906                 goto out;
1907         }
1908
1909         LASSERT(hs->hs_rehash_buckets == NULL);
1910         hs->hs_rehash_buckets = bkts;
1911
1912         rc = 0;
1913         cfs_hash_for_each_bucket(hs, &bd, i) {
1914                 if (cfs_hash_is_exiting(hs)) {
1915                         rc = -ESRCH;
1916                         /* someone wants to destroy the hash, abort now */
1917                         if (old_size < new_size) /* OK to free old bkt-table */
1918                                 break;
1919                         /* it's shrinking, need to free the new bkt-table */
1920                         hs->hs_rehash_buckets = NULL;
1921                         old_size = new_size;
1922                         new_size = CFS_HASH_NBKT(hs);
1923                         goto out;
1924                 }
1925
1926                 count += cfs_hash_rehash_bd(hs, &bd);
1927                 if (count < CFS_HASH_LOOP_HOG ||
1928                     cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1929                         continue;
1930                 }
1931
1932                 count = 0;
1933                 cfs_hash_unlock(hs, 1);
1934                 cond_resched();
1935                 cfs_hash_lock(hs, 1);
1936         }
1937
1938         hs->hs_rehash_count++;
1939
1940         bkts = hs->hs_buckets;
1941         hs->hs_buckets = hs->hs_rehash_buckets;
1942         hs->hs_rehash_buckets = NULL;
1943
1944         hs->hs_cur_bits = hs->hs_rehash_bits;
1945  out:
1946         hs->hs_rehash_bits = 0;
1947         if (rc == -ESRCH) /* will never be scheduled again */
1948                 cfs_wi_exit(cfs_sched_rehash, wi);
1949         bsize = cfs_hash_bkt_size(hs);
1950         cfs_hash_unlock(hs, 1);
1951         /* can't refer to @hs anymore because it could be destroyed */
1952         if (bkts != NULL)
1953                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1954         if (rc != 0)
1955                 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1956         /* return 1 only if cfs_wi_exit is called */
1957         return rc == -ESRCH;
1958 }
1959
1960 /**
1961  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1962  * @old_key must be provided to locate the object's previous location
1963  * in the hash, and the @new_key will be used to reinsert the object.
1964  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1965  * combo when it is critical that there is no window in time where the
1966  * object is missing from the hash.  When an object is being rehashed
1967  * the registered cfs_hash_get() and cfs_hash_put() functions will
1968  * not be called.
1969  */
1970 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
1971                          void *new_key, struct hlist_node *hnode)
1972 {
1973         struct cfs_hash_bd      bds[3];
1974         struct cfs_hash_bd      old_bds[2];
1975         struct cfs_hash_bd      new_bd;
1976
1977         LASSERT(!hlist_unhashed(hnode));
1978
1979         cfs_hash_lock(hs, 0);
1980
1981         cfs_hash_dual_bd_get(hs, old_key, old_bds);
1982         cfs_hash_bd_get(hs, new_key, &new_bd);
1983
1984         bds[0] = old_bds[0];
1985         bds[1] = old_bds[1];
1986         bds[2] = new_bd;
1987
1988         /* NB: bds[0] and bds[1] are ordered already */
1989         cfs_hash_bd_order(&bds[1], &bds[2]);
1990         cfs_hash_bd_order(&bds[0], &bds[1]);
1991
1992         cfs_hash_multi_bd_lock(hs, bds, 3, 1);
1993         if (likely(old_bds[1].bd_bucket == NULL)) {
1994                 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
1995         } else {
1996                 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
1997                 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
1998         }
1999         /* overwrite the key inside the locks, otherwise this may race
2000          * with other operations, e.g. rehash */
2001         cfs_hash_keycpy(hs, new_key, hnode);
2002
2003         cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2004         cfs_hash_unlock(hs, 0);
2005 }
2006 EXPORT_SYMBOL(cfs_hash_rehash_key);
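
/*
 * Sketch: atomically re-keying an object, e.g. on rename, with no window
 * where a lookup can miss it (not built; "my_hash", "old_id", "new_id",
 * "mo" and "mo_hnode" are hypothetical).  Note cfs_hash_keycpy()
 * overwrites the key in place, so the object's key storage must be large
 * enough to hold the new key.
 */
#if 0
        cfs_hash_rehash_key(my_hash, &old_id, &new_id, &mo->mo_hnode);
#endif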
2007
2008 int cfs_hash_debug_header(struct seq_file *m)
2009 {
2010         return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
2011                  CFS_HASH_BIGNAME_LEN,
2012                  "name", "cur", "min", "max", "theta", "t-min", "t-max",
2013                  "flags", "rehash", "count", "maxdep", "maxdepb",
2014                  " distribution");
2015 }
2016 EXPORT_SYMBOL(cfs_hash_debug_header);
2017
2018 static struct cfs_hash_bucket **
2019 cfs_hash_full_bkts(struct cfs_hash *hs)
2020 {
2021         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2022         if (hs->hs_rehash_buckets == NULL)
2023                 return hs->hs_buckets;
2024
2025         LASSERT(hs->hs_rehash_bits != 0);
2026         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2027                hs->hs_rehash_buckets : hs->hs_buckets;
2028 }
2029
2030 static unsigned int
2031 cfs_hash_full_nbkt(struct cfs_hash *hs)
2032 {
2033         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2034         if (hs->hs_rehash_buckets == NULL)
2035                 return CFS_HASH_NBKT(hs);
2036
2037         LASSERT(hs->hs_rehash_bits != 0);
2038         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2039                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2040 }
2041
2042 int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2043 {
2044         int                 dist[8] = { 0, };
2045         int                 maxdep  = -1;
2046         int                 maxdepb = -1;
2047         int                 total   = 0;
2048         int                 theta;
2049         int                 i;
2050
2051         cfs_hash_lock(hs, 0);
2052         theta = __cfs_hash_theta(hs);
2053
2054         seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
2055                       CFS_HASH_BIGNAME_LEN, hs->hs_name,
2056                       1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2057                       1 << hs->hs_max_bits,
2058                       __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2059                       __cfs_hash_theta_int(hs->hs_min_theta),
2060                       __cfs_hash_theta_frac(hs->hs_min_theta),
2061                       __cfs_hash_theta_int(hs->hs_max_theta),
2062                       __cfs_hash_theta_frac(hs->hs_max_theta),
2063                       hs->hs_flags, hs->hs_rehash_count);
2064
2065         /*
2066          * The distribution is a summary of the chained hash depth in
2067          * each of the libcfs hash buckets.  Each bucket's hsb_count is
2068          * divided by the hash theta value and used to generate a
2069          * histogram of the hash distribution.  A uniform hash will
2070          * result in all hash buckets being close to the average, thus
2071          * only the first few entries in the histogram will be non-zero.
2072          * If your hash function results in a non-uniform hash, this will
2073          * be observable as outlier buckets in the distribution histogram.
2074          *
2075          * Uniform hash distribution:      128/128/0/0/0/0/0/0
2076          * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
2077          */
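        /*
         * Worked example: the histogram slot chosen below is
         * min(fls(hsb_count / max(theta, 1)), 7), so each slot covers a
         * power-of-two band of per-bucket load: fls(0) == 0, fls(1) == 1,
         * fls(2..3) == 2, fls(4..7) == 3, and any bucket at 64x the
         * divisor or more is clamped into slot 7.
         */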
2078         for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2079                 struct cfs_hash_bd  bd;
2080
2081                 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2082                 cfs_hash_bd_lock(hs, &bd, 0);
2083                 if (maxdep < bd.bd_bucket->hsb_depmax) {
2084                         maxdep  = bd.bd_bucket->hsb_depmax;
2085                         maxdepb = ffz(~maxdep);
2086                 }
2087                 total += bd.bd_bucket->hsb_count;
2088                 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2089                 cfs_hash_bd_unlock(hs, &bd, 0);
2090         }
2091
2092         seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2093         for (i = 0; i < 8; i++)
2094                 seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2095
2096         cfs_hash_unlock(hs, 0);
2097
2098         return 0;
2099 }
2100 EXPORT_SYMBOL(cfs_hash_debug_str);