4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/libcfs/hash.c
38 * Implement a hash table class for the hashing needs of the Lustre system.
40 * Author: YuZhangyong <yzy@clusterfs.com>
42 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
43 * - Simplified API and improved documentation
44 * - Added per-hash feature flags:
45 * * CFS_HASH_DEBUG additional validation
46 * * CFS_HASH_REHASH dynamic rehashing
47 * - Added per-hash statistics
48 * - General performance enhancements
50 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
51 * - moved all of this code to libcfs
52 * - don't allow cur_bits != max_bits without CFS_HASH_REHASH set
53 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
54 * - buckets are allocated one by one (instead of as contiguous memory),
55 * to avoid unnecessary cacheline conflicts
57 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
58 * - "bucket" is a group of hlist_head now, user can specify bucket size
59 * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
60 * one lock for reducing memory overhead.
62 * - support lockless hash, caller will take care of locks:
63 * avoid lock overhead for hash tables that are already protected
64 * by locking in the caller for another reason
66 * - support both spin_lock/rwlock for bucket:
67 * the overhead of spinlock contention is lower than the read/write
68 * contention of a rwlock, so using a spinlock to serialize operations
69 * on a bucket is more reasonable for frequently changed hash tables
71 * - support single-lock mode:
72 * one lock protects all hash operations, avoiding the overhead of
73 * multiple locks when the hash table is always small
75 * - removed a lot of unnecessary addref & decref on hash elements:
76 * addref & decref are atomic operations in many use-cases, which
77 * are expensive.
79 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
80 * some Lustre use-cases require these functions to be strictly
81 * non-blocking, so we need to schedule the required rehash on a
82 * different thread in those cases.
84 * - safer rehash on large hash tables
85 * In the old implementation, the rehash function would exclusively
86 * lock the hash table and finish the rehash in one batch; that is
87 * dangerous on an SMP system because rehashing millions of elements
88 * could take a long time. The new rehash can release the lock and
89 * relax the CPU in the middle of a rehash, so it is safe for another
90 * thread to search/change the hash table even while it is rehashing.
92 * - support two different refcount modes
93 * . hash table has refcount on element
94 * . hash table doesn't change refcount on adding/removing element
96 * - support long name hash table (for param-tree)
98 * - fix a bug in cfs_hash_rehash_key:
99 * in the old implementation, cfs_hash_rehash_key could screw up the
100 * hash table because @key was overwritten without any protection.
101 * Now the user must define hs_keycpy for rehash-enabled hash
102 * tables, and cfs_hash_rehash_key will overwrite the hash key
103 * inside the lock by calling hs_keycpy.
105 * - better hash iteration:
106 * Now we support both locked and lockless iteration of the hash table.
107 * Also, the user can break the iteration by returning 1 from the callback.
110 #include "../../include/linux/libcfs/libcfs.h"
111 #include <linux/seq_file.h>
113 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
114 static unsigned int warn_on_depth = 8;
115 module_param(warn_on_depth, uint, 0644);
116 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
119 struct cfs_wi_sched *cfs_sched_rehash;
122 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
125 cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
128 cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
129 __acquires(&lock->spin)
131 spin_lock(&lock->spin);
135 cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
136 __releases(&lock->spin)
138 spin_unlock(&lock->spin);
142 cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
143 __acquires(&lock->rw)
146 read_lock(&lock->rw);
148 write_lock(&lock->rw);
152 cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
153 __releases(&lock->rw)
156 read_unlock(&lock->rw);
158 write_unlock(&lock->rw);
162 static cfs_hash_lock_ops_t cfs_hash_nl_lops = {
163 .hs_lock = cfs_hash_nl_lock,
164 .hs_unlock = cfs_hash_nl_unlock,
165 .hs_bkt_lock = cfs_hash_nl_lock,
166 .hs_bkt_unlock = cfs_hash_nl_unlock,
169 /** no bucket lock, one spinlock to protect everything */
170 static cfs_hash_lock_ops_t cfs_hash_nbl_lops = {
171 .hs_lock = cfs_hash_spin_lock,
172 .hs_unlock = cfs_hash_spin_unlock,
173 .hs_bkt_lock = cfs_hash_nl_lock,
174 .hs_bkt_unlock = cfs_hash_nl_unlock,
177 /** spin bucket lock, rehash is enabled */
178 static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops = {
179 .hs_lock = cfs_hash_rw_lock,
180 .hs_unlock = cfs_hash_rw_unlock,
181 .hs_bkt_lock = cfs_hash_spin_lock,
182 .hs_bkt_unlock = cfs_hash_spin_unlock,
185 /** rw bucket lock, rehash is enabled */
186 static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops = {
187 .hs_lock = cfs_hash_rw_lock,
188 .hs_unlock = cfs_hash_rw_unlock,
189 .hs_bkt_lock = cfs_hash_rw_lock,
190 .hs_bkt_unlock = cfs_hash_rw_unlock,
193 /** spin bucket lock, rehash is disabled */
194 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops = {
195 .hs_lock = cfs_hash_nl_lock,
196 .hs_unlock = cfs_hash_nl_unlock,
197 .hs_bkt_lock = cfs_hash_spin_lock,
198 .hs_bkt_unlock = cfs_hash_spin_unlock,
201 /** rw bucket lock, rehash is disabled */
202 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops = {
203 .hs_lock = cfs_hash_nl_lock,
204 .hs_unlock = cfs_hash_nl_unlock,
205 .hs_bkt_lock = cfs_hash_rw_lock,
206 .hs_bkt_unlock = cfs_hash_rw_unlock,
210 cfs_hash_lock_setup(struct cfs_hash *hs)
212 if (cfs_hash_with_no_lock(hs)) {
213 hs->hs_lops = &cfs_hash_nl_lops;
215 } else if (cfs_hash_with_no_bktlock(hs)) {
216 hs->hs_lops = &cfs_hash_nbl_lops;
217 spin_lock_init(&hs->hs_lock.spin);
219 } else if (cfs_hash_with_rehash(hs)) {
220 rwlock_init(&hs->hs_lock.rw);
222 if (cfs_hash_with_rw_bktlock(hs))
223 hs->hs_lops = &cfs_hash_bkt_rw_lops;
224 else if (cfs_hash_with_spin_bktlock(hs))
225 hs->hs_lops = &cfs_hash_bkt_spin_lops;
229 if (cfs_hash_with_rw_bktlock(hs))
230 hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
231 else if (cfs_hash_with_spin_bktlock(hs))
232 hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
239 * Simple hash head without depth tracking
240 * new element is always added to head of hlist
243 struct hlist_head hh_head; /**< entries list */
247 cfs_hash_hh_hhead_size(struct cfs_hash *hs)
249 return sizeof(cfs_hash_head_t);
252 static struct hlist_head *
253 cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
255 cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
257 return &head[bd->bd_offset].hh_head;
261 cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
262 struct hlist_node *hnode)
264 hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
265 return -1; /* unknown depth */
269 cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
270 struct hlist_node *hnode)
272 hlist_del_init(hnode);
273 return -1; /* unknown depth */
277 * Simple hash head with depth tracking
278 * new element is always added to head of hlist
281 struct hlist_head hd_head; /**< entries list */
282 unsigned int hd_depth; /**< list length */
283 } cfs_hash_head_dep_t;
286 cfs_hash_hd_hhead_size(struct cfs_hash *hs)
288 return sizeof(cfs_hash_head_dep_t);
291 static struct hlist_head *
292 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
294 cfs_hash_head_dep_t *head;
296 head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
297 return &head[bd->bd_offset].hd_head;
301 cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
302 struct hlist_node *hnode)
304 cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
305 cfs_hash_head_dep_t, hd_head);
306 hlist_add_head(hnode, &hh->hd_head);
307 return ++hh->hd_depth;
311 cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
312 struct hlist_node *hnode)
314 cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
315 cfs_hash_head_dep_t, hd_head);
316 hlist_del_init(hnode);
317 return --hh->hd_depth;
321 * hash head with both head and tail links, without depth tracking
322 * new elements are always added to the tail of the hlist
325 struct hlist_head dh_head; /**< entries list */
326 struct hlist_node *dh_tail; /**< the last entry */
330 cfs_hash_dh_hhead_size(struct cfs_hash *hs)
332 return sizeof(cfs_hash_dhead_t);
335 static struct hlist_head *
336 cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
338 cfs_hash_dhead_t *head;
340 head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
341 return &head[bd->bd_offset].dh_head;
345 cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
346 struct hlist_node *hnode)
348 cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
349 cfs_hash_dhead_t, dh_head);
351 if (dh->dh_tail != NULL) /* not empty */
352 hlist_add_behind(hnode, dh->dh_tail);
353 else /* empty list */
354 hlist_add_head(hnode, &dh->dh_head);
356 return -1; /* unknown depth */
360 cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
361 struct hlist_node *hnd)
363 cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
364 cfs_hash_dhead_t, dh_head);
366 if (hnd->next == NULL) { /* it's the tail */
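/* Recover the previous node: hnd->pprev points either at the list
 * head's ->first (the node was the only entry, so the tail becomes
 * NULL) or at the previous node's embedded ->next pointer, which
 * container_of() turns back into that node. The dd variant below
 * uses the same trick. */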
367 dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
368 container_of(hnd->pprev, struct hlist_node, next);
371 return -1; /* unknown depth */
375 * hash head with both head and tail links, with depth tracking
376 * new elements are always added to the tail of the hlist
379 struct hlist_head dd_head; /**< entries list */
380 struct hlist_node *dd_tail; /**< the last entry */
381 unsigned int dd_depth; /**< list length */
382 } cfs_hash_dhead_dep_t;
385 cfs_hash_dd_hhead_size(struct cfs_hash *hs)
387 return sizeof(cfs_hash_dhead_dep_t);
390 static struct hlist_head *
391 cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
393 cfs_hash_dhead_dep_t *head;
395 head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
396 return &head[bd->bd_offset].dd_head;
400 cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
401 struct hlist_node *hnode)
403 cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
404 cfs_hash_dhead_dep_t, dd_head);
406 if (dh->dd_tail != NULL) /* not empty */
407 hlist_add_behind(hnode, dh->dd_tail);
408 else /* empty list */
409 hlist_add_head(hnode, &dh->dd_head);
411 return ++dh->dd_depth;
415 cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
416 struct hlist_node *hnd)
418 cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
419 cfs_hash_dhead_dep_t, dd_head);
421 if (hnd->next == NULL) { /* it's the tail */
422 dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
423 container_of(hnd->pprev, struct hlist_node, next);
426 return --dh->dd_depth;
429 static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
430 .hop_hhead = cfs_hash_hh_hhead,
431 .hop_hhead_size = cfs_hash_hh_hhead_size,
432 .hop_hnode_add = cfs_hash_hh_hnode_add,
433 .hop_hnode_del = cfs_hash_hh_hnode_del,
436 static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
437 .hop_hhead = cfs_hash_hd_hhead,
438 .hop_hhead_size = cfs_hash_hd_hhead_size,
439 .hop_hnode_add = cfs_hash_hd_hnode_add,
440 .hop_hnode_del = cfs_hash_hd_hnode_del,
443 static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
444 .hop_hhead = cfs_hash_dh_hhead,
445 .hop_hhead_size = cfs_hash_dh_hhead_size,
446 .hop_hnode_add = cfs_hash_dh_hnode_add,
447 .hop_hnode_del = cfs_hash_dh_hnode_del,
450 static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
451 .hop_hhead = cfs_hash_dd_hhead,
452 .hop_hhead_size = cfs_hash_dd_hhead_size,
453 .hop_hnode_add = cfs_hash_dd_hnode_add,
454 .hop_hnode_del = cfs_hash_dd_hnode_del,
458 cfs_hash_hlist_setup(struct cfs_hash *hs)
460 if (cfs_hash_with_add_tail(hs)) {
461 hs->hs_hops = cfs_hash_with_depth(hs) ?
462 &cfs_hash_dd_hops : &cfs_hash_dh_hops;
464 hs->hs_hops = cfs_hash_with_depth(hs) ?
465 &cfs_hash_hd_hops : &cfs_hash_hh_hops;
470 cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
471 unsigned int bits, const void *key, struct cfs_hash_bd *bd)
473 unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
475 LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
477 bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
478 bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
482 cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
484 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
485 if (likely(hs->hs_rehash_buckets == NULL)) {
486 cfs_hash_bd_from_key(hs, hs->hs_buckets,
487 hs->hs_cur_bits, key, bd);
489 LASSERT(hs->hs_rehash_bits != 0);
490 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
491 hs->hs_rehash_bits, key, bd);
494 EXPORT_SYMBOL(cfs_hash_bd_get);
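/*
 * Illustrative note (added commentary, not upstream): in
 * cfs_hash_bd_from_key() the hashed index is split in two. With
 * bits = 10 and hs_bkt_bits = 3 there are 2^(10-3) = 128 buckets,
 * each containing 2^3 = 8 hlist heads; the low 7 bits of the index
 * select bd_bucket and the high 3 bits become bd_offset, so
 * consecutive indices land in different buckets (and different
 * cachelines).
 */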
497 cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
499 if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
502 bd->bd_bucket->hsb_depmax = dep_cur;
503 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
504 if (likely(warn_on_depth == 0 ||
505 max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
508 spin_lock(&hs->hs_dep_lock);
509 hs->hs_dep_max = dep_cur;
510 hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
511 hs->hs_dep_off = bd->bd_offset;
512 hs->hs_dep_bits = hs->hs_cur_bits;
513 spin_unlock(&hs->hs_dep_lock);
515 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
520 cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
521 struct hlist_node *hnode)
525 rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
526 cfs_hash_bd_dep_record(hs, bd, rc);
527 bd->bd_bucket->hsb_version++;
528 if (unlikely(bd->bd_bucket->hsb_version == 0))
529 bd->bd_bucket->hsb_version++;
530 bd->bd_bucket->hsb_count++;
532 if (cfs_hash_with_counter(hs))
533 atomic_inc(&hs->hs_count);
534 if (!cfs_hash_with_no_itemref(hs))
535 cfs_hash_get(hs, hnode);
537 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
540 cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
541 struct hlist_node *hnode)
543 hs->hs_hops->hop_hnode_del(hs, bd, hnode);
545 LASSERT(bd->bd_bucket->hsb_count > 0);
546 bd->bd_bucket->hsb_count--;
547 bd->bd_bucket->hsb_version++;
548 if (unlikely(bd->bd_bucket->hsb_version == 0))
549 bd->bd_bucket->hsb_version++;
551 if (cfs_hash_with_counter(hs)) {
552 LASSERT(atomic_read(&hs->hs_count) > 0);
553 atomic_dec(&hs->hs_count);
555 if (!cfs_hash_with_no_itemref(hs))
556 cfs_hash_put_locked(hs, hnode);
558 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
561 cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
562 struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
564 struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
565 struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
568 if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
571 /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
572 * in cfs_hash_bd_del/add_locked */
573 hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
574 rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
575 cfs_hash_bd_dep_record(hs, bd_new, rc);
577 LASSERT(obkt->hsb_count > 0);
580 if (unlikely(obkt->hsb_version == 0))
584 if (unlikely(nbkt->hsb_version == 0))
587 EXPORT_SYMBOL(cfs_hash_bd_move_locked);
590 /** always set, for sanity (avoid ZERO intent) */
591 CFS_HS_LOOKUP_MASK_FIND = 1 << 0,
592 /** return entry with a ref */
593 CFS_HS_LOOKUP_MASK_REF = 1 << 1,
594 /** add entry if not existing */
595 CFS_HS_LOOKUP_MASK_ADD = 1 << 2,
596 /** delete entry, ignore other masks */
597 CFS_HS_LOOKUP_MASK_DEL = 1 << 3,
600 typedef enum cfs_hash_lookup_intent {
601 /** return item w/o refcount */
602 CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND,
603 /** return item with refcount */
604 CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND |
605 CFS_HS_LOOKUP_MASK_REF),
606 * return item w/o refcount if it exists, otherwise add
607 CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND |
608 CFS_HS_LOOKUP_MASK_ADD),
609 * return item with refcount if it exists, otherwise add
610 CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
611 CFS_HS_LOOKUP_MASK_ADD),
612 * delete if it exists
613 CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
614 CFS_HS_LOOKUP_MASK_DEL)
615 } cfs_hash_lookup_intent_t;
617 static struct hlist_node *
618 cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
619 const void *key, struct hlist_node *hnode,
620 cfs_hash_lookup_intent_t intent)
623 struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
624 struct hlist_node *ehnode;
625 struct hlist_node *match;
626 int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
628 /* with this function, we can avoid a lot of useless refcount ops,
629 * which are expensive atomic operations most of the time. */
630 match = intent_add ? NULL : hnode;
631 hlist_for_each(ehnode, hhead) {
632 if (!cfs_hash_keycmp(hs, key, ehnode))
635 if (match != NULL && match != ehnode) /* can't match */
639 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
640 cfs_hash_bd_del_locked(hs, bd, ehnode);
644 /* caller wants refcount? */
645 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
646 cfs_hash_get(hs, ehnode);
653 LASSERT(hnode != NULL);
654 cfs_hash_bd_add_locked(hs, bd, hnode);
659 cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
661 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
662 CFS_HS_LOOKUP_IT_FIND);
664 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
667 cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
669 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
670 CFS_HS_LOOKUP_IT_PEEK);
672 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
675 cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
676 const void *key, struct hlist_node *hnode,
679 return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
680 CFS_HS_LOOKUP_IT_ADD |
681 (!noref * CFS_HS_LOOKUP_MASK_REF));
683 EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
686 cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
687 const void *key, struct hlist_node *hnode)
689 /* hnode can be NULL, we find the first item with @key */
690 return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
691 CFS_HS_LOOKUP_IT_FINDDEL);
693 EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
696 cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
697 unsigned n, int excl)
699 struct cfs_hash_bucket *prev = NULL;
703 * bds must be ordered by bd->bd_bucket->hsb_index, ascending.
704 * NB: it's possible that several bds point to the same bucket but
705 * have different bd::bd_offset, so we need to take care to avoid deadlock.
707 cfs_hash_for_each_bd(bds, n, i) {
708 if (prev == bds[i].bd_bucket)
711 LASSERT(prev == NULL ||
712 prev->hsb_index < bds[i].bd_bucket->hsb_index);
713 cfs_hash_bd_lock(hs, &bds[i], excl);
714 prev = bds[i].bd_bucket;
719 cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
720 unsigned n, int excl)
722 struct cfs_hash_bucket *prev = NULL;
725 cfs_hash_for_each_bd(bds, n, i) {
726 if (prev != bds[i].bd_bucket) {
727 cfs_hash_bd_unlock(hs, &bds[i], excl);
728 prev = bds[i].bd_bucket;
733 static struct hlist_node *
734 cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
735 unsigned n, const void *key)
737 struct hlist_node *ehnode;
740 cfs_hash_for_each_bd(bds, n, i) {
741 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
742 CFS_HS_LOOKUP_IT_FIND);
749 static struct hlist_node *
750 cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
751 struct cfs_hash_bd *bds, unsigned n, const void *key,
752 struct hlist_node *hnode, int noref)
754 struct hlist_node *ehnode;
758 LASSERT(hnode != NULL);
759 intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
761 cfs_hash_for_each_bd(bds, n, i) {
762 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
768 if (i == 1) { /* only one bucket */
769 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
771 struct cfs_hash_bd mybd;
773 cfs_hash_bd_get(hs, key, &mybd);
774 cfs_hash_bd_add_locked(hs, &mybd, hnode);
780 static struct hlist_node *
781 cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
782 unsigned n, const void *key,
783 struct hlist_node *hnode)
785 struct hlist_node *ehnode;
788 cfs_hash_for_each_bd(bds, n, i) {
789 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
790 CFS_HS_LOOKUP_IT_FINDDEL);
798 cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
802 if (bd2->bd_bucket == NULL)
805 if (bd1->bd_bucket == NULL) {
807 bd2->bd_bucket = NULL;
811 rc = cfs_hash_bd_compare(bd1, bd2);
813 bd2->bd_bucket = NULL;
815 } else if (rc > 0) { /* swap bd1 and bd2 */
816 struct cfs_hash_bd tmp;
825 cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds)
827 /* NB: caller should hold hs_lock.rw if REHASH is set */
828 cfs_hash_bd_from_key(hs, hs->hs_buckets,
829 hs->hs_cur_bits, key, &bds[0]);
830 if (likely(hs->hs_rehash_buckets == NULL)) {
831 /* no rehash or not rehashing */
832 bds[1].bd_bucket = NULL;
836 LASSERT(hs->hs_rehash_bits != 0);
837 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
838 hs->hs_rehash_bits, key, &bds[1]);
840 cfs_hash_bd_order(&bds[0], &bds[1]);
842 EXPORT_SYMBOL(cfs_hash_dual_bd_get);
845 cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
847 cfs_hash_multi_bd_lock(hs, bds, 2, excl);
849 EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
852 cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
854 cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
856 EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
859 cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
862 return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
864 EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
867 cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
868 const void *key, struct hlist_node *hnode,
871 return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
874 EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
877 cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
878 const void *key, struct hlist_node *hnode)
880 return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
882 EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
885 cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
886 int bkt_size, int prev_size, int size)
890 for (i = prev_size; i < size; i++) {
891 if (buckets[i] != NULL)
892 LIBCFS_FREE(buckets[i], bkt_size);
895 LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
899 * Create or grow bucket memory. Return old_buckets if no allocation was
900 * needed, the newly allocated buckets if allocation was needed and
901 * successful, and NULL on error.
903 static struct cfs_hash_bucket **
904 cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
905 unsigned int old_size, unsigned int new_size)
907 struct cfs_hash_bucket **new_bkts;
910 LASSERT(old_size == 0 || old_bkts != NULL);
912 if (old_bkts != NULL && old_size == new_size)
915 LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
916 if (new_bkts == NULL)
919 if (old_bkts != NULL) {
920 memcpy(new_bkts, old_bkts,
921 min(old_size, new_size) * sizeof(*old_bkts));
924 for (i = old_size; i < new_size; i++) {
925 struct hlist_head *hhead;
926 struct cfs_hash_bd bd;
928 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
929 if (new_bkts[i] == NULL) {
930 cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
935 new_bkts[i]->hsb_index = i;
936 new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
937 new_bkts[i]->hsb_depmax = -1; /* unknown */
938 bd.bd_bucket = new_bkts[i];
939 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
940 INIT_HLIST_HEAD(hhead);
942 if (cfs_hash_with_no_lock(hs) ||
943 cfs_hash_with_no_bktlock(hs))
946 if (cfs_hash_with_rw_bktlock(hs))
947 rwlock_init(&new_bkts[i]->hsb_lock.rw);
948 else if (cfs_hash_with_spin_bktlock(hs))
949 spin_lock_init(&new_bkts[i]->hsb_lock.spin);
951 LBUG(); /* invalid use-case */
957 * Initialize new libcfs hash, where:
958 * @name - Descriptive hash name
959 * @cur_bits - Initial hash table size, in bits
960 * @max_bits - Maximum allowed hash table resize, in bits
961 * @ops - Registered hash table operations
962 * @flags - CFS_HASH_REHASH enable dynamic hash resizing
963 * - CFS_HASH_SORT enable chained hash sort
965 static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
967 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
968 static int cfs_hash_dep_print(cfs_workitem_t *wi)
970 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
976 spin_lock(&hs->hs_dep_lock);
977 dep = hs->hs_dep_max;
978 bkt = hs->hs_dep_bkt;
979 off = hs->hs_dep_off;
980 bits = hs->hs_dep_bits;
981 spin_unlock(&hs->hs_dep_lock);
983 LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
984 hs->hs_name, bits, dep, bkt, off);
985 spin_lock(&hs->hs_dep_lock);
986 hs->hs_dep_bits = 0; /* mark as workitem done */
987 spin_unlock(&hs->hs_dep_lock);
991 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
993 spin_lock_init(&hs->hs_dep_lock);
994 cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
997 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
999 if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
1002 spin_lock(&hs->hs_dep_lock);
1003 while (hs->hs_dep_bits != 0) {
1004 spin_unlock(&hs->hs_dep_lock);
1006 spin_lock(&hs->hs_dep_lock);
1008 spin_unlock(&hs->hs_dep_lock);
1011 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
1013 static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
1014 static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
1016 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1019 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1020 unsigned bkt_bits, unsigned extra_bytes,
1021 unsigned min_theta, unsigned max_theta,
1022 cfs_hash_ops_t *ops, unsigned flags)
1024 struct cfs_hash *hs;
1027 CLASSERT(CFS_HASH_THETA_BITS < 15);
1029 LASSERT(name != NULL);
1030 LASSERT(ops != NULL);
1031 LASSERT(ops->hs_key);
1032 LASSERT(ops->hs_hash);
1033 LASSERT(ops->hs_object);
1034 LASSERT(ops->hs_keycmp);
1035 LASSERT(ops->hs_get != NULL);
1036 LASSERT(ops->hs_put_locked != NULL);
1038 if ((flags & CFS_HASH_REHASH) != 0)
1039 flags |= CFS_HASH_COUNTER; /* must have counter */
1041 LASSERT(cur_bits > 0);
1042 LASSERT(cur_bits >= bkt_bits);
1043 LASSERT(max_bits >= cur_bits && max_bits < 31);
1044 LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1045 LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1046 (flags & CFS_HASH_NO_LOCK) == 0));
1047 LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1048 ops->hs_keycpy != NULL));
1050 len = (flags & CFS_HASH_BIGNAME) == 0 ?
1051 CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1052 LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
1056 strncpy(hs->hs_name, name, len);
1057 hs->hs_name[len - 1] = '\0';
1058 hs->hs_flags = flags;
1060 atomic_set(&hs->hs_refcount, 1);
1061 atomic_set(&hs->hs_count, 0);
1063 cfs_hash_lock_setup(hs);
1064 cfs_hash_hlist_setup(hs);
1066 hs->hs_cur_bits = (__u8)cur_bits;
1067 hs->hs_min_bits = (__u8)cur_bits;
1068 hs->hs_max_bits = (__u8)max_bits;
1069 hs->hs_bkt_bits = (__u8)bkt_bits;
1072 hs->hs_extra_bytes = extra_bytes;
1073 hs->hs_rehash_bits = 0;
1074 cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1075 cfs_hash_depth_wi_init(hs);
1077 if (cfs_hash_with_rehash(hs))
1078 __cfs_hash_set_theta(hs, min_theta, max_theta);
1080 hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1082 if (hs->hs_buckets != NULL)
1085 LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
1088 EXPORT_SYMBOL(cfs_hash_create);
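/*
 * Illustrative usage sketch (added commentary, not upstream code).
 * The foo_* callbacks and the sizing parameters are hypothetical;
 * the cfs_hash_ops_t fields mirror the LASSERTs in cfs_hash_create()
 * above, and the theta/flag constants are assumed to come from
 * libcfs_hash.h:
 *
 *	static cfs_hash_ops_t foo_hash_ops = {
 *		.hs_hash	= foo_hash,
 *		.hs_key		= foo_key,
 *		.hs_keycmp	= foo_keycmp,
 *		.hs_object	= foo_object,
 *		.hs_get		= foo_get,
 *		.hs_put_locked	= foo_put_locked,
 *	};
 *
 *	hash = cfs_hash_create("foo_objs", 5, 10, 3, 0,
 *			       CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			       &foo_hash_ops, CFS_HASH_DEFAULT);
 *
 * i.e. 2^5 initial slots, growable to 2^10, with 2^3 hlist heads
 * per bucket and no extra bytes per bucket.
 */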
1091 * Cleanup libcfs hash @hs.
1094 cfs_hash_destroy(struct cfs_hash *hs)
1096 struct hlist_node *hnode;
1097 struct hlist_node *pos;
1098 struct cfs_hash_bd bd;
1101 LASSERT(hs != NULL);
1102 LASSERT(!cfs_hash_is_exiting(hs) &&
1103 !cfs_hash_is_iterating(hs));
1106 * prohibit further rehashes; we don't need any lock because
1107 * I'm the only (last) one who can change it.
1110 if (cfs_hash_with_rehash(hs))
1111 cfs_hash_rehash_cancel(hs);
1113 cfs_hash_depth_wi_cancel(hs);
1114 /* rehash should be done/canceled */
1115 LASSERT(hs->hs_buckets != NULL &&
1116 hs->hs_rehash_buckets == NULL);
1118 cfs_hash_for_each_bucket(hs, &bd, i) {
1119 struct hlist_head *hhead;
1121 LASSERT(bd.bd_bucket != NULL);
1122 /* no need to take this lock, just for consistent code */
1123 cfs_hash_bd_lock(hs, &bd, 1);
1125 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1126 hlist_for_each_safe(hnode, pos, hhead) {
1127 LASSERTF(!cfs_hash_with_assert_empty(hs),
1128 "hash %s bucket %u(%u) is not empty: %u items left\n",
1129 hs->hs_name, bd.bd_bucket->hsb_index,
1130 bd.bd_offset, bd.bd_bucket->hsb_count);
1131 /* can't assert key validity here, because we
1132 * can interrupt rehash */
1133 cfs_hash_bd_del_locked(hs, &bd, hnode);
1134 cfs_hash_exit(hs, hnode);
1137 LASSERT(bd.bd_bucket->hsb_count == 0);
1138 cfs_hash_bd_unlock(hs, &bd, 1);
1142 LASSERT(atomic_read(&hs->hs_count) == 0);
1144 cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1145 0, CFS_HASH_NBKT(hs));
1146 i = cfs_hash_with_bigname(hs) ?
1147 CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1148 LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
1151 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
1153 if (atomic_inc_not_zero(&hs->hs_refcount))
1157 EXPORT_SYMBOL(cfs_hash_getref);
1159 void cfs_hash_putref(struct cfs_hash *hs)
1161 if (atomic_dec_and_test(&hs->hs_refcount))
1162 cfs_hash_destroy(hs);
1164 EXPORT_SYMBOL(cfs_hash_putref);
1167 cfs_hash_rehash_bits(struct cfs_hash *hs)
1169 if (cfs_hash_with_no_lock(hs) ||
1170 !cfs_hash_with_rehash(hs))
1173 if (unlikely(cfs_hash_is_exiting(hs)))
1176 if (unlikely(cfs_hash_is_rehashing(hs)))
1179 if (unlikely(cfs_hash_is_iterating(hs)))
1182 /* XXX: need to handle case with max_theta != 2.0
1183 * and the case with min_theta != 0.5 */
1184 if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1185 (__cfs_hash_theta(hs) > hs->hs_max_theta))
1186 return hs->hs_cur_bits + 1;
1188 if (!cfs_hash_with_shrink(hs))
1191 if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1192 (__cfs_hash_theta(hs) < hs->hs_min_theta))
1193 return hs->hs_cur_bits - 1;
1199 * don't allow inline rehash if:
1200 * - the user wants non-blocking changes (add/del) on the hash table
1201 * - there are too many elements
1204 cfs_hash_rehash_inline(struct cfs_hash *hs)
1206 return !cfs_hash_with_nblk_change(hs) &&
1207 atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
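/*
 * Note on theta (added commentary, a sketch assuming the fixed-point
 * helpers in libcfs_hash.h): theta is the load factor
 * nelements / nslots, kept with CFS_HASH_THETA_BITS fractional bits,
 * roughly:
 *
 *	theta = (hs_count << CFS_HASH_THETA_BITS) >> hs_cur_bits
 *
 * e.g. 1536 elements in a 2^10-slot table gives theta = 1.5; with
 * hs_max_theta at 2.0 the table would not be grown yet, and
 * cfs_hash_rehash_bits() above would return 0.
 */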
1211 * Add item @hnode to libcfs hash @hs using @key. The registered
1212 * ops->hs_get function will be called when the item is added.
1215 cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1217 struct cfs_hash_bd bd;
1220 LASSERT(hlist_unhashed(hnode));
1222 cfs_hash_lock(hs, 0);
1223 cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1225 cfs_hash_key_validate(hs, key, hnode);
1226 cfs_hash_bd_add_locked(hs, &bd, hnode);
1228 cfs_hash_bd_unlock(hs, &bd, 1);
1230 bits = cfs_hash_rehash_bits(hs);
1231 cfs_hash_unlock(hs, 0);
1233 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1235 EXPORT_SYMBOL(cfs_hash_add);
1237 static struct hlist_node *
1238 cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1239 struct hlist_node *hnode, int noref)
1241 struct hlist_node *ehnode;
1242 struct cfs_hash_bd bds[2];
1245 LASSERT(hlist_unhashed(hnode));
1247 cfs_hash_lock(hs, 0);
1248 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1250 cfs_hash_key_validate(hs, key, hnode);
1251 ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1253 cfs_hash_dual_bd_unlock(hs, bds, 1);
1255 if (ehnode == hnode) /* new item added */
1256 bits = cfs_hash_rehash_bits(hs);
1257 cfs_hash_unlock(hs, 0);
1259 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1265 * Add item @hnode to libcfs hash @hs using @key. The registered
1266 * ops->hs_get function will be called if the item was added.
1267 * Returns 0 on success or -EALREADY on key collisions.
1270 cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1272 return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1275 EXPORT_SYMBOL(cfs_hash_add_unique);
1278 * Add item @hnode to libcfs hash @hs using @key. If this @key
1279 * already exists in the hash then ops->hs_get will be called on the
1280 * conflicting entry and that entry will be returned to the caller.
1281 * Otherwise ops->hs_get is called on the item which was added.
1284 cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
1285 struct hlist_node *hnode)
1287 hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1289 return cfs_hash_object(hs, hnode);
1291 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1294 * Delete item @hnode from the libcfs hash @hs using @key. The @key
1295 * is required to ensure the correct hash bucket is locked since there
1296 * is no direct linkage from the item to the bucket. The object
1297 * removed from the hash will be returned and ops->hs_put is called
1298 * on the removed object.
1301 cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1305 struct cfs_hash_bd bds[2];
1307 cfs_hash_lock(hs, 0);
1308 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1310 /* NB: do nothing if @hnode is not in hash table */
1311 if (hnode == NULL || !hlist_unhashed(hnode)) {
1312 if (bds[1].bd_bucket == NULL && hnode != NULL) {
1313 cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1315 hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1320 if (hnode != NULL) {
1321 obj = cfs_hash_object(hs, hnode);
1322 bits = cfs_hash_rehash_bits(hs);
1325 cfs_hash_dual_bd_unlock(hs, bds, 1);
1326 cfs_hash_unlock(hs, 0);
1328 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1332 EXPORT_SYMBOL(cfs_hash_del);
1335 * Delete item given @key in libcfs hash @hs. The first @key found in
1336 * the hash will be removed, if the key exists multiple times in the hash
1337 * @hs this function must be called once per key. The removed object
1338 * will be returned and ops->hs_put is called on the removed object.
1341 cfs_hash_del_key(struct cfs_hash *hs, const void *key)
1343 return cfs_hash_del(hs, key, NULL);
1345 EXPORT_SYMBOL(cfs_hash_del_key);
1348 * Lookup an item using @key in the libcfs hash @hs and return it.
1349 * If the @key is found in the hash, hs->hs_get() is called and the
1350 * matching object is returned. It is the caller's responsibility
1351 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1352 * when finished with the object. If the @key was not found
1353 * in the hash @hs, NULL is returned.
1356 cfs_hash_lookup(struct cfs_hash *hs, const void *key)
1359 struct hlist_node *hnode;
1360 struct cfs_hash_bd bds[2];
1362 cfs_hash_lock(hs, 0);
1363 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1365 hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1367 obj = cfs_hash_object(hs, hnode);
1369 cfs_hash_dual_bd_unlock(hs, bds, 0);
1370 cfs_hash_unlock(hs, 0);
1374 EXPORT_SYMBOL(cfs_hash_lookup);
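/*
 * Illustrative lookup/put pairing (added commentary; struct foo and
 * its foo_hnode member are hypothetical):
 *
 *	struct foo *obj = cfs_hash_lookup(hash, &key);
 *	if (obj != NULL) {
 *		use_foo(obj);
 *		cfs_hash_put(hash, &obj->foo_hnode);
 *	}
 */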
1377 cfs_hash_for_each_enter(struct cfs_hash *hs) {
1378 LASSERT(!cfs_hash_is_exiting(hs));
1380 if (!cfs_hash_with_rehash(hs))
1383 * NB: there's a race on struct cfs_hash::hs_iterating, but it doesn't
1384 * matter because it's just an unreliable signal to the rehash thread,
1385 * which will try to finish the rehash ASAP when it sees this.
1387 hs->hs_iterating = 1;
1389 cfs_hash_lock(hs, 1);
1392 /* NB: iteration is mostly called by a service thread;
1393 * we tend to cancel a pending rehash request instead of
1394 * blocking the service thread, and will relaunch the rehash
1395 * request after the iteration */
1396 if (cfs_hash_is_rehashing(hs))
1397 cfs_hash_rehash_cancel_locked(hs);
1398 cfs_hash_unlock(hs, 1);
1402 cfs_hash_for_each_exit(struct cfs_hash *hs) {
1406 if (!cfs_hash_with_rehash(hs))
1408 cfs_hash_lock(hs, 1);
1409 remained = --hs->hs_iterators;
1410 bits = cfs_hash_rehash_bits(hs);
1411 cfs_hash_unlock(hs, 1);
1412 /* NB: there's a race on struct cfs_hash::hs_iterating, see above */
1414 hs->hs_iterating = 0;
1416 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1422 * For each item in the libcfs hash @hs call the passed callback @func
1423 * and pass to it as an argument each hash item and the private @data.
1425 * a) the function may sleep!
1426 * b) during the callback:
1427 * . the bucket lock is held so the callback must never sleep.
1428 * . if @remove_safe is true, the user can remove the current item via
1429 * cfs_hash_bd_del_locked
1432 cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1433 void *data, int remove_safe) {
1434 struct hlist_node *hnode;
1435 struct hlist_node *pos;
1436 struct cfs_hash_bd bd;
1438 int excl = !!remove_safe;
1442 cfs_hash_for_each_enter(hs);
1444 cfs_hash_lock(hs, 0);
1445 LASSERT(!cfs_hash_is_rehashing(hs));
1447 cfs_hash_for_each_bucket(hs, &bd, i) {
1448 struct hlist_head *hhead;
1450 cfs_hash_bd_lock(hs, &bd, excl);
1451 if (func == NULL) { /* only glimpse size */
1452 count += bd.bd_bucket->hsb_count;
1453 cfs_hash_bd_unlock(hs, &bd, excl);
1457 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1458 hlist_for_each_safe(hnode, pos, hhead) {
1459 cfs_hash_bucket_validate(hs, &bd, hnode);
1462 if (func(hs, &bd, hnode, data)) {
1463 cfs_hash_bd_unlock(hs, &bd, excl);
1468 cfs_hash_bd_unlock(hs, &bd, excl);
1469 if (loop < CFS_HASH_LOOP_HOG)
1472 cfs_hash_unlock(hs, 0);
1474 cfs_hash_lock(hs, 0);
1477 cfs_hash_unlock(hs, 0);
1479 cfs_hash_for_each_exit(hs);
1484 cfs_hash_cond_opt_cb_t func;
1486 } cfs_hash_cond_arg_t;
1489 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1490 struct hlist_node *hnode, void *data)
1492 cfs_hash_cond_arg_t *cond = data;
1494 if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1495 cfs_hash_bd_del_locked(hs, bd, hnode);
1500 * Delete items from the libcfs hash @hs when @func returns true.
1501 * The write lock is held while looping over each bucket, so no
1502 * object can gain a reference during the deletion.
1505 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1507 cfs_hash_cond_arg_t arg = {
1512 cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1514 EXPORT_SYMBOL(cfs_hash_cond_del);
1517 cfs_hash_for_each(struct cfs_hash *hs,
1518 cfs_hash_for_each_cb_t func, void *data)
1520 cfs_hash_for_each_tight(hs, func, data, 0);
1522 EXPORT_SYMBOL(cfs_hash_for_each);
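/*
 * Illustrative callback sketch (added commentary; the foo_* names are
 * hypothetical). Returning nonzero from the callback breaks the
 * iteration, as noted in the file header:
 *
 *	static int
 *	foo_print_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *		     struct hlist_node *hnode, void *data)
 *	{
 *		struct foo *obj = cfs_hash_object(hs, hnode);
 *
 *		CDEBUG(D_INFO, "item %p\n", obj);
 *		return 0;	(return 1 here to stop the iteration)
 *	}
 *
 *	cfs_hash_for_each(hash, foo_print_cb, NULL);
 */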
1525 cfs_hash_for_each_safe(struct cfs_hash *hs,
1526 cfs_hash_for_each_cb_t func, void *data) {
1527 cfs_hash_for_each_tight(hs, func, data, 1);
1529 EXPORT_SYMBOL(cfs_hash_for_each_safe);
1532 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1533 struct hlist_node *hnode, void *data)
1536 return 1; /* return 1 to break the loop */
1540 cfs_hash_is_empty(struct cfs_hash *hs)
1544 cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1547 EXPORT_SYMBOL(cfs_hash_is_empty);
1550 cfs_hash_size_get(struct cfs_hash *hs)
1552 return cfs_hash_with_counter(hs) ?
1553 atomic_read(&hs->hs_count) :
1554 cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1556 EXPORT_SYMBOL(cfs_hash_size_get);
1559 * cfs_hash_for_each_relax:
1560 * Iterate the hash table and call @func on each item without
1561 * any lock. This function can't guarantee that the iteration will
1562 * finish if these features are enabled:
1564 * a. if rehash_key is enabled, an item can be moved from
1565 * one bucket to another bucket
1566 * b. the user can remove a non-zero-ref item from the hash table,
1567 * so the item can disappear from the hash table; even worse,
1568 * it's possible that the user changed the key and inserted the item
1569 * into another hash bucket.
1570 * There's no way for us to finish the iteration correctly in the
1571 * previous two cases, so the iteration has to stop on a change.
1574 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1576 struct hlist_node *hnode;
1577 struct hlist_node *tmp;
1578 struct cfs_hash_bd bd;
1585 stop_on_change = cfs_hash_with_rehash_key(hs) ||
1586 !cfs_hash_with_no_itemref(hs) ||
1587 hs->hs_ops->hs_put_locked == NULL;
1588 cfs_hash_lock(hs, 0);
1589 LASSERT(!cfs_hash_is_rehashing(hs));
1591 cfs_hash_for_each_bucket(hs, &bd, i) {
1592 struct hlist_head *hhead;
1594 cfs_hash_bd_lock(hs, &bd, 0);
1595 version = cfs_hash_bd_version_get(&bd);
1597 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1598 for (hnode = hhead->first; hnode != NULL;) {
1599 cfs_hash_bucket_validate(hs, &bd, hnode);
1600 cfs_hash_get(hs, hnode);
1601 cfs_hash_bd_unlock(hs, &bd, 0);
1602 cfs_hash_unlock(hs, 0);
1604 rc = func(hs, &bd, hnode, data);
1606 cfs_hash_put(hs, hnode);
1610 cfs_hash_lock(hs, 0);
1611 cfs_hash_bd_lock(hs, &bd, 0);
1612 if (!stop_on_change) {
1614 cfs_hash_put_locked(hs, hnode);
1616 } else { /* bucket changed? */
1617 if (version !=
1618 cfs_hash_bd_version_get(&bd))
1619 break;
1620 /* safe to continue because no change */
1621 hnode = hnode->next;
1623 if (rc) /* callback wants to break iteration */
1627 cfs_hash_bd_unlock(hs, &bd, 0);
1629 cfs_hash_unlock(hs, 0);
1635 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1636 cfs_hash_for_each_cb_t func, void *data) {
1637 if (cfs_hash_with_no_lock(hs) ||
1638 cfs_hash_with_rehash_key(hs) ||
1639 !cfs_hash_with_no_itemref(hs))
1642 if (hs->hs_ops->hs_get == NULL ||
1643 (hs->hs_ops->hs_put == NULL &&
1644 hs->hs_ops->hs_put_locked == NULL))
1647 cfs_hash_for_each_enter(hs);
1648 cfs_hash_for_each_relax(hs, func, data);
1649 cfs_hash_for_each_exit(hs);
1653 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
1656 * For each hash bucket in the libcfs hash @hs call the passed callback
1657 * @func until all the hash buckets are empty. The passed callback @func
1658 * or the previously registered callback hs->hs_put must remove the item
1659 * from the hash. You may either use the cfs_hash_del() or hlist_del()
1660 * functions. No rwlocks will be held during the callback @func, so it
1661 * is safe to sleep if needed. This function will not terminate until
1662 * the hash is empty. Note it is still possible to concurrently add new
1663 * items into the hash. It is the caller's responsibility to ensure
1664 * the required locking is in place to prevent concurrent insertions.
1667 cfs_hash_for_each_empty(struct cfs_hash *hs,
1668 cfs_hash_for_each_cb_t func, void *data) {
1671 if (cfs_hash_with_no_lock(hs))
1674 if (hs->hs_ops->hs_get == NULL ||
1675 (hs->hs_ops->hs_put == NULL &&
1676 hs->hs_ops->hs_put_locked == NULL))
1679 cfs_hash_for_each_enter(hs);
1680 while (cfs_hash_for_each_relax(hs, func, data)) {
1681 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1684 cfs_hash_for_each_exit(hs);
1687 EXPORT_SYMBOL(cfs_hash_for_each_empty);
1690 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1691 cfs_hash_for_each_cb_t func, void *data)
1693 struct hlist_head *hhead;
1694 struct hlist_node *hnode;
1695 struct cfs_hash_bd bd;
1697 cfs_hash_for_each_enter(hs);
1698 cfs_hash_lock(hs, 0);
1699 if (hindex >= CFS_HASH_NHLIST(hs))
1702 cfs_hash_bd_index_set(hs, hindex, &bd);
1704 cfs_hash_bd_lock(hs, &bd, 0);
1705 hhead = cfs_hash_bd_hhead(hs, &bd);
1706 hlist_for_each(hnode, hhead) {
1707 if (func(hs, &bd, hnode, data))
1710 cfs_hash_bd_unlock(hs, &bd, 0);
1712 cfs_hash_unlock(hs, 0);
1713 cfs_hash_for_each_exit(hs);
1716 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1719 * For each item in the libcfs hash @hs which matches the @key call
1720 * the passed callback @func and pass to it as an argument each hash
1721 * item and the private @data. During the callback the bucket lock
1722 * is held so the callback must never sleep.
1725 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1726 cfs_hash_for_each_cb_t func, void *data) {
1727 struct hlist_node *hnode;
1728 struct cfs_hash_bd bds[2];
1731 cfs_hash_lock(hs, 0);
1733 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1735 cfs_hash_for_each_bd(bds, 2, i) {
1736 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1738 hlist_for_each(hnode, hlist) {
1739 cfs_hash_bucket_validate(hs, &bds[i], hnode);
1741 if (cfs_hash_keycmp(hs, key, hnode)) {
1742 if (func(hs, &bds[i], hnode, data))
1748 cfs_hash_dual_bd_unlock(hs, bds, 0);
1749 cfs_hash_unlock(hs, 0);
1751 EXPORT_SYMBOL(cfs_hash_for_each_key);
1754 * Rehash the libcfs hash @hs to the given @bits. This can be used
1755 * to grow the hash size when excessive chaining is detected, or to
1756 * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH
1757 * flag is set in @hs the libcfs hash may be dynamically rehashed
1758 * during addition or removal if the hash's theta value falls outside
1759 * the range [hs->hs_min_theta, hs->hs_max_theta]. By default
1760 * these values are tuned to keep the chained hash depth small, and
1761 * this approach assumes a reasonably uniform hashing function. The
1762 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1765 cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1769 /* need hold cfs_hash_lock(hs, 1) */
1770 LASSERT(cfs_hash_with_rehash(hs) &&
1771 !cfs_hash_with_no_lock(hs));
1773 if (!cfs_hash_is_rehashing(hs))
1776 if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1777 hs->hs_rehash_bits = 0;
1781 for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1782 cfs_hash_unlock(hs, 1);
1783 /* raise console warning while waiting too long */
1784 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1785 "hash %s is still rehashing, rescheded %d\n",
1786 hs->hs_name, i - 1);
1788 cfs_hash_lock(hs, 1);
1791 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1794 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1796 cfs_hash_lock(hs, 1);
1797 cfs_hash_rehash_cancel_locked(hs);
1798 cfs_hash_unlock(hs, 1);
1800 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1803 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1807 LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1809 cfs_hash_lock(hs, 1);
1811 rc = cfs_hash_rehash_bits(hs);
1813 cfs_hash_unlock(hs, 1);
1817 hs->hs_rehash_bits = rc;
1819 /* launch and return */
1820 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1821 cfs_hash_unlock(hs, 1);
1825 /* rehash right now */
1826 cfs_hash_unlock(hs, 1);
1828 return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1830 EXPORT_SYMBOL(cfs_hash_rehash);
1833 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1835 struct cfs_hash_bd new;
1836 struct hlist_head *hhead;
1837 struct hlist_node *hnode;
1838 struct hlist_node *pos;
1842 /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
1843 cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1844 hlist_for_each_safe(hnode, pos, hhead) {
1845 key = cfs_hash_key(hs, hnode);
1846 LASSERT(key != NULL);
1847 /* Validate hnode is in the correct bucket. */
1848 cfs_hash_bucket_validate(hs, old, hnode);
1850 * Delete from old hash bucket; move to new bucket.
1851 * ops->hs_key must be defined.
1853 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1854 hs->hs_rehash_bits, key, &new);
1855 cfs_hash_bd_move_locked(hs, old, &new, hnode);
1864 cfs_hash_rehash_worker(cfs_workitem_t *wi)
1866 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
1867 struct cfs_hash_bucket **bkts;
1868 struct cfs_hash_bd bd;
1869 unsigned int old_size;
1870 unsigned int new_size;
1876 LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1878 cfs_hash_lock(hs, 0);
1879 LASSERT(cfs_hash_is_rehashing(hs));
1881 old_size = CFS_HASH_NBKT(hs);
1882 new_size = CFS_HASH_RH_NBKT(hs);
1884 cfs_hash_unlock(hs, 0);
1887 * don't need hs::hs_rwlock for hs::hs_buckets,
1888 * because nobody can change bkt-table except me.
1890 bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1891 old_size, new_size);
1892 cfs_hash_lock(hs, 1);
1898 if (bkts == hs->hs_buckets) {
1899 bkts = NULL; /* do nothing */
1903 rc = __cfs_hash_theta(hs);
1904 if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1905 /* free the newly allocated bkt-table */
1906 old_size = new_size;
1907 new_size = CFS_HASH_NBKT(hs);
1912 LASSERT(hs->hs_rehash_buckets == NULL);
1913 hs->hs_rehash_buckets = bkts;
1916 cfs_hash_for_each_bucket(hs, &bd, i) {
1917 if (cfs_hash_is_exiting(hs)) {
1919 /* someone wants to destroy the hash, abort now */
1920 if (old_size < new_size) /* OK to free old bkt-table */
1922 /* it's shrinking, we need to free the new bkt-table */
1923 hs->hs_rehash_buckets = NULL;
1924 old_size = new_size;
1925 new_size = CFS_HASH_NBKT(hs);
1929 count += cfs_hash_rehash_bd(hs, &bd);
1930 if (count < CFS_HASH_LOOP_HOG ||
1931 cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1936 cfs_hash_unlock(hs, 1);
1938 cfs_hash_lock(hs, 1);
1941 hs->hs_rehash_count++;
1943 bkts = hs->hs_buckets;
1944 hs->hs_buckets = hs->hs_rehash_buckets;
1945 hs->hs_rehash_buckets = NULL;
1947 hs->hs_cur_bits = hs->hs_rehash_bits;
1949 hs->hs_rehash_bits = 0;
1950 if (rc == -ESRCH) /* never be scheduled again */
1951 cfs_wi_exit(cfs_sched_rehash, wi);
1952 bsize = cfs_hash_bkt_size(hs);
1953 cfs_hash_unlock(hs, 1);
1954 /* can't refer to @hs anymore because it could be destroyed */
1956 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1958 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1959 /* return 1 only if cfs_wi_exit is called */
1960 return rc == -ESRCH;
1964 * Rehash the object referenced by @hnode in the libcfs hash @hs. The
1965 * @old_key must be provided to locate the object's previous location
1966 * in the hash, and the @new_key will be used to reinsert the object.
1967 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1968 * combo when it is critical that there is no window in time where the
1969 * object is missing from the hash. When an object is being rehashed
1970 * the registered cfs_hash_get() and cfs_hash_put() functions will
1971 * not be called.
1973 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
1974 void *new_key, struct hlist_node *hnode)
1976 struct cfs_hash_bd bds[3];
1977 struct cfs_hash_bd old_bds[2];
1978 struct cfs_hash_bd new_bd;
1980 LASSERT(!hlist_unhashed(hnode));
1982 cfs_hash_lock(hs, 0);
1984 cfs_hash_dual_bd_get(hs, old_key, old_bds);
1985 cfs_hash_bd_get(hs, new_key, &new_bd);
1987 bds[0] = old_bds[0];
1988 bds[1] = old_bds[1];
1991 /* NB: bds[0] and bds[1] are ordered already */
1992 cfs_hash_bd_order(&bds[1], &bds[2]);
1993 cfs_hash_bd_order(&bds[0], &bds[1]);
1995 cfs_hash_multi_bd_lock(hs, bds, 3, 1);
1996 if (likely(old_bds[1].bd_bucket == NULL)) {
1997 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
1999 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2000 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2002 /* overwrite the key inside the locks, otherwise it may screw up
2003 * other operations, e.g. rehash */
2004 cfs_hash_keycpy(hs, new_key, hnode);
2006 cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2007 cfs_hash_unlock(hs, 0);
2009 EXPORT_SYMBOL(cfs_hash_rehash_key);
2011 void cfs_hash_debug_header(struct seq_file *m)
2013 seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n",
2014 CFS_HASH_BIGNAME_LEN, "name");
2016 EXPORT_SYMBOL(cfs_hash_debug_header);
2018 static struct cfs_hash_bucket **
2019 cfs_hash_full_bkts(struct cfs_hash *hs)
2021 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2022 if (hs->hs_rehash_buckets == NULL)
2023 return hs->hs_buckets;
2025 LASSERT(hs->hs_rehash_bits != 0);
2026 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2027 hs->hs_rehash_buckets : hs->hs_buckets;
2031 cfs_hash_full_nbkt(struct cfs_hash *hs)
2033 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2034 if (hs->hs_rehash_buckets == NULL)
2035 return CFS_HASH_NBKT(hs);
2037 LASSERT(hs->hs_rehash_bits != 0);
2038 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2039 CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2042 void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2044 int dist[8] = { 0, };
2051 cfs_hash_lock(hs, 0);
2052 theta = __cfs_hash_theta(hs);
2054 seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
2055 CFS_HASH_BIGNAME_LEN, hs->hs_name,
2056 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2057 1 << hs->hs_max_bits,
2058 __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2059 __cfs_hash_theta_int(hs->hs_min_theta),
2060 __cfs_hash_theta_frac(hs->hs_min_theta),
2061 __cfs_hash_theta_int(hs->hs_max_theta),
2062 __cfs_hash_theta_frac(hs->hs_max_theta),
2063 hs->hs_flags, hs->hs_rehash_count);
2066 * The distribution is a summary of the chained hash depth in
2067 * each of the libcfs hash buckets. Each bucket's hsb_count is
2068 * divided by the hash theta value and used to generate a
2069 * histogram of the hash distribution. A uniform hash will
2070 * result in all hash buckets being close to the average, thus
2071 * only the first few entries in the histogram will be non-zero.
2072 * If your hash function results in a non-uniform hash, this will
2073 * be observable as outlier buckets in the distribution histogram.
2075 * Uniform hash distribution: 128/128/0/0/0/0/0/0
2076 * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
2078 for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2079 struct cfs_hash_bd bd;
2081 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2082 cfs_hash_bd_lock(hs, &bd, 0);
2083 if (maxdep < bd.bd_bucket->hsb_depmax) {
2084 maxdep = bd.bd_bucket->hsb_depmax;
2085 maxdepb = ffz(~maxdep);
2087 total += bd.bd_bucket->hsb_count;
2088 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2089 cfs_hash_bd_unlock(hs, &bd, 0);
2092 seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2093 for (i = 0; i < 8; i++)
2094 seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');
2096 cfs_hash_unlock(hs, 0);
2098 EXPORT_SYMBOL(cfs_hash_debug_str);