/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_hash.h
 */

#ifndef __LIBCFS_HASH_H__
#define __LIBCFS_HASH_H__

/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is, operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define CFS_GOLDEN_RATIO_PRIME_32	0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define CFS_GOLDEN_RATIO_PRIME_64	0x9e37fffffffc0001ULL
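
/*
 * Illustrative sketch (not part of this header's API): the primes above are
 * used as multipliers, so a 64-bit key can be folded into an index of @bits
 * bits as below; cfs_hash_u64_hash() later in this header does the same
 * thing with a mask instead of a shift. example_hash64 is a hypothetical
 * name used only for illustration:
 *
 *	static inline unsigned example_hash64(__u64 key, unsigned bits)
 *	{
 *		return (unsigned)((key * CFS_GOLDEN_RATIO_PRIME_64) >>
 *				  (64 - bits));
 *	}
 */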

/*
 * Ideally we would use HAVE_HASH_LONG for this, but on linux we configure
 * the linux kernel and user space at the same time, so we need to
 * differentiate between them explicitly. If this is not needed on other
 * architectures, then we'll need to move the functions to arch-specific
 * headers.
 */
#include <linux/hash.h>

#define cfs_hash_long(val, bits)	hash_long(val, bits)

#define CFS_HASH_DEBUG_NONE	0
/** record hash depth and output it to the console when it is too deep;
 * the computing overhead is low but it consumes more memory */
#define CFS_HASH_DEBUG_1	1
/** expensive, check key validation */
#define CFS_HASH_DEBUG_2	2

#define CFS_HASH_DEBUG_LEVEL	CFS_HASH_DEBUG_NONE

struct cfs_hash_lock_ops;
struct cfs_hash_hlist_ops;

typedef union {
	rwlock_t		rw;	/**< rwlock */
	spinlock_t		spin;	/**< spinlock */
} cfs_hash_lock_t;

/**
 * cfs_hash_bucket is a container of:
 * - array of hash-head starting from hsb_head[0], hash-head can be one of
 *   . cfs_hash_head_t
 *   . cfs_hash_head_dep_t
 *   . cfs_hash_dhead_t
 *   . cfs_hash_dhead_dep_t
 *   which depends on the requirements of the user
 * - some extra bytes (caller can require it while creating hash)
 */
typedef struct cfs_hash_bucket {
	cfs_hash_lock_t		hsb_lock;	/**< bucket lock */
	__u32			hsb_count;	/**< current entries */
	__u32			hsb_version;	/**< change version */
	unsigned int		hsb_index;	/**< index of bucket */
	int			hsb_depmax;	/**< max depth on bucket */
	long			hsb_head[0];	/**< hash-head array */
} cfs_hash_bucket_t;

/**
 * cfs_hash bucket descriptor, it is normally on the caller's stack
 */
typedef struct cfs_hash_bd {
	cfs_hash_bucket_t	*bd_bucket;	/**< address of bucket */
	unsigned int		 bd_offset;	/**< offset in bucket */
} cfs_hash_bd_t;

#define CFS_HASH_NAME_LEN	16	/**< default name length */
#define CFS_HASH_BIGNAME_LEN	64	/**< bigname for param tree */

#define CFS_HASH_BKT_BITS	3	/**< default bits of bucket */
#define CFS_HASH_BITS_MAX	30	/**< max bits of bucket */
#define CFS_HASH_BITS_MIN	CFS_HASH_BKT_BITS

/**
 * common hash attributes.
 */
enum cfs_hash_tag {
	/**
	 * don't need any lock, caller will protect operations with its
	 * own lock. With this flag:
	 *  . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
	 *    will be ignored.
	 *  . Some functions will be disabled with this flag, i.e.:
	 *    cfs_hash_for_each_empty, cfs_hash_rehash
	 */
	CFS_HASH_NO_LOCK	= 1 << 0,
	/** no bucket lock, use one spinlock to protect the whole hash */
	CFS_HASH_NO_BKTLOCK	= 1 << 1,
	/** rwlock to protect bucket */
	CFS_HASH_RW_BKTLOCK	= 1 << 2,
	/** spinlock to protect bucket */
	CFS_HASH_SPIN_BKTLOCK	= 1 << 3,
	/** always add new item to tail */
	CFS_HASH_ADD_TAIL	= 1 << 4,
	/** hash-table doesn't have refcount on item */
	CFS_HASH_NO_ITEMREF	= 1 << 5,
	/** big name for param-tree */
	CFS_HASH_BIGNAME	= 1 << 6,
	/** track global count */
	CFS_HASH_COUNTER	= 1 << 7,
	/** rehash item by new key */
	CFS_HASH_REHASH_KEY	= 1 << 8,
	/** Enable dynamic hash resizing */
	CFS_HASH_REHASH		= 1 << 9,
	/** can shrink hash-size */
	CFS_HASH_SHRINK		= 1 << 10,
	/** assert hash is empty on exit */
	CFS_HASH_ASSERT_EMPTY	= 1 << 11,
	/** record hlist depth */
	CFS_HASH_DEPTH		= 1 << 12,
	/**
	 * rehash is always scheduled in a different thread, so current
	 * change on the hash table is non-blocking
	 */
	CFS_HASH_NBLK_CHANGE	= 1 << 13,
	/** NB, we typed hs_flags as __u16, please change it
	 * if you need to extend >= 16 flags */
};

/** most used attributes */
#define CFS_HASH_DEFAULT	(CFS_HASH_RW_BKTLOCK | \
				 CFS_HASH_COUNTER | CFS_HASH_REHASH)
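
/*
 * Illustrative example: a caller that wants per-bucket spinlocks, dynamic
 * resizing in both directions and hlist depth tracking could pass a flag
 * combination such as the following (all names are defined in
 * enum cfs_hash_tag above; my_flags is a hypothetical variable):
 *
 *	unsigned my_flags = CFS_HASH_SPIN_BKTLOCK | CFS_HASH_REHASH |
 *			    CFS_HASH_SHRINK | CFS_HASH_DEPTH;
 */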

/**
 * cfs_hash is a general-purpose hash-table implementation; it can support:
 * . two refcount modes
 *   hash-table with & without refcount
 * . four lock modes
 *   nolock, one-spinlock, rw-bucket-lock, spin-bucket-lock
 * . general operations
 *   lookup, add (add_tail or add_head), delete
 * . iteration
 *   locked iteration and unlocked iteration
 * . bigname
 *   support long name hash
 * . debug
 *   trace max searching depth
 *
 * Rehash:
 * When the htable grows or shrinks, a separate task (cfs_hash_rehash_worker)
 * is spawned to handle the rehash in the background. Other processes can
 * concurrently perform additions, deletions, and lookups without being
 * blocked on rehash completion, because rehash releases the global wrlock
 * for each bucket.
 *
 * Rehash and iteration can't run at the same time because it's too tricky
 * to keep both of them safe and correct. Since they are relatively rare
 * operations:
 * . if iteration is in progress while we try to launch rehash, rehash just
 *   gives up and the iterator will launch rehash at the end of iteration.
 * . if rehash is in progress while we try to iterate the hash table, we
 *   just wait (it shouldn't take very long); nobody should expect
 *   iteration of the whole hash-table to be non-blocking anyway.
 *
 * During rehashing, a (key,object) pair may be in one of two buckets,
 * depending on whether the worker task has yet to transfer the object
 * to its new location in the table. Lookups and deletions need to search
 * both locations; additions must take care to only insert into the new
 * bucket.
 */
typedef struct cfs_hash {
	/** serialize with rehash, or serialize all operations if
	 * the hash-table has CFS_HASH_NO_BKTLOCK */
	cfs_hash_lock_t			hs_lock;
	/** hash operations */
	struct cfs_hash_ops		*hs_ops;
	/** hash lock operations */
	struct cfs_hash_lock_ops	*hs_lops;
	/** hash list operations */
	struct cfs_hash_hlist_ops	*hs_hops;
	/** hash buckets-table */
	cfs_hash_bucket_t		**hs_buckets;
	/** total number of items on this hash-table */
	atomic_t			hs_count;
	/** hash flags, see cfs_hash_tag for detail */
	__u16				hs_flags;
	/** # of extra-bytes for bucket, for user saving extended attributes */
	__u16				hs_extra_bytes;
	/** wants to iterate */
	__u8				hs_iterating;
	/** hash-table is dying */
	__u8				hs_exiting;
	/** current hash bits */
	__u8				hs_cur_bits;
	/** bits for rehash */
	__u8				hs_rehash_bits;
	/** bits for each bucket */
	__u8				hs_bkt_bits;
	/** resize min threshold */
	__u16				hs_min_theta;
	/** resize max threshold */
	__u16				hs_max_theta;
	/** resize count */
	__u32				hs_rehash_count;
	/** # of iterators (caller of cfs_hash_for_each_*) */
	__u32				hs_iterators;
	/** rehash workitem */
	cfs_workitem_t			hs_rehash_wi;
	/** refcount on this hash table */
	atomic_t			hs_refcount;
	/** rehash buckets-table */
	cfs_hash_bucket_t		**hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
	/** serialize debug members */
	spinlock_t			hs_dep_lock;
	/** max depth */
	unsigned int			hs_dep_max;
	/** id of the deepest bucket */
	unsigned int			hs_dep_bkt;
	/** offset in the deepest bucket */
	unsigned int			hs_dep_off;
	/** bits when we found the max depth */
	unsigned int			hs_dep_bits;
	/** workitem to output max depth */
	cfs_workitem_t			hs_dep_wi;
#endif
	/** name of htable */
	char				hs_name[0];
} cfs_hash_t;

typedef struct cfs_hash_lock_ops {
	/** lock the hash table */
	void	(*hs_lock)(cfs_hash_lock_t *lock, int exclusive);
	/** unlock the hash table */
	void	(*hs_unlock)(cfs_hash_lock_t *lock, int exclusive);
	/** lock the hash bucket */
	void	(*hs_bkt_lock)(cfs_hash_lock_t *lock, int exclusive);
	/** unlock the hash bucket */
	void	(*hs_bkt_unlock)(cfs_hash_lock_t *lock, int exclusive);
} cfs_hash_lock_ops_t;

typedef struct cfs_hash_hlist_ops {
	/** return hlist_head of hash-head of @bd */
	struct hlist_head *(*hop_hhead)(cfs_hash_t *hs, cfs_hash_bd_t *bd);
	/** return hash-head size */
	int (*hop_hhead_size)(cfs_hash_t *hs);
	/** add @hnode to hash-head of @bd */
	int (*hop_hnode_add)(cfs_hash_t *hs,
			     cfs_hash_bd_t *bd, struct hlist_node *hnode);
	/** remove @hnode from hash-head of @bd */
	int (*hop_hnode_del)(cfs_hash_t *hs,
			     cfs_hash_bd_t *bd, struct hlist_node *hnode);
} cfs_hash_hlist_ops_t;

typedef struct cfs_hash_ops {
	/** return hashed value from @key */
	unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask);
	/** return key address of @hnode */
	void *   (*hs_key)(struct hlist_node *hnode);
	/** copy key from @hnode to @key */
	void     (*hs_keycpy)(struct hlist_node *hnode, void *key);
	/**
	 * compare @key with key of @hnode
	 * returns 1 on a match
	 */
	int      (*hs_keycmp)(const void *key, struct hlist_node *hnode);
	/** return object address of @hnode, i.e: container_of(...hnode) */
	void *   (*hs_object)(struct hlist_node *hnode);
	/** get refcount of item, always called with the bucket lock held */
	void     (*hs_get)(cfs_hash_t *hs, struct hlist_node *hnode);
	/** release refcount of item */
	void     (*hs_put)(cfs_hash_t *hs, struct hlist_node *hnode);
	/** release refcount of item, always called with the bucket lock held */
	void     (*hs_put_locked)(cfs_hash_t *hs, struct hlist_node *hnode);
	/** called before @hnode is removed */
	void     (*hs_exit)(cfs_hash_t *hs, struct hlist_node *hnode);
} cfs_hash_ops_t;
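
/*
 * Illustrative sketch of a user-provided ops table for a hypothetical object
 * type. struct my_obj, my_obj_hashfn and the other my_* names are examples
 * only, not part of this header. Each callback maps between the embedded
 * hlist_node and the containing object:
 *
 *	struct my_obj {
 *		__u64			mo_key;
 *		atomic_t		mo_ref;
 *		struct hlist_node	mo_hnode;
 *	};
 *
 *	static unsigned my_obj_hashfn(cfs_hash_t *hs, const void *key,
 *				      unsigned mask)
 *	{
 *		return cfs_hash_u64_hash(*(__u64 *)key, mask);
 *	}
 *
 *	static void *my_obj_key(struct hlist_node *hnode)
 *	{
 *		return &container_of(hnode, struct my_obj, mo_hnode)->mo_key;
 *	}
 *
 *	static int my_obj_keycmp(const void *key, struct hlist_node *hnode)
 *	{
 *		return *(__u64 *)key ==
 *		       container_of(hnode, struct my_obj, mo_hnode)->mo_key;
 *	}
 *
 *	static void *my_obj_object(struct hlist_node *hnode)
 *	{
 *		return container_of(hnode, struct my_obj, mo_hnode);
 *	}
 *
 *	static void my_obj_get(cfs_hash_t *hs, struct hlist_node *hnode)
 *	{
 *		atomic_inc(&container_of(hnode, struct my_obj,
 *					 mo_hnode)->mo_ref);
 *	}
 *
 *	static void my_obj_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 *	{
 *		atomic_dec(&container_of(hnode, struct my_obj,
 *					 mo_hnode)->mo_ref);
 *	}
 *
 *	static cfs_hash_ops_t my_obj_hash_ops = {
 *		.hs_hash	= my_obj_hashfn,
 *		.hs_key		= my_obj_key,
 *		.hs_keycmp	= my_obj_keycmp,
 *		.hs_object	= my_obj_object,
 *		.hs_get		= my_obj_get,
 *		.hs_put_locked	= my_obj_put_locked,
 *	};
 */
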
/** total number of buckets in @hs */
#define CFS_HASH_NBKT(hs)	\
	(1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits))

/** total number of buckets in @hs while rehashing */
#define CFS_HASH_RH_NBKT(hs)	\
	(1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits))

/** number of hlists in a bucket */
#define CFS_HASH_BKT_NHLIST(hs)	(1U << (hs)->hs_bkt_bits)

/** total number of hlists in @hs */
#define CFS_HASH_NHLIST(hs)	(1U << (hs)->hs_cur_bits)

/** total number of hlists in @hs while rehashing */
#define CFS_HASH_RH_NHLIST(hs)	(1U << (hs)->hs_rehash_bits)
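
/*
 * Worked example of the macros above: with hs_cur_bits = 10 and
 * hs_bkt_bits = 3, CFS_HASH_NBKT() is 1 << (10 - 3) = 128 buckets,
 * CFS_HASH_BKT_NHLIST() is 1 << 3 = 8 hlist-heads per bucket, and
 * CFS_HASH_NHLIST() is 1 << 10 = 1024 hlist-heads in total (128 * 8).
 */
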
static inline int cfs_hash_with_no_lock(cfs_hash_t *hs)
{
	/* caller will serialize all operations for this hash-table */
	return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
}

static inline int cfs_hash_with_no_bktlock(cfs_hash_t *hs)
{
	/* no bucket lock, one single lock to protect the hash-table */
	return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
}

static inline int cfs_hash_with_rw_bktlock(cfs_hash_t *hs)
{
	/* rwlock to protect hash bucket */
	return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
}

static inline int cfs_hash_with_spin_bktlock(cfs_hash_t *hs)
{
	/* spinlock to protect hash bucket */
	return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
}

static inline int cfs_hash_with_add_tail(cfs_hash_t *hs)
{
	return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
}

static inline int cfs_hash_with_no_itemref(cfs_hash_t *hs)
{
	/* hash-table doesn't keep refcount on item,
	 * item can't be removed from the hash unless its
	 * refcount is zero */
	return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0;
}

static inline int cfs_hash_with_bigname(cfs_hash_t *hs)
{
	return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
}

static inline int cfs_hash_with_counter(cfs_hash_t *hs)
{
	return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
}

static inline int cfs_hash_with_rehash(cfs_hash_t *hs)
{
	return (hs->hs_flags & CFS_HASH_REHASH) != 0;
}

static inline int cfs_hash_with_rehash_key(cfs_hash_t *hs)
{
	return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
}

static inline int cfs_hash_with_shrink(cfs_hash_t *hs)
{
	return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
}

static inline int cfs_hash_with_assert_empty(cfs_hash_t *hs)
{
	return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
}

static inline int cfs_hash_with_depth(cfs_hash_t *hs)
{
	return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
}

static inline int cfs_hash_with_nblk_change(cfs_hash_t *hs)
{
	return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
}

static inline int cfs_hash_is_exiting(cfs_hash_t *hs)
{	/* cfs_hash_destroy is called */
	return hs->hs_exiting;
}

static inline int cfs_hash_is_rehashing(cfs_hash_t *hs)
{	/* rehash is launched */
	return hs->hs_rehash_bits != 0;
}

static inline int cfs_hash_is_iterating(cfs_hash_t *hs)
{	/* someone is calling cfs_hash_for_each_* */
	return hs->hs_iterating || hs->hs_iterators != 0;
}

static inline int cfs_hash_bkt_size(cfs_hash_t *hs)
{
	return offsetof(cfs_hash_bucket_t, hsb_head[0]) +
	       hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
	       hs->hs_extra_bytes;
}

#define CFS_HOP(hs, op)		(hs)->hs_ops->hs_ ## op

static inline unsigned
cfs_hash_id(cfs_hash_t *hs, const void *key, unsigned mask)
{
	return CFS_HOP(hs, hash)(hs, key, mask);
}

static inline void *cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
{
	return CFS_HOP(hs, key)(hnode);
}

static inline void
cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key)
{
	if (CFS_HOP(hs, keycpy) != NULL)
		CFS_HOP(hs, keycpy)(hnode, key);
}

/**
 * Returns 1 on a match
 */
static inline int
cfs_hash_keycmp(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
	return CFS_HOP(hs, keycmp)(key, hnode);
}

static inline void *cfs_hash_object(cfs_hash_t *hs, struct hlist_node *hnode)
{
	return CFS_HOP(hs, object)(hnode);
}

static inline void cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
	return CFS_HOP(hs, get)(hs, hnode);
}

static inline void
cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
	LASSERT(CFS_HOP(hs, put_locked) != NULL);
	return CFS_HOP(hs, put_locked)(hs, hnode);
}

static inline void cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
{
	LASSERT(CFS_HOP(hs, put) != NULL);
	return CFS_HOP(hs, put)(hs, hnode);
}

static inline void cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
	if (CFS_HOP(hs, exit))
		CFS_HOP(hs, exit)(hs, hnode);
}

static inline void cfs_hash_lock(cfs_hash_t *hs, int excl)
{
	hs->hs_lops->hs_lock(&hs->hs_lock, excl);
}

static inline void cfs_hash_unlock(cfs_hash_t *hs, int excl)
{
	hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
}

static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs,
					atomic_t *condition)
{
	LASSERT(cfs_hash_with_no_bktlock(hs));
	return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
}

static inline void cfs_hash_bd_lock(cfs_hash_t *hs,
				    cfs_hash_bd_t *bd, int excl)
{
	hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
}

static inline void cfs_hash_bd_unlock(cfs_hash_t *hs,
				      cfs_hash_bd_t *bd, int excl)
{
	hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
}

/**
 * operations on cfs_hash bucket (bd: bucket descriptor),
 * they are normally for hash-table without rehash
 */
void cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd);

static inline void cfs_hash_bd_get_and_lock(cfs_hash_t *hs, const void *key,
					    cfs_hash_bd_t *bd, int excl)
{
	cfs_hash_bd_get(hs, key, bd);
	cfs_hash_bd_lock(hs, bd, excl);
}
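
/*
 * Illustrative sketch: adding a node under the per-bucket lock (hs, key and
 * hnode are assumed to come from the caller); cfs_hash_bd_add_locked() is
 * declared further below:
 *
 *	cfs_hash_bd_t bd;
 *
 *	cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
 *	cfs_hash_bd_add_locked(hs, &bd, hnode);
 *	cfs_hash_bd_unlock(hs, &bd, 1);
 *
 * The higher-level cfs_hash_add() below does roughly this for you, plus the
 * refcounting and the dual-bucket handling needed while a rehash is running.
 */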

static inline unsigned cfs_hash_bd_index_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
	return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
}

static inline void cfs_hash_bd_index_set(cfs_hash_t *hs,
					 unsigned index, cfs_hash_bd_t *bd)
{
	bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
	bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
}
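
/*
 * Worked example of the index encoding above: with hs_bkt_bits = 3, a
 * descriptor with hsb_index = 5 and bd_offset = 3 gives
 * cfs_hash_bd_index_get() = 3 | (5 << 3) = 43; cfs_hash_bd_index_set()
 * reverses it: bucket = hs_buckets[43 >> 3] = hs_buckets[5],
 * offset = 43 & (8 - 1) = 3.
 */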

static inline void *cfs_hash_bd_extra_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
	return (void *)bd->bd_bucket +
	       cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
}

static inline __u32 cfs_hash_bd_version_get(cfs_hash_bd_t *bd)
{
	/* need to hold cfs_hash_bd_lock */
	return bd->bd_bucket->hsb_version;
}

static inline __u32 cfs_hash_bd_count_get(cfs_hash_bd_t *bd)
{
	/* need to hold cfs_hash_bd_lock */
	return bd->bd_bucket->hsb_count;
}

static inline int cfs_hash_bd_depmax_get(cfs_hash_bd_t *bd)
{
	return bd->bd_bucket->hsb_depmax;
}

static inline int cfs_hash_bd_compare(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
{
	if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index)
		return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index;

	if (bd1->bd_offset != bd2->bd_offset)
		return bd1->bd_offset - bd2->bd_offset;

	return 0;
}

void cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
			    struct hlist_node *hnode);
void cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
			    struct hlist_node *hnode);
void cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
			     cfs_hash_bd_t *bd_new, struct hlist_node *hnode);

static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd,
					   atomic_t *condition)
{
	LASSERT(cfs_hash_with_spin_bktlock(hs));
	return atomic_dec_and_lock(condition,
				   &bd->bd_bucket->hsb_lock.spin);
}

static inline struct hlist_head *cfs_hash_bd_hhead(cfs_hash_t *hs,
						   cfs_hash_bd_t *bd)
{
	return hs->hs_hops->hop_hhead(hs, bd);
}

struct hlist_node *cfs_hash_bd_lookup_locked(cfs_hash_t *hs,
				cfs_hash_bd_t *bd, const void *key);
struct hlist_node *cfs_hash_bd_peek_locked(cfs_hash_t *hs,
				cfs_hash_bd_t *bd, const void *key);
struct hlist_node *cfs_hash_bd_findadd_locked(cfs_hash_t *hs,
				cfs_hash_bd_t *bd, const void *key,
				struct hlist_node *hnode,
				int noref);
struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs,
				cfs_hash_bd_t *bd, const void *key,
				struct hlist_node *hnode);

/**
 * operations on cfs_hash bucket (bd: bucket descriptor),
 * they are safe for hash-table with rehash
 */
void cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds);
void cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);
void cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);

static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs,
						 const void *key,
						 cfs_hash_bd_t *bds, int excl)
{
	cfs_hash_dual_bd_get(hs, key, bds);
	cfs_hash_dual_bd_lock(hs, bds, excl);
}

struct hlist_node *cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs,
				cfs_hash_bd_t *bds, const void *key);
struct hlist_node *cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs,
				cfs_hash_bd_t *bds, const void *key,
				struct hlist_node *hnode, int noref);
struct hlist_node *cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs,
				cfs_hash_bd_t *bds, const void *key,
				struct hlist_node *hnode);
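
/*
 * Illustrative sketch: a rehash-safe lookup using the dual bucket
 * descriptors, which cover both the current table and, if a rehash is in
 * flight, the rehash table; hs and key are assumed to come from the caller:
 *
 *	cfs_hash_bd_t		bds[2];
 *	struct hlist_node	*hnode;
 *
 *	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
 *	hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
 *	cfs_hash_dual_bd_unlock(hs, bds, 0);
 *
 * This is the core of what the top-level cfs_hash_lookup() below does,
 * which additionally takes a reference on the found object before unlocking.
 */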

/* Hash init/cleanup functions */
cfs_hash_t *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
			    unsigned bkt_bits, unsigned extra_bytes,
			    unsigned min_theta, unsigned max_theta,
			    cfs_hash_ops_t *ops, unsigned flags);

cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs);
void cfs_hash_putref(cfs_hash_t *hs);

/* Hash addition functions */
void cfs_hash_add(cfs_hash_t *hs, const void *key,
		  struct hlist_node *hnode);
int cfs_hash_add_unique(cfs_hash_t *hs, const void *key,
			struct hlist_node *hnode);
void *cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
			      struct hlist_node *hnode);

/* Hash deletion functions */
void *cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode);
void *cfs_hash_del_key(cfs_hash_t *hs, const void *key);
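
/*
 * Illustrative usage sketch, reusing the hypothetical my_obj_hash_ops and
 * struct my_obj from the example above (my_hash, obj and rc are also
 * hypothetical): create a table with 2^10 hlist-heads growing up to 2^20,
 * CFS_HASH_BKT_BITS heads per bucket, no extra per-bucket bytes, and the
 * default theta thresholds; then add and remove one object:
 *
 *	cfs_hash_t *my_hash;
 *	int rc;
 *
 *	my_hash = cfs_hash_create("my_hash", 10, 20, CFS_HASH_BKT_BITS, 0,
 *				  CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *				  &my_obj_hash_ops, CFS_HASH_DEFAULT);
 *	if (my_hash == NULL)
 *		return -ENOMEM;
 *
 *	rc = cfs_hash_add_unique(my_hash, &obj->mo_key, &obj->mo_hnode);
 *	...
 *	cfs_hash_del(my_hash, &obj->mo_key, &obj->mo_hnode);
 *	cfs_hash_putref(my_hash);
 */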

/* Hash lookup/for_each functions */
#define CFS_HASH_LOOP_HOG	1024

typedef int (*cfs_hash_for_each_cb_t)(cfs_hash_t *hs, cfs_hash_bd_t *bd,
				      struct hlist_node *node, void *data);
void *cfs_hash_lookup(cfs_hash_t *hs, const void *key);
void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t,
			    void *data);
int cfs_hash_for_each_nolock(cfs_hash_t *hs,
			     cfs_hash_for_each_cb_t, void *data);
int cfs_hash_for_each_empty(cfs_hash_t *hs,
			    cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
			   cfs_hash_for_each_cb_t, void *data);
typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
void cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t, void *data);

void cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
			     cfs_hash_for_each_cb_t, void *data);
int cfs_hash_is_empty(cfs_hash_t *hs);
__u64 cfs_hash_size_get(cfs_hash_t *hs);
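
/*
 * Illustrative sketch of a cfs_hash_for_each_cb_t callback; my_count_cb and
 * my_hash are hypothetical. The callback receives the bucket descriptor and
 * the hlist node of each item, plus the opaque @data pointer passed to
 * cfs_hash_for_each():
 *
 *	static int my_count_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *			       struct hlist_node *hnode, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	cfs_hash_for_each(my_hash, my_count_cb, &count);
 */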

/*
 * Rehash - Theta is calculated to be the average chained
 * hash depth assuming a perfectly uniform hash function.
 */
void cfs_hash_rehash_cancel_locked(cfs_hash_t *hs);
void cfs_hash_rehash_cancel(cfs_hash_t *hs);
int cfs_hash_rehash(cfs_hash_t *hs, int do_rehash);
void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
			 void *new_key, struct hlist_node *hnode);

#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
/* Validate hnode references the correct key */
static inline void
cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
		      struct hlist_node *hnode)
{
	LASSERT(cfs_hash_keycmp(hs, key, hnode));
}

/* Validate hnode is in the correct bucket */
static inline void
cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
			 struct hlist_node *hnode)
{
	cfs_hash_bd_t bds[2];

	cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds);
	LASSERT(bds[0].bd_bucket == bd->bd_bucket ||
		bds[1].bd_bucket == bd->bd_bucket);
}

#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */

static inline void
cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
		      struct hlist_node *hnode) {}

static inline void
cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
			 struct hlist_node *hnode) {}

#endif /* CFS_HASH_DEBUG_LEVEL */

#define CFS_HASH_THETA_BITS	10
#define CFS_HASH_MIN_THETA	(1U << (CFS_HASH_THETA_BITS - 1))
#define CFS_HASH_MAX_THETA	(1U << (CFS_HASH_THETA_BITS + 1))

/* Return integer component of theta */
static inline int __cfs_hash_theta_int(int theta)
{
	return (theta >> CFS_HASH_THETA_BITS);
}

/* Return a fractional value between 0 and 999 */
static inline int __cfs_hash_theta_frac(int theta)
{
	return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
	       (__cfs_hash_theta_int(theta) * 1000);
}

static inline int __cfs_hash_theta(cfs_hash_t *hs)
{
	return (atomic_read(&hs->hs_count) <<
		CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
}
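
/*
 * Worked example of the fixed-point theta above (CFS_HASH_THETA_BITS = 10):
 * with hs_count = 1500 and hs_cur_bits = 10 (1024 hlist-heads),
 * __cfs_hash_theta() = (1500 << 10) >> 10 = 1500, i.e. an average depth of
 * 1500 / 1024 ~= 1.46; __cfs_hash_theta_int() gives 1 and
 * __cfs_hash_theta_frac() gives 464, which together represent 1.464.
 * Roughly speaking, a rehash is wanted when theta leaves the
 * [hs_min_theta, hs_max_theta] range.
 */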

static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
{
	hs->hs_min_theta = (__u16)min;
	hs->hs_max_theta = (__u16)max;
}

/* Generic debug formatting routines mainly for proc handler */
int cfs_hash_debug_header(struct seq_file *m);
int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m);

/**
 * Generic djb2 hash algorithm for character arrays.
 */
static inline unsigned
cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
{
	unsigned i, hash = 5381;

	LASSERT(key != NULL);

	for (i = 0; i < size; i++)
		hash = hash * 33 + ((char *)key)[i];

	return (hash & mask);
}
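
/*
 * Illustrative use of the djb2 helper above for a string key inside a
 * hs_hash callback; my_str_hashfn is a hypothetical example, not part of
 * this header (the mask argument comes from the hash-table itself):
 *
 *	static unsigned my_str_hashfn(cfs_hash_t *hs, const void *key,
 *				      unsigned mask)
 *	{
 *		return cfs_hash_djb2_hash(key, strlen(key), mask);
 *	}
 */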

/**
 * Generic u32 hash algorithm.
 */
static inline unsigned cfs_hash_u32_hash(const __u32 key, unsigned mask)
{
	return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
}

/**
 * Generic u64 hash algorithm.
 */
static inline unsigned cfs_hash_u64_hash(const __u64 key, unsigned mask)
{
	return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
}

/** iterate over all buckets in @bds (array of cfs_hash_bd_t) */
#define cfs_hash_for_each_bd(bds, n, i)	\
	for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++)

/** iterate over all buckets of @hs */
#define cfs_hash_for_each_bucket(hs, bd, pos)			\
	for (pos = 0;						\
	     pos < CFS_HASH_NBKT(hs) &&				\
	     ((bd)->bd_bucket = (hs)->hs_buckets[pos]) != NULL; pos++)

/** iterate over all hlist of bucket @bd */
#define cfs_hash_bd_for_each_hlist(hs, bd, hlist)		\
	for ((bd)->bd_offset = 0;				\
	     (bd)->bd_offset < CFS_HASH_BKT_NHLIST(hs) &&	\
	     (hlist = cfs_hash_bd_hhead(hs, bd)) != NULL;	\
	     (bd)->bd_offset++)
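
/*
 * Illustrative sketch combining the iterators above: walk every hlist-head
 * of every bucket of hs (hs is assumed to be a valid cfs_hash_t *; a real
 * walker would also need the relevant locks, and is usually better served
 * by cfs_hash_for_each() and friends):
 *
 *	cfs_hash_bd_t		bd;
 *	struct hlist_head	*hhead;
 *	unsigned		pos;
 *
 *	cfs_hash_for_each_bucket(hs, &bd, pos) {
 *		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
 *			... process hhead ...
 *		}
 *	}
 */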

#endif /* __LIBCFS_HASH_H__ */