1 #ifndef CEPH_CRUSH_CRUSH_H
2 #define CEPH_CRUSH_CRUSH_H
5 # include <linux/types.h>
7 # include "crush_compat.h"
/*
 * CRUSH is a pseudo-random data distribution algorithm that
 * efficiently distributes input values (typically, data objects)
 * across a heterogeneous, structured storage cluster.
 *
 * The algorithm was originally described in detail in this paper
 * (although the algorithm has evolved somewhat since then):
 *
 *     http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
 */
#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */
#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */
#define CRUSH_MAX_RULESET (1<<8) /* max crush ruleset number */
#define CRUSH_MAX_RULES CRUSH_MAX_RULESET /* should be the same as max rulesets */
/* weights use a 0x10000 (16-bit fractional) fixed-point scale; these are the
 * caps for a single device and a bucket, respectively */
#define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u)
#define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u)
/* sentinel item values returned by the mapper (valid items are smaller) */
#define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */
#define CRUSH_ITEM_NONE 0x7fffffff /* no result */
/*
 * CRUSH uses user-defined "rules" to describe how inputs should be
 * mapped to devices.  A rule consists of a sequence of steps to perform
 * to generate the set of output devices.
 */
41 struct crush_rule_step {
50 CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */
51 CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */
53 CRUSH_RULE_CHOOSE_INDEP = 3, /* same */
54 CRUSH_RULE_EMIT = 4, /* no args */
55 CRUSH_RULE_CHOOSELEAF_FIRSTN = 6,
56 CRUSH_RULE_CHOOSELEAF_INDEP = 7,
58 CRUSH_RULE_SET_CHOOSE_TRIES = 8, /* override choose_total_tries */
59 CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
60 CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
61 CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
62 CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
63 CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
/*
 * for specifying choose num (arg1) relative to the max parameter
 * passed to do_rule
 */
#define CRUSH_CHOOSE_N 0
#define CRUSH_CHOOSE_N_MINUS(x) (-(x))
/*
 * The rule mask is used to describe what the rule is intended for.
 * Given a ruleset and size of output set, we search through the
 * rule list for a matching rule_mask.
 */
78 struct crush_rule_mask {
87 struct crush_rule_mask mask;
88 struct crush_rule_step steps[0];
/* Total bytes to allocate for a crush_rule holding @len steps
 * (fixed header plus the trailing step array). */
#define crush_rule_size(len) \
	(sizeof(struct crush_rule) + (len)*sizeof(struct crush_rule_step))
/*
 * A bucket is a named container of other items (either devices or
 * other buckets).  Items within a bucket are chosen using one of a
 * few different algorithms.  The table summarizes how the speed of
 * each option measures up against mapping stability when items are
 * added or removed.
 *
 *  Bucket Alg     Speed       Additions    Removals
 *  ------------------------------------------------
 *  uniform         O(1)       poor         poor
 *  list            O(n)       optimal      poor
 *  tree            O(log n)   good         good
 *  straw           O(n)       better       better
 *  straw2          O(n)       optimal      optimal
 */
/* bucket selection algorithms (see table above).
 * NOTE(review): the enum opener/closer were missing from this excerpt;
 * reconstructed -- verify the tag name against the canonical crush.h. */
enum crush_algorithm {
	CRUSH_BUCKET_UNIFORM = 1,
	CRUSH_BUCKET_LIST = 2,
	CRUSH_BUCKET_TREE = 3,
	CRUSH_BUCKET_STRAW = 4,
	CRUSH_BUCKET_STRAW2 = 5,
};

/* human-readable name for an alg value (for error messages/dumps) */
extern const char *crush_bucket_alg_name(int alg);
/*
 * although tree was a legacy algorithm, it has been buggy, so
 * exclude it.
 */
#define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS ( \
		(1 << CRUSH_BUCKET_UNIFORM) | \
		(1 << CRUSH_BUCKET_LIST) | \
		(1 << CRUSH_BUCKET_STRAW))
129 struct crush_bucket {
130 __s32 id; /* this'll be negative */
131 __u16 type; /* non-zero; type=0 is reserved for devices */
132 __u8 alg; /* one of CRUSH_BUCKET_* */
133 __u8 hash; /* which hash function to use, CRUSH_HASH_* */
134 __u32 weight; /* 16-bit fixed point */
135 __u32 size; /* num items */
140 struct crush_bucket_uniform {
141 struct crush_bucket h;
142 __u32 item_weight; /* 16-bit fixed point; all items equally weighted */
145 struct crush_bucket_list {
146 struct crush_bucket h;
147 __u32 *item_weights; /* 16-bit fixed point */
148 __u32 *sum_weights; /* 16-bit fixed point. element i is sum
149 of weights 0..i, inclusive */
152 struct crush_bucket_tree {
153 struct crush_bucket h; /* note: h.size is _tree_ size, not number of
159 struct crush_bucket_straw {
160 struct crush_bucket h;
161 __u32 *item_weights; /* 16-bit fixed point */
162 __u32 *straws; /* 16-bit fixed point */
165 struct crush_bucket_straw2 {
166 struct crush_bucket h;
167 __u32 *item_weights; /* 16-bit fixed point */
/*
 * NOTE(review): this excerpt appears truncated -- the `struct crush_map {`
 * opener, several members (e.g. the max_buckets/max_rules/max_devices
 * counts and the working-space size member described below), and the
 * closing brace are not visible here.  Restore them from the canonical
 * crush.h before building; only comments have been repaired below.
 *
 * CRUSH map includes all buckets, rules, etc.
 */
	struct crush_bucket **buckets;
	struct crush_rule **rules;
	/* choose local retries before re-descent */
	__u32 choose_local_tries;
	/* choose local attempts using a fallback permutation before
	 * re-descent (continuation was lost in this excerpt -- verify) */
	__u32 choose_local_fallback_tries;
	/* choose attempts before giving up */
	__u32 choose_total_tries;
	/* attempt chooseleaf inner descent once for firstn mode; on
	 * reject retry outer descent.  Note that this does *not*
	 * apply to a collision: in that case we will retry as we used
	 * to (sentence truncated in this excerpt -- verify) */
	__u32 chooseleaf_descend_once;
	/* if non-zero, feed r into chooseleaf, bit-shifted right by (r-1)
	 * bits.  a value of 1 is best for new clusters.  for legacy clusters
	 * that want to limit reshuffling, a value of 3 or 4 will make the
	 * mappings line up a bit better with previous mappings. */
	__u8 chooseleaf_vary_r;
	/* if true, it makes chooseleaf firstn to return stable results (if
	 * no local retry) so that data migrations would be optimal when some
	 * device fails (continuation was lost in this excerpt -- verify) */
	__u8 chooseleaf_stable;
	/*
	 * This value is calculated after decode or construction by
	 * the builder.  It is exposed here (rather than having a
	 * 'build CRUSH working space' function) so that callers can
	 * reserve a static buffer, allocate space on the stack, or
	 * otherwise avoid calling into the heap allocator if they
	 * want to.  The size of the working space depends on the map,
	 * while the size of the scratch vector passed to the mapper
	 * depends on the size of the desired result set.
	 *
	 * Nothing stops the caller from allocating both in one swell
	 * foop and passing in two points, though.
	 *
	 * NOTE(review): the member this paragraph documents (the working
	 * size) is not visible in this excerpt.
	 */
	/*
	 * version 0 (original) of straw_calc has various flaws.  version 1
	 * fixes a few of them.
	 */
	__u8 straw_calc_version;
	/*
	 * allowed bucket algs is a bitmask, here the bit positions
	 * are CRUSH_BUCKET_*.  note that these are *bits* and
	 * CRUSH_BUCKET_* values are not, so we need to or together (1
	 * << CRUSH_BUCKET_WHATEVER).  The 0th bit is not used to
	 * minimize confusion (bucket type values start at 1).
	 */
	__u32 allowed_bucket_algs;
/* weight accessor for the item at @pos within bucket @b */
extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
/* per-algorithm bucket destructors */
extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
/* generic destructor -- presumably dispatches on b->alg; verify in crush.c */
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy_rule(struct crush_rule *r);
/* destroy an entire map (ownership of contained buckets/rules assumed) */
extern void crush_destroy(struct crush_map *map);
/*
 * Map tree-bucket item index @i to its node index in the flat node
 * array: node 2*(i+1)-1, i.e. items sit at the odd indices.
 *
 * Uses multiplication rather than `(i+1) << 1`: identical results for
 * the non-negative indices used here, but avoids the undefined
 * behavior of left-shifting a negative value.  Braces restored (they
 * were missing from this excerpt).
 */
static inline int crush_calc_tree_node(int i)
{
	return 2 * (i + 1) - 1;
}
/*
 * These data structures are private to the CRUSH implementation.  They
 * are exposed in this header file because builder needs their
 * definitions to calculate the total working size.
 *
 * Moving this out of the crush map allows us to treat the CRUSH map as
 * immutable within the mapper and removes the requirement for a CRUSH
 * map lock.
 */
268 struct crush_work_bucket {
269 __u32 perm_x; /* @x for which *perm is defined */
270 __u32 perm_n; /* num elements of *perm that are permuted/defined */
271 __u32 *perm; /* Permutation of the bucket's items */
275 struct crush_work_bucket **work; /* Per-bucket working store */