#include "crush/hash.h"
#include "crush/mapper.h"

#include "ceph_debug.h"

char *ceph_osdmap_state_str(char *str, int len, int state)
{
	int flag = 0;

	if (!len)
		goto done;

	*str = '\0';
	if (state) {
		if (state & CEPH_OSD_EXISTS) {
			snprintf(str, len, "exists");
			flag = 1;
		}
		if (state & CEPH_OSD_UP) {
			int n = strlen(str);

			/* append to the tail; snprintf() must not be
			 * given str as both destination and source */
			snprintf(str + n, len - n, "%s%s",
				 (flag ? ", " : ""), "up");
			flag = 1;
		}
	} else {
		snprintf(str, len, "doesn't exist");
	}
done:
	return str;
}

static int calc_bits_of(unsigned t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
	pi->pgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
	pi->lpg_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
	pi->lpgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}

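/*
 * Worked example (illustrative, not from the original source): for
 * pg_num = 12, calc_bits_of(12-1) = calc_bits_of(11) = 4, so
 * pg_num_mask = (1 << 4) - 1 = 0xf.  15 is the smallest 2^n-1 >= 12;
 * ceph_stable_mod() later uses these masks in place of a general
 * modulo when mapping a placement seed to a pg.
 */
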
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

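/*
 * Note (added for clarity): a uniform bucket encodes a single
 * item_weight, while list, tree, and straw buckets each carry
 * per-item arrays, which is why the four decoders above read
 * different payloads after the common crush_bucket header.
 */
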
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned)magic, (unsigned)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
	if (c->device_parents == NULL)
		goto badmem;
	c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
	if (c->bucket_parents == NULL)
		goto badmem;

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);
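
		/*
		 * Note (added for clarity): each bucket is encoded as
		 * the common header decoded above (id, type, alg, hash,
		 * weight, size, item list) followed by an alg-specific
		 * payload, dispatched below.
		 */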

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				 (struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
263 dout("rule vec is %p\n", c->rules);
264 for (i = 0; i < c->max_rules; i++) {
266 struct crush_rule *r;
268 ceph_decode_32_safe(p, end, yes, bad);
270 dout("crush_decode NO rule %d off %x %p to %p\n",
271 i, (int)(*p-start), *p, end);
276 dout("crush_decode rule %d off %x %p to %p\n",
277 i, (int)(*p-start), *p, end);
280 ceph_decode_32_safe(p, end, yes, bad);
281 #if BITS_PER_LONG == 32
283 if (yes > ULONG_MAX / sizeof(struct crush_rule_step))
286 r = c->rules[i] = kmalloc(sizeof(*r) +
287 yes*sizeof(struct crush_rule_step),
291 dout(" rule %d is at %p\n", i, r);
293 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
294 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
295 for (j = 0; j < r->len; j++) {
296 r->steps[j].op = ceph_decode_32(p);
297 r->steps[j].arg1 = ceph_decode_32(p);
298 r->steps[j].arg2 = ceph_decode_32(p);
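
	/*
	 * Note (added for clarity): each decoded rule step is an
	 * (op, arg1, arg2) triple whose args depend on the op, e.g. a
	 * "choose" op takes a replica count and a bucket type; the
	 * triples are interpreted later by crush_do_rule().
	 */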

	/* ignore trailing name maps. */

	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}

/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		rb_erase(&pi->node, &map->pg_pools);
		kfree(pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map);
}

/*
 * adjust max osd value.  reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	struct ceph_entity_addr *addr;
	u32 *weight;

	state = kcalloc(max, sizeof(*state), GFP_NOFS);
	addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
	weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
	if (state == NULL || addr == NULL || weight == NULL) {
		kfree(state);
		kfree(addr);
		kfree(weight);
		return -ENOMEM;
	}

	/* copy old? */
	if (map->osd_state) {
		memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
		memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
		memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
		kfree(map->osd_state);
		kfree(map->osd_addr);
		kfree(map->osd_weight);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	map->max_osd = max;
	return 0;
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	u64 a = *(u64 *)&l;
	u64 b = *(u64 *)&r;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

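/*
 * Note (added for clarity): the comparison treats the two pgids as raw
 * 64-bit values, which assumes struct ceph_pg is a packed 8-byte
 * structure; the same assumption shows up in the "*(u64 *)&pgid"
 * debug output elsewhere in this file.
 */
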
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return pg;
	}
	return NULL;
}

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

/*
 * decode a full map.
 */
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	u16 version;
	u32 len, max, i;
	u8 ev;
	int err = -EINVAL;
	void *start = *p;
	struct ceph_pg_pool_info *pi;

	dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (map == NULL)
		return ERR_PTR(-ENOMEM);
	map->pg_temp = RB_ROOT;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_VERSION) {
		pr_warning("got unknown v %d > %d of osdmap\n", version,
			   CEPH_OSDMAP_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	ceph_decode_32_safe(p, end, max, bad);
	while (max--) {
		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
		pi = kmalloc(sizeof(*pi), GFP_NOFS);
		if (!pi)
			goto bad;
		pi->id = ceph_decode_32(p);
		ev = ceph_decode_8(p);  /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			goto bad;
		}
		ceph_decode_copy(p, &pi->v, sizeof(pi->v));
		__insert_pg_pool(&map->pg_pools, pi);
		calc_pg_masks(pi);
		*p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64);
		*p += le32_to_cpu(pi->v.num_removed_snap_intervals)
			* sizeof(u64) * 2;
	}
	ceph_decode_32_safe(p, end, map->pool_max, bad);

	ceph_decode_32_safe(p, end, map->flags, bad);

	max = ceph_decode_32(p);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err < 0)
		goto bad;
	dout("osdmap_decode max_osd = %d\n", map->max_osd);

	/* osds */
	err = -EINVAL;
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), bad);
	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_state, map->max_osd);

	*p += 4; /* skip length field (should match max) */
	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);
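
	/*
	 * Note (added for clarity): the osd state, weight, and address
	 * arrays are each encoded as a u32 element count followed by
	 * the elements; since each count must match the max decoded
	 * above, the length fields are simply skipped.
	 */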

	/* pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	for (i = 0; i < len; i++) {
		int n, j;
		struct ceph_pg pgid;
		struct ceph_pg_mapping *pg;

		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		n = ceph_decode_32(p);
		ceph_decode_need(p, end, n * sizeof(u32), bad);
		err = -ENOMEM;
		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
		if (!pg)
			goto bad;
		pg->pgid = pgid;
		pg->len = n;
		for (j = 0; j < n; j++)
			pg->osds[j] = ceph_decode_32(p);

		err = __insert_pg_mapping(pg, &map->pg_temp);
		if (err)
			goto bad;
		dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
	}
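
	/*
	 * Note (added for clarity): pg_temp entries are explicit,
	 * usually temporary, pgid -> osd list mappings that override
	 * the CRUSH calculation, e.g. while data is being migrated;
	 * calc_pg_raw() consults this tree before running CRUSH.
	 */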

	/* crush */
	ceph_decode_32_safe(p, end, len, bad);
	dout("osdmap_decode crush len %d from off 0x%x\n", len,
	     (int)(*p - start));
	ceph_decode_need(p, end, len, bad);
	map->crush = crush_decode(*p, end);
	*p += len;
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}

	/* ignore the rest of the map */
	*p = end;

	dout("osdmap_decode done %p %p\n", *p, end);
	return map;

bad:
	dout("osdmap_decode fail\n");
	ceph_osdmap_destroy(map);
	return ERR_PTR(err);
}

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	u32 len, pool;
	__s32 new_pool_max, new_flags, max;
	void *start = *p;
	int err = -EINVAL;
	u16 version;
	struct rb_node *rbp;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_INC_VERSION) {
		pr_warning("got unknown v %d > %d of inc osdmap\n", version,
			   CEPH_OSDMAP_INC_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
			 bad);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_32(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental new crush map len %d, %p to %p\n",
		     len, *p, end);
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush))
			return ERR_PTR(PTR_ERR(newcrush));
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	ceph_decode_need(p, end, 5*sizeof(u32), bad);

	/* new max? */
	max = ceph_decode_32(p);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err < 0)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}
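
	/*
	 * Note (added for clarity): a new crush map always replaces
	 * the old one wholesale; incremental updates never patch the
	 * crush hierarchy in place.
	 */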

	/* new_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		__u8 ev;
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
		ev = ceph_decode_8(p);  /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			goto bad;
		}
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!pi) {
			pi = kmalloc(sizeof(*pi), GFP_NOFS);
			if (!pi) {
				err = -ENOMEM;
				goto bad;
			}
			pi->id = pool;
			__insert_pg_pool(&map->pg_pools, pi);
		}
		ceph_decode_copy(p, &pi->v, sizeof(pi->v));
		calc_pg_masks(pi);
	}

	/* old_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			rb_erase(&pi->node, &map->pg_pools);
			kfree(pi);
		}
	}

	/* new_up */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_down */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		ceph_decode_32_safe(p, end, osd, bad);
		(*p)++;  /* clean flag */
		pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] &= ~CEPH_OSD_UP;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
			off == CEPH_OSD_IN ? "(in)" :
			(off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}
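
	/*
	 * Note (added for clarity): CEPH_OSD_IN (0x10000) and
	 * CEPH_OSD_OUT (0) are the endpoints of the weight scale; any
	 * value in between marks an osd that is "in" with reduced
	 * weight.
	 */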

	/* new_pg_temp */
	rbp = rb_first(&map->pg_temp);
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_mapping *pg;
		int j;
		struct ceph_pg pgid;
		u32 pglen;
		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		pglen = ceph_decode_32(p);

		/* remove any existing entries up to and including pgid */
		while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
						node)->pgid, pgid) <= 0) {
			struct rb_node *cur = rbp;
			rbp = rb_next(rbp);
			dout(" removed pg_temp %llx\n",
			     *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
					       node)->pgid);
			rb_erase(cur, &map->pg_temp);
			kfree(rb_entry(cur, struct ceph_pg_mapping, node));
		}

		if (pglen) {
			ceph_decode_need(p, end, pglen*sizeof(u32), bad);
			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
			if (!pg) {
				err = -ENOMEM;
				goto bad;
			}
			pg->pgid = pgid;
			pg->len = pglen;
			for (j = 0; j < pglen; j++)
				pg->osds[j] = ceph_decode_32(p);
			err = __insert_pg_mapping(pg, &map->pg_temp);
			if (err)
				goto bad;
			dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
			     pglen);
		}
	}
	while (rbp) {
		struct rb_node *cur = rbp;
		rbp = rb_next(rbp);
		dout(" removed pg_temp %llx\n",
		     *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
				       node)->pgid);
		rb_erase(cur, &map->pg_temp);
		kfree(rb_entry(cur, struct ceph_pg_mapping, node));
	}

	/* ignore the rest */
	*p = end;
	return map;

bad:
	pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
	       epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				   u64 off, u64 *plen,
				   u64 *ono,
				   u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
	     osize, su);
	su_per_object = osize / su;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	BUG_ON((su & ~PAGE_MASK) != 0);
	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object. This is the minimum of the full length requested (plen) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, *plen, su - su_offset);
	*plen = *oxlen;

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}

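/*
 * Worked example (illustrative numbers, not from the original source):
 * with su = 1 MB, sc = 3, and osize = 4 MB (so su_per_object = 4), a
 * write at file offset 5.5 MB gives bl = 5, stripeno = 1, stripepos = 2,
 * objsetno = 0, hence ono = 0*3 + 2 = 2.  su_offset = 0.5 MB, so
 * oxoff = 0.5 MB + (1 % 4)*1 MB = 1.5 MB, and oxlen is capped at
 * su - su_offset = 0.5 MB: the extent lands in object 2 at offset
 * 1.5 MB, at most 0.5 MB long.
 */
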
/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
int ceph_calc_object_layout(struct ceph_object_layout *ol,
			    const char *oid,
			    struct ceph_file_layout *fl,
			    struct ceph_osdmap *osdmap)
{
	unsigned num, num_mask;
	struct ceph_pg pgid;
	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
	int poolid = le32_to_cpu(fl->fl_pg_pool);
	struct ceph_pg_pool_info *pool;
	unsigned ps;

	BUG_ON(!osdmap);

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return -EIO;
	ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
	if (preferred >= 0) {
		ps += preferred;
		num = le32_to_cpu(pool->v.lpg_num);
		num_mask = pool->lpg_num_mask;
	} else {
		num = le32_to_cpu(pool->v.pg_num);
		num_mask = pool->pg_num_mask;
	}

	pgid.ps = cpu_to_le16(ps);
	pgid.preferred = cpu_to_le16(preferred);
	pgid.pool = fl->fl_pg_pool;
	if (preferred >= 0)
		dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
		     (int)preferred);
	else
		dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);

	ol->ol_pgid = pgid;
	ol->ol_stripe_unit = fl->fl_object_stripe_unit;
	return 0;
}

/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *num)
{
	struct ceph_pg_mapping *pg;
	struct ceph_pg_pool_info *pool;
	int ruleno;
	unsigned poolid, ps, pps;
	int preferred;

	/* pg_temp? */
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		*num = pg->len;
		return pg->osds;
	}

	/* crush? */
	poolid = le32_to_cpu(pgid.pool);
	ps = le16_to_cpu(pgid.ps);
	preferred = (s16)le16_to_cpu(pgid.preferred);

	/* don't forcefeed bad device ids to crush */
	if (preferred >= osdmap->max_osd ||
	    preferred >= osdmap->crush->max_devices)
		preferred = -1;

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return NULL;
	ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
				 pool->v.type, pool->v.size);
	if (ruleno < 0) {
		pr_err("no crush rule pool %d type %d size %d\n",
		       poolid, pool->v.type, pool->v.size);
		return NULL;
	}

	if (preferred >= 0)
		pps = ceph_stable_mod(ps,
				      le32_to_cpu(pool->v.lpgp_num),
				      pool->lpgp_num_mask);
	else
		pps = ceph_stable_mod(ps,
				      le32_to_cpu(pool->v.pgp_num),
				      pool->pgp_num_mask);
	pps += poolid;
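
	/*
	 * Note (added for clarity): ceph_stable_mod(x, b, bmask) maps x
	 * into [0, b) using the power-of-two mask computed by
	 * calc_pg_masks(): it keeps x & bmask when that already falls
	 * below b, and otherwise falls back to x & (bmask >> 1), so
	 * most placements stay stable while pg counts grow.
	 */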
	*num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
			     min_t(int, pool->v.size, *num),
			     preferred, osdmap->osd_weight);
	return osds;
}

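/*
 * Note (added for clarity): the list returned above is the "raw" CRUSH
 * result; callers such as ceph_calc_pg_primary() still have to filter
 * it against osd up/down state.
 */
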
/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int rawosds[10], *osds;
	int i, num = ARRAY_SIZE(rawosds);

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i])) {
			return osds[i];
		}
	return -1;
}