/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

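/*
 * Illustration (not part of the driver): together these two helpers
 * implement a saturating reference count that can never be resurrected
 * from zero.  A typical pairing, as used for parent_ref further below,
 * looks like:
 *
 *	if (atomic_inc_return_safe(&ref) > 0) {
 *		... use the referenced object ...
 *		if (atomic_dec_return_safe(&ref) == 0)
 *			... last reference dropped, tear down ...
 *	}
 *
 * Once the counter has fallen to 0, atomic_inc_return_safe() leaves it
 * at 0 and the caller must treat the object as gone.
 */
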
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

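/*
 * Worked example for the limit above: a snapshot context stores one
 * 64-bit snapshot id per snapshot, so 510 ids consume 510 * 8 = 4080
 * bytes, which together with the snapshot context header stays within
 * a single 4KB page.
 */
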
#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)

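/*
 * Worked example for MAX_INT_FORMAT_WIDTH: for a 4-byte int this is
 * (5 * 4) / 2 + 1 = 11 characters, enough for "-2147483648".  The 2.5
 * characters per byte over-estimates the roughly 2.41 decimal digits
 * a byte actually contributes, so the buffer can never be too small.
 */
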
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

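/*
 * Illustration (not part of the driver): a typical traversal with the
 * helpers above, e.g. summing the bytes transferred by every object
 * request in an image request:
 *
 *	struct rbd_obj_request *obj_request;
 *	u64 xferred = 0;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		xferred += obj_request->xferred;
 *
 * The _safe variant walks the list in reverse and tolerates deletion of
 * the current entry; the _from variant resumes from a known member.
 */
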
struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	struct list_head	rq_queue;	/* incoming rq queue */
	spinlock_t		lock;		/* queue, flags, open_count */
	struct workqueue_struct	*rq_wq;
	struct work_struct	rq_work;

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

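/*
 * Worked example: with RBD_SINGLE_MAJOR_PART_SHIFT of 4, each device
 * gets 2^4 = 16 minors (the whole disk plus 15 partitions), so dev_id 3
 * maps to minor 48, and minors 48..63 all map back to dev_id 3.
 */
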
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots are read-only, so they can't be mapped read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to remove
 * the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

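/*
 * Illustration (not part of the driver): inverting the usual comparison
 * result makes bsearch() treat a descending array as sorted.  For the
 * snapshot id array { 8, 5, 2 }, looking up 5 probes the middle entry
 * first, snapid_compare_reverse() returns 0 there, and bsearch()
 * returns a pointer to index 1.  A key smaller than the probed entry
 * returns 1, steering the search toward the higher indexes where the
 * smaller ids live.
 */
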
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}

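/*
 * Worked example (with a hypothetical object prefix): a format 2 image
 * with prefix "rbd_data.101b74b0dc51" and 4MB objects (order 22) maps
 * image offset 0x1400000 to segment 5, which "%s.%016llx" formats as
 * "rbd_data.101b74b0dc51.0000000000000005".  A format 1 image would use
 * the 12-digit "%s.%012llx" form instead.
 */
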
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

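/*
 * Worked example: with 4MB objects (order 22, segment_size 0x400000),
 * an image request at offset 0x3ff000 for 0x3000 bytes has segment
 * offset 0x3ff000 and gets its length clipped to 0x1000 -- the
 * remaining 0x2000 bytes fall in the next object and become a separate
 * object request.
 */
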
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}

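/*
 * Illustration (not part of the driver): cloning 6K starting 1K into a
 * chain of two 4K bios produces a two-bio result chain -- a 3K clone of
 * the tail of the first bio and a 3K clone of the head of the second.
 * On return *bio_src points at the second source bio and *offset is 3K,
 * so the next rbd_img_request_fill() iteration continues from there.
 */
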
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

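/*
 * Illustration (not part of the driver): the set/test helpers above
 * implement a one-way flag pattern.  A typical consumer checks KNOWN
 * before trusting EXISTS:
 *
 *	if (!obj_request_known_test(obj_request))
 *		... issue a STAT to find out ...
 *	else if (obj_request_exists_test(obj_request))
 *		... target object exists, write it directly ...
 *	else
 *		... doesn't exist, copy up from the parent first ...
 *
 * Because EXISTS is only ever set (just before KNOWN) and never
 * cleared, a stale "doesn't exist" reply that races with object
 * creation is harmlessly ignored.
 */
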
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 */
static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	int ret;

	dout("%s %p\n", __func__, obj_request);

	ret = wait_for_completion_interruptible(&obj_request->completion);
	if (ret < 0) {
		dout("%s %p interrupted\n", __func__, obj_request);
		rbd_obj_request_end(obj_request);
		return ret;
	}

	dout("%s %p done\n", __func__, obj_request);
	return 0;
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}

/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has three osd ops,
 * a copyup method call, a hint op, and a write op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the three ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return false;
}

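/*
 * Illustration (not part of the driver): the get/put pair brackets each
 * image request's use of the parent, as rbd_img_request_create() and
 * rbd_img_request_destroy() below do:
 *
 *	if (rbd_dev_parent_get(rbd_dev))
 *		img_request_layered_set(img_request);
 *	...
 *	if (img_request_layered_test(img_request)) {
 *		img_request_layered_clear(img_request);
 *		rbd_dev_parent_put(img_request->rbd_dev);
 *	}
 *
 * The saturating counter ensures that once a flattened image's
 * reference has dropped to zero, its parent state is never revived.
 */
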
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}

static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}

2311 * Split up an image request into one or more object requests, each
2312 * to a different object. The "type" parameter indicates whether
2313 * "data_desc" is the pointer to the head of a list of bio
2314 * structures, or the base of a page array. In either case this
2315 * function assumes data_desc describes memory sufficient to hold
2316 * all data described by the image request.
2318 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2319 enum obj_request_type type,
2322 struct rbd_device *rbd_dev = img_request->rbd_dev;
2323 struct rbd_obj_request *obj_request = NULL;
2324 struct rbd_obj_request *next_obj_request;
2325 struct bio *bio_list = NULL;
2326 unsigned int bio_offset = 0;
2327 struct page **pages = NULL;
2328 enum obj_operation_type op_type;
2329 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2335 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2336 (int)type, data_desc);
2338 img_offset = img_request->offset;
2339 resid = img_request->length;
2340 rbd_assert(resid > 0);
2342 if (type == OBJ_REQUEST_BIO) {
2343 bio_list = data_desc;
2344 rbd_assert(img_offset ==
2345 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2346 } else if (type == OBJ_REQUEST_PAGES) {
2351 struct ceph_osd_request *osd_req;
2352 const char *object_name;
2355 unsigned int which = 0;
2357 object_name = rbd_segment_name(rbd_dev, img_offset);
2360 offset = rbd_segment_offset(rbd_dev, img_offset);
2361 length = rbd_segment_length(rbd_dev, img_offset, resid);
2362 obj_request = rbd_obj_request_create(object_name,
2363 offset, length, type);
2364 /* object request has its own copy of the object name */
2365 rbd_segment_name_free(object_name);
2370 * set obj_request->img_request before creating the
2371 * osd_request so that it gets the right snapc
2373 rbd_img_obj_request_add(img_request, obj_request);
2375 if (type == OBJ_REQUEST_BIO) {
2376 unsigned int clone_size;
2378 rbd_assert(length <= (u64)UINT_MAX);
2379 clone_size = (unsigned int)length;
2380 obj_request->bio_list =
2381 bio_chain_clone_range(&bio_list,
2385 if (!obj_request->bio_list)
2387 } else if (type == OBJ_REQUEST_PAGES) {
2388 unsigned int page_count;
2390 obj_request->pages = pages;
2391 page_count = (u32)calc_pages_for(offset, length);
2392 obj_request->page_count = page_count;
2393 if ((offset + length) & ~PAGE_MASK)
2394 page_count--; /* more on last page */
2395 pages += page_count;
2398 if (img_request_discard_test(img_request)) {
2399 op_type = OBJ_OP_DISCARD;
2400 if (!offset && (length == object_size)
2401 && (!img_request_layered_test(img_request) ||
2402 (rbd_dev->parent_overlap <=
2403 obj_request->img_offset))) {
2404 opcode = CEPH_OSD_OP_DELETE;
2405 } else if ((offset + length == object_size)) {
2406 opcode = CEPH_OSD_OP_TRUNCATE;
2408 down_read(&rbd_dev->header_rwsem);
2409 img_end = rbd_dev->header.image_size;
2410 up_read(&rbd_dev->header_rwsem);
2412 if (obj_request->img_offset + length == img_end)
2413 opcode = CEPH_OSD_OP_TRUNCATE;
2415 opcode = CEPH_OSD_OP_ZERO;
2417 } else if (img_request_write_test(img_request)) {
2418 op_type = OBJ_OP_WRITE;
2419 opcode = CEPH_OSD_OP_WRITE;
2421 op_type = OBJ_OP_READ;
2422 opcode = CEPH_OSD_OP_READ;
2425 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2426 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2430 obj_request->osd_req = osd_req;
2431 obj_request->callback = rbd_img_obj_callback;
2432 rbd_img_request_get(img_request);
2434 if (op_type == OBJ_OP_WRITE) {
2435 osd_req_op_alloc_hint_init(osd_req, which,
2436 rbd_obj_bytes(&rbd_dev->header),
2437 rbd_obj_bytes(&rbd_dev->header));
2441 osd_req_op_extent_init(osd_req, which, opcode, offset, length,
2443 if (type == OBJ_REQUEST_BIO)
2444 osd_req_op_extent_osd_data_bio(osd_req, which,
2445 obj_request->bio_list, length);
2446 else if (type == OBJ_REQUEST_PAGES)
2447 osd_req_op_extent_osd_data_pages(osd_req, which,
2448 obj_request->pages, length,
2449 offset & ~PAGE_MASK, false, false);
2451 /* Discards are also writes */
2452 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2453 rbd_osd_req_format_write(obj_request);
2455 rbd_osd_req_format_read(obj_request);
2457 obj_request->img_offset = img_offset;
2459 img_offset += length;
2466 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2467 rbd_img_obj_request_del(img_request, obj_request);
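/*
 * Illustrative sketch (userspace, not built into the driver) of the
 * segmenting arithmetic used by rbd_img_request_fill() above: an image
 * extent is walked one object at a time, with each piece clipped at an
 * object boundary.  The helper names here are local to the sketch; the
 * driver's equivalents are rbd_segment_offset() and
 * rbd_segment_length().  Assumes a power-of-two object size, as the
 * driver does in the absence of fancy striping.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t seg_offset(uint64_t obj_size, uint64_t img_offset)
{
	return img_offset & (obj_size - 1);	/* offset within object */
}

static uint64_t seg_length(uint64_t obj_size, uint64_t img_offset,
			   uint64_t resid)
{
	uint64_t left = obj_size - seg_offset(obj_size, img_offset);

	return resid < left ? resid : left;	/* clip at object boundary */
}

int main(void)
{
	uint64_t obj_size = 1ULL << 22;		/* 4M objects */
	uint64_t img_offset = 3ULL << 20;	/* I/O starting at 3M... */
	uint64_t resid = 6ULL << 20;		/* ...covering 6M */

	while (resid) {
		uint64_t len = seg_length(obj_size, img_offset, resid);

		printf("object %llu: offset %llu length %llu\n",
		       (unsigned long long)(img_offset / obj_size),
		       (unsigned long long)seg_offset(obj_size, img_offset),
		       (unsigned long long)len);
		img_offset += len;
		resid -= len;
	}
	return 0;
}
#endif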
2473 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2475 struct rbd_img_request *img_request;
2476 struct rbd_device *rbd_dev;
2477 struct page **pages;
2480 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2481 rbd_assert(obj_request_img_data_test(obj_request));
2482 img_request = obj_request->img_request;
2483 rbd_assert(img_request);
2485 rbd_dev = img_request->rbd_dev;
2486 rbd_assert(rbd_dev);
2488 pages = obj_request->copyup_pages;
2489 rbd_assert(pages != NULL);
2490 obj_request->copyup_pages = NULL;
2491 page_count = obj_request->copyup_page_count;
2492 rbd_assert(page_count);
2493 obj_request->copyup_page_count = 0;
2494 ceph_release_page_vector(pages, page_count);
2497 * We want the transfer count to reflect the size of the
2498 * original write request. There is no such thing as a
2499 * successful short write, so if the request was successful
2500 * we can just set it to the originally-requested length.
2502 if (!obj_request->result)
2503 obj_request->xferred = obj_request->length;
2505 /* Finish up with the normal image object callback */
2507 rbd_img_obj_callback(obj_request);
2511 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2513 struct rbd_obj_request *orig_request;
2514 struct ceph_osd_request *osd_req;
2515 struct ceph_osd_client *osdc;
2516 struct rbd_device *rbd_dev;
2517 struct page **pages;
2524 rbd_assert(img_request_child_test(img_request));
2526 /* First get what we need from the image request */
2528 pages = img_request->copyup_pages;
2529 rbd_assert(pages != NULL);
2530 img_request->copyup_pages = NULL;
2531 page_count = img_request->copyup_page_count;
2532 rbd_assert(page_count);
2533 img_request->copyup_page_count = 0;
2535 orig_request = img_request->obj_request;
2536 rbd_assert(orig_request != NULL);
2537 rbd_assert(obj_request_type_valid(orig_request->type));
2538 img_result = img_request->result;
2539 parent_length = img_request->length;
2540 rbd_assert(parent_length == img_request->xferred);
2541 rbd_img_request_put(img_request);
2543 rbd_assert(orig_request->img_request);
2544 rbd_dev = orig_request->img_request->rbd_dev;
2545 rbd_assert(rbd_dev);
2548 * If the overlap has become 0 (most likely because the
2549 * image has been flattened) we need to free the pages
2550 * and re-submit the original write request.
2552 if (!rbd_dev->parent_overlap) {
2553 struct ceph_osd_client *osdc;
2555 ceph_release_page_vector(pages, page_count);
2556 osdc = &rbd_dev->rbd_client->client->osdc;
2557 img_result = rbd_obj_request_submit(osdc, orig_request);
2566 * The original osd request is of no use to us any more.
2567 * We need a new one that can hold the three ops in a copyup
2568 * request. Allocate the new copyup osd request for the
2569 * original request, and release the old one.
2571 img_result = -ENOMEM;
2572 osd_req = rbd_osd_req_create_copyup(orig_request);
2575 rbd_osd_req_destroy(orig_request->osd_req);
2576 orig_request->osd_req = osd_req;
2577 orig_request->copyup_pages = pages;
2578 orig_request->copyup_page_count = page_count;
2580 /* Initialize the copyup op */
2582 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2583 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2586 /* Then the hint op */
2588 osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
2589 rbd_obj_bytes(&rbd_dev->header));
2591 /* And the original write request op */
2593 offset = orig_request->offset;
2594 length = orig_request->length;
2595 osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
2596 offset, length, 0, 0);
2597 if (orig_request->type == OBJ_REQUEST_BIO)
2598 osd_req_op_extent_osd_data_bio(osd_req, 2,
2599 orig_request->bio_list, length);
2601 osd_req_op_extent_osd_data_pages(osd_req, 2,
2602 orig_request->pages, length,
2603 offset & ~PAGE_MASK, false, false);
2605 rbd_osd_req_format_write(orig_request);
2607 /* All set, send it off. */
2609 orig_request->callback = rbd_img_obj_copyup_callback;
2610 osdc = &rbd_dev->rbd_client->client->osdc;
2611 img_result = rbd_obj_request_submit(osdc, orig_request);
2615 /* Record the error code and complete the request */
2617 orig_request->result = img_result;
2618 orig_request->xferred = 0;
2619 obj_request_done_set(orig_request);
2620 rbd_obj_request_complete(orig_request);
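/*
 * For reference, a sketch of the compound OSD request assembled in the
 * callback above (op indices as used in this function):
 *
 *	op 0: CALL rbd.copyup		<- parent data (copyup_pages)
 *	op 1: SETALLOCHINT		<- object/write size hints
 *	op 2: WRITE offset~length	<- the original write payload
 *
 * In effect the OSD-side copyup method populates the target object
 * from the parent data only if the object doesn't already exist, and
 * the guest write is then applied on top.
 */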
2624 * Read from the parent image the range of data that covers the
2625 * entire target of the given object request. This is used for
2626 * satisfying a layered image write request when the target of an
2627 * object request from the image request does not exist.
2629 * A page array big enough to hold the returned data is allocated
2630 * and supplied to rbd_img_request_fill() as the "data descriptor."
2631 * When the read completes, this page array will be transferred to
2632 * the original object request for the copyup operation.
2634 * If an error occurs, record it as the result of the original
2635 * object request and mark it done so it gets completed.
2637 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2639 struct rbd_img_request *img_request = NULL;
2640 struct rbd_img_request *parent_request = NULL;
2641 struct rbd_device *rbd_dev;
2644 struct page **pages = NULL;
2648 rbd_assert(obj_request_img_data_test(obj_request));
2649 rbd_assert(obj_request_type_valid(obj_request->type));
2651 img_request = obj_request->img_request;
2652 rbd_assert(img_request != NULL);
2653 rbd_dev = img_request->rbd_dev;
2654 rbd_assert(rbd_dev->parent != NULL);
2657 * Determine the byte range covered by the object in the
2658 * child image to which the original request was to be sent.
2660 img_offset = obj_request->img_offset - obj_request->offset;
2661 length = (u64)1 << rbd_dev->header.obj_order;
2664 * There is no defined parent data beyond the parent
2665 * overlap, so limit what we read at that boundary if
2666 * necessary.
2668 if (img_offset + length > rbd_dev->parent_overlap) {
2669 rbd_assert(img_offset < rbd_dev->parent_overlap);
2670 length = rbd_dev->parent_overlap - img_offset;
2674 * Allocate a page array big enough to receive the data read
2675 * from the parent.
2677 page_count = (u32)calc_pages_for(0, length);
2678 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2679 if (IS_ERR(pages)) {
2680 result = PTR_ERR(pages);
2686 parent_request = rbd_parent_request_create(obj_request,
2687 img_offset, length);
2688 if (!parent_request)
2691 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2694 parent_request->copyup_pages = pages;
2695 parent_request->copyup_page_count = page_count;
2697 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2698 result = rbd_img_request_submit(parent_request);
2702 parent_request->copyup_pages = NULL;
2703 parent_request->copyup_page_count = 0;
2704 parent_request->obj_request = NULL;
2705 rbd_obj_request_put(obj_request);
2708 ceph_release_page_vector(pages, page_count);
2710 rbd_img_request_put(parent_request);
2711 obj_request->result = result;
2712 obj_request->xferred = 0;
2713 obj_request_done_set(obj_request);
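/*
 * Illustrative sketch (userspace, not built) of the range computed
 * above: the parent read covers the whole object containing the
 * write, then is clipped at the parent overlap.  Names are local to
 * the sketch.
 */
#if 0
#include <stdint.h>
#include <assert.h>

static uint64_t parent_read_range(uint64_t img_offset, uint64_t obj_offset,
				  uint8_t obj_order, uint64_t parent_overlap,
				  uint64_t *length)
{
	uint64_t start = img_offset - obj_offset;	/* object start in image */
	uint64_t len = (uint64_t)1 << obj_order;	/* whole object */

	if (start + len > parent_overlap) {
		assert(start < parent_overlap);		/* caller guarantees */
		len = parent_overlap - start;		/* clip at overlap */
	}
	*length = len;
	return start;
}
#endif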
2718 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2720 struct rbd_obj_request *orig_request;
2721 struct rbd_device *rbd_dev;
2724 rbd_assert(!obj_request_img_data_test(obj_request));
2727 * All we need from the object request is the original
2728 * request and the result of the STAT op. Grab those, then
2729 * we're done with the request.
2731 orig_request = obj_request->obj_request;
2732 obj_request->obj_request = NULL;
2733 rbd_obj_request_put(orig_request);
2734 rbd_assert(orig_request);
2735 rbd_assert(orig_request->img_request);
2737 result = obj_request->result;
2738 obj_request->result = 0;
2740 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2741 obj_request, orig_request, result,
2742 obj_request->xferred, obj_request->length);
2743 rbd_obj_request_put(obj_request);
2746 * If the overlap has become 0 (most likely because the
2747 * image has been flattened) we need to free the pages
2748 * and re-submit the original write request.
2750 rbd_dev = orig_request->img_request->rbd_dev;
2751 if (!rbd_dev->parent_overlap) {
2752 struct ceph_osd_client *osdc;
2754 osdc = &rbd_dev->rbd_client->client->osdc;
2755 result = rbd_obj_request_submit(osdc, orig_request);
2761 * Our only purpose here is to determine whether the object
2762 * exists, and we don't want to treat the non-existence as
2763 * an error. If something else comes back, transfer the
2764 * error to the original request and complete it now.
2767 obj_request_existence_set(orig_request, true);
2768 } else if (result == -ENOENT) {
2769 obj_request_existence_set(orig_request, false);
2770 } else if (result) {
2771 orig_request->result = result;
2776 * Resubmit the original request now that we have recorded
2777 * whether the target object exists.
2779 orig_request->result = rbd_img_obj_request_submit(orig_request);
2781 if (orig_request->result)
2782 rbd_obj_request_complete(orig_request);
2785 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2787 struct rbd_obj_request *stat_request;
2788 struct rbd_device *rbd_dev;
2789 struct ceph_osd_client *osdc;
2790 struct page **pages = NULL;
2796 * The response data for a STAT call consists of:
2797 *     le64 length;
2798 *     struct {
2799 *         le32 tv_sec;
2800 *         le32 tv_nsec;
2801 *     } mtime;
2803 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2804 page_count = (u32)calc_pages_for(0, size);
2805 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2807 return PTR_ERR(pages);
2810 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2815 rbd_obj_request_get(obj_request);
2816 stat_request->obj_request = obj_request;
2817 stat_request->pages = pages;
2818 stat_request->page_count = page_count;
2820 rbd_assert(obj_request->img_request);
2821 rbd_dev = obj_request->img_request->rbd_dev;
2822 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2824 if (!stat_request->osd_req)
2826 stat_request->callback = rbd_img_obj_exists_callback;
2828 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2829 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2831 rbd_osd_req_format_read(stat_request);
2833 osdc = &rbd_dev->rbd_client->client->osdc;
2834 ret = rbd_obj_request_submit(osdc, stat_request);
2837 rbd_obj_request_put(obj_request);
2842 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2844 struct rbd_img_request *img_request;
2845 struct rbd_device *rbd_dev;
2847 rbd_assert(obj_request_img_data_test(obj_request));
2849 img_request = obj_request->img_request;
2850 rbd_assert(img_request);
2851 rbd_dev = img_request->rbd_dev;
2854 if (!img_request_write_test(img_request) &&
2855 !img_request_discard_test(img_request))
2858 /* Non-layered writes */
2859 if (!img_request_layered_test(img_request))
2863 * Layered writes outside of the parent overlap range don't
2864 * share any data with the parent.
2866 if (!obj_request_overlaps_parent(obj_request))
2870 * Entire-object layered writes - we will overwrite whatever
2871 * parent data there is anyway.
2873 if (!obj_request->offset &&
2874 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2878 * If the object is known to already exist, its parent data has
2879 * already been copied.
2881 if (obj_request_known_test(obj_request) &&
2882 obj_request_exists_test(obj_request))
2888 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2890 if (img_obj_request_simple(obj_request)) {
2891 struct rbd_device *rbd_dev;
2892 struct ceph_osd_client *osdc;
2894 rbd_dev = obj_request->img_request->rbd_dev;
2895 osdc = &rbd_dev->rbd_client->client->osdc;
2897 return rbd_obj_request_submit(osdc, obj_request);
2901 * It's a layered write. The target object might exist but
2902 * we may not know that yet. If we know it doesn't exist,
2903 * start by reading the data for the full target object from
2904 * the parent so we can use it for a copyup to the target.
2906 if (obj_request_known_test(obj_request))
2907 return rbd_img_obj_parent_read_full(obj_request);
2909 /* We don't know whether the target exists. Go find out. */
2911 return rbd_img_obj_exists_submit(obj_request);
2914 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2916 struct rbd_obj_request *obj_request;
2917 struct rbd_obj_request *next_obj_request;
2919 dout("%s: img %p\n", __func__, img_request);
2920 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2923 ret = rbd_img_obj_request_submit(obj_request);
2931 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2933 struct rbd_obj_request *obj_request;
2934 struct rbd_device *rbd_dev;
2939 rbd_assert(img_request_child_test(img_request));
2941 /* First get what we need from the image request and release it */
2943 obj_request = img_request->obj_request;
2944 img_xferred = img_request->xferred;
2945 img_result = img_request->result;
2946 rbd_img_request_put(img_request);
2949 * If the overlap has become 0 (most likely because the
2950 * image has been flattened) we need to re-submit the
2951 * original request.
2953 rbd_assert(obj_request);
2954 rbd_assert(obj_request->img_request);
2955 rbd_dev = obj_request->img_request->rbd_dev;
2956 if (!rbd_dev->parent_overlap) {
2957 struct ceph_osd_client *osdc;
2959 osdc = &rbd_dev->rbd_client->client->osdc;
2960 img_result = rbd_obj_request_submit(osdc, obj_request);
2965 obj_request->result = img_result;
2966 if (obj_request->result)
2970 * We need to zero anything beyond the parent overlap
2971 * boundary. Since rbd_img_obj_request_read_callback()
2972 * will zero anything beyond the end of a short read, an
2973 * easy way to do this is to pretend the data from the
2974 * parent came up short--ending at the overlap boundary.
2976 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2977 obj_end = obj_request->img_offset + obj_request->length;
2978 if (obj_end > rbd_dev->parent_overlap) {
2981 if (obj_request->img_offset < rbd_dev->parent_overlap)
2982 xferred = rbd_dev->parent_overlap -
2983 obj_request->img_offset;
2985 obj_request->xferred = min(img_xferred, xferred);
2987 obj_request->xferred = img_xferred;
2990 rbd_img_obj_request_read_callback(obj_request);
2991 rbd_obj_request_complete(obj_request);
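/*
 * Sketch (userspace, not built) of the clamping done above: a parent
 * read extending past the overlap is reported short, so the read
 * callback zero-fills the tail.  Equivalent to the in-line logic.
 */
#if 0
#include <stdint.h>

static uint64_t clamp_xferred(uint64_t img_offset, uint64_t img_xferred,
			      uint64_t overlap)
{
	uint64_t xferred = 0;

	if (img_offset < overlap)
		xferred = overlap - img_offset;	/* bytes before boundary */

	return img_xferred < xferred ? img_xferred : xferred;
}
#endif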
2994 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2996 struct rbd_img_request *img_request;
2999 rbd_assert(obj_request_img_data_test(obj_request));
3000 rbd_assert(obj_request->img_request != NULL);
3001 rbd_assert(obj_request->result == (s32) -ENOENT);
3002 rbd_assert(obj_request_type_valid(obj_request->type));
3004 /* rbd_read_finish(obj_request, obj_request->length); */
3005 img_request = rbd_parent_request_create(obj_request,
3006 obj_request->img_offset,
3007 obj_request->length);
3012 if (obj_request->type == OBJ_REQUEST_BIO)
3013 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3014 obj_request->bio_list);
3016 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3017 obj_request->pages);
3021 img_request->callback = rbd_img_parent_read_callback;
3022 result = rbd_img_request_submit(img_request);
3029 rbd_img_request_put(img_request);
3030 obj_request->result = result;
3031 obj_request->xferred = 0;
3032 obj_request_done_set(obj_request);
3035 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
3037 struct rbd_obj_request *obj_request;
3038 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3041 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3042 OBJ_REQUEST_NODATA);
3047 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3049 if (!obj_request->osd_req)
3052 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
3054 rbd_osd_req_format_read(obj_request);
3056 ret = rbd_obj_request_submit(osdc, obj_request);
3059 ret = rbd_obj_request_wait(obj_request);
3061 rbd_obj_request_put(obj_request);
3066 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3068 struct rbd_device *rbd_dev = (struct rbd_device *)data;
3074 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
3075 rbd_dev->header_name, (unsigned long long)notify_id,
3076 (unsigned int)opcode);
3079 * Until adequate refresh error handling is in place, there is
3080 * not much we can do here, except warn.
3082 * See http://tracker.ceph.com/issues/5040
3084 ret = rbd_dev_refresh(rbd_dev);
3086 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3088 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3090 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
3094 * Send a (un)watch request and wait for the ack. Return a request
3095 * with a ref held on success or error.
3097 static struct rbd_obj_request *rbd_obj_watch_request_helper(
3098 struct rbd_device *rbd_dev,
3101 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3102 struct rbd_obj_request *obj_request;
3105 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3106 OBJ_REQUEST_NODATA);
3108 return ERR_PTR(-ENOMEM);
3110 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
3112 if (!obj_request->osd_req) {
3117 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3118 rbd_dev->watch_event->cookie, 0, watch);
3119 rbd_osd_req_format_write(obj_request);
3122 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3124 ret = rbd_obj_request_submit(osdc, obj_request);
3128 ret = rbd_obj_request_wait(obj_request);
3132 ret = obj_request->result;
3135 rbd_obj_request_end(obj_request);
3142 rbd_obj_request_put(obj_request);
3143 return ERR_PTR(ret);
3147 * Initiate a watch request, synchronously.
3149 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
3151 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3152 struct rbd_obj_request *obj_request;
3155 rbd_assert(!rbd_dev->watch_event);
3156 rbd_assert(!rbd_dev->watch_request);
3158 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3159 &rbd_dev->watch_event);
3163 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3164 if (IS_ERR(obj_request)) {
3165 ceph_osdc_cancel_event(rbd_dev->watch_event);
3166 rbd_dev->watch_event = NULL;
3167 return PTR_ERR(obj_request);
3171 * A watch request is set to linger, so the underlying osd
3172 * request won't go away until we unregister it. We retain
3173 * a pointer to the object request during that time (in
3174 * rbd_dev->watch_request), so we'll keep a reference to it.
3175 * We'll drop that reference after we've unregistered it in
3176 * rbd_dev_header_unwatch_sync().
3178 rbd_dev->watch_request = obj_request;
3184 * Tear down a watch request, synchronously.
3186 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3188 struct rbd_obj_request *obj_request;
3190 rbd_assert(rbd_dev->watch_event);
3191 rbd_assert(rbd_dev->watch_request);
3193 rbd_obj_request_end(rbd_dev->watch_request);
3194 rbd_obj_request_put(rbd_dev->watch_request);
3195 rbd_dev->watch_request = NULL;
3197 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3198 if (!IS_ERR(obj_request))
3199 rbd_obj_request_put(obj_request);
3201 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3202 PTR_ERR(obj_request));
3204 ceph_osdc_cancel_event(rbd_dev->watch_event);
3205 rbd_dev->watch_event = NULL;
3209 * Synchronous osd object method call. Returns the number of bytes
3210 * returned in the inbound buffer, or a negative error code.
3212 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3213 const char *object_name,
3214 const char *class_name,
3215 const char *method_name,
3216 const void *outbound,
3217 size_t outbound_size,
3219 size_t inbound_size)
3221 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3222 struct rbd_obj_request *obj_request;
3223 struct page **pages;
3228 * Method calls are ultimately read operations. The result
3229 * should be placed into the inbound buffer provided. They
3230 * also supply outbound data--parameters for the object
3231 * method. Currently if this is present it will be a
3232 * snapshot id.
3234 page_count = (u32)calc_pages_for(0, inbound_size);
3235 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3237 return PTR_ERR(pages);
3240 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3245 obj_request->pages = pages;
3246 obj_request->page_count = page_count;
3248 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3250 if (!obj_request->osd_req)
3253 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3254 class_name, method_name);
3255 if (outbound_size) {
3256 struct ceph_pagelist *pagelist;
3258 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3262 ceph_pagelist_init(pagelist);
3263 ceph_pagelist_append(pagelist, outbound, outbound_size);
3264 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3267 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3268 obj_request->pages, inbound_size,
3270 rbd_osd_req_format_read(obj_request);
3272 ret = rbd_obj_request_submit(osdc, obj_request);
3275 ret = rbd_obj_request_wait(obj_request);
3279 ret = obj_request->result;
3283 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3284 ret = (int)obj_request->xferred;
3285 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3288 rbd_obj_request_put(obj_request);
3290 ceph_release_page_vector(pages, page_count);
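/*
 * Example (sketch only, not built): a typical use of
 * rbd_obj_method_sync(), mirroring how this file reads the v2 object
 * prefix elsewhere.  The wrapper name is hypothetical; error handling
 * is left to the caller.
 */
#if 0
static int example_get_object_prefix(struct rbd_device *rbd_dev,
				     void *buf, size_t buf_len)
{
	/* No outbound parameters; the reply lands in buf */
	return rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				   "rbd", "get_object_prefix", NULL, 0,
				   buf, buf_len);
}
#endif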
3295 static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
3297 struct rbd_img_request *img_request;
3298 struct ceph_snap_context *snapc = NULL;
3299 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3300 u64 length = blk_rq_bytes(rq);
3301 enum obj_operation_type op_type;
3305 if (rq->cmd_flags & REQ_DISCARD)
3306 op_type = OBJ_OP_DISCARD;
3307 else if (rq->cmd_flags & REQ_WRITE)
3308 op_type = OBJ_OP_WRITE;
3310 op_type = OBJ_OP_READ;
3312 /* Ignore/skip any zero-length requests */
3315 dout("%s: zero-length request\n", __func__);
3320 /* Only reads are allowed to a read-only device */
3322 if (op_type != OBJ_OP_READ) {
3323 if (rbd_dev->mapping.read_only) {
3327 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3331 * Quit early if the mapped snapshot no longer exists. It's
3332 * still possible the snapshot will have disappeared by the
3333 * time our request arrives at the osd, but there's no sense in
3334 * sending it if we already know.
3336 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3337 dout("request for non-existent snapshot");
3338 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3343 if (offset && length > U64_MAX - offset + 1) {
3344 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3347 goto err_rq; /* Shouldn't happen */
3350 down_read(&rbd_dev->header_rwsem);
3351 mapping_size = rbd_dev->mapping.size;
3352 if (op_type != OBJ_OP_READ) {
3353 snapc = rbd_dev->header.snapc;
3354 ceph_get_snap_context(snapc);
3356 up_read(&rbd_dev->header_rwsem);
3358 if (offset + length > mapping_size) {
3359 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3360 length, mapping_size);
3365 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3371 img_request->rq = rq;
3373 if (op_type == OBJ_OP_DISCARD)
3374 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3377 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3380 goto err_img_request;
3382 result = rbd_img_request_submit(img_request);
3384 goto err_img_request;
3389 rbd_img_request_put(img_request);
3392 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3393 obj_op_name(op_type), length, offset, result);
3395 ceph_put_snap_context(snapc);
3396 blk_end_request_all(rq, result);
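/*
 * Sketch (userspace, not built) of the wrap check above: with
 * offset != 0, "length > U64_MAX - offset + 1" is an overflow-safe
 * way of testing that the last byte index, offset + length - 1,
 * would exceed U64_MAX, i.e. that the byte range wraps.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool range_wraps(uint64_t offset, uint64_t length)
{
	return offset && length > UINT64_MAX - offset + 1;
}
#endif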
3399 static void rbd_request_workfn(struct work_struct *work)
3401 struct rbd_device *rbd_dev =
3402 container_of(work, struct rbd_device, rq_work);
3403 struct request *rq, *next;
3404 LIST_HEAD(requests);
3406 spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */
3407 list_splice_init(&rbd_dev->rq_queue, &requests);
3408 spin_unlock_irq(&rbd_dev->lock);
3410 list_for_each_entry_safe(rq, next, &requests, queuelist) {
3411 list_del_init(&rq->queuelist);
3412 rbd_handle_request(rbd_dev, rq);
3417 * Called with q->queue_lock held and interrupts disabled, possibly on
3418 * the way to schedule(). Do not sleep here!
3420 static void rbd_request_fn(struct request_queue *q)
3422 struct rbd_device *rbd_dev = q->queuedata;
3426 rbd_assert(rbd_dev);
3428 while ((rq = blk_fetch_request(q))) {
3429 /* Ignore any non-FS requests that filter through. */
3430 if (rq->cmd_type != REQ_TYPE_FS) {
3431 dout("%s: non-fs request type %d\n", __func__,
3432 (int) rq->cmd_type);
3433 __blk_end_request_all(rq, 0);
3437 list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);
3442 queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
3446 * A queue callback. Ensures we don't create a bio that spans
3447 * multiple osd objects. One exception is single-page bios,
3448 * which are handled later in bio_chain_clone_range().
3450 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3451 struct bio_vec *bvec)
3453 struct rbd_device *rbd_dev = q->queuedata;
3454 sector_t sector_offset;
3455 sector_t sectors_per_obj;
3456 sector_t obj_sector_offset;
3460 * Find how far into its rbd object the partition-relative
3461 * bio start sector is, so it can be offset relative to the
3462 * enclosing device.
3464 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3465 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3466 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3469 * Compute the number of bytes from that offset to the end
3470 * of the object. Account for what's already used by the bio.
3472 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3473 if (ret > bmd->bi_size)
3474 ret -= bmd->bi_size;
3479 * Don't send back more than was asked for. And if the bio
3480 * was empty, let the whole thing through because: "Note
3481 * that a block device *must* allow a single page to be
3482 * added to an empty bio."
3484 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3485 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3486 ret = (int) bvec->bv_len;
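/*
 * Sketch (userspace, not built) of the boundary math above: the
 * bytes remaining in the current object, given a device-relative
 * start sector, the object order, and the bytes the bio already
 * holds.  Assumes 512-byte sectors (shift of 9), as the driver does.
 */
#if 0
#include <stdint.h>

static int bytes_to_object_end(uint64_t sector_offset, uint8_t obj_order,
			       int bio_size)
{
	uint64_t sectors_per_obj = 1ULL << (obj_order - 9);
	uint64_t obj_sector_off = sector_offset & (sectors_per_obj - 1);
	int ret = (int)((sectors_per_obj - obj_sector_off) << 9);

	/* account for what the bio already carries */
	return ret > bio_size ? ret - bio_size : 0;
}
#endif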
3491 static void rbd_free_disk(struct rbd_device *rbd_dev)
3493 struct gendisk *disk = rbd_dev->disk;
3498 rbd_dev->disk = NULL;
3499 if (disk->flags & GENHD_FL_UP) {
3502 blk_cleanup_queue(disk->queue);
3507 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3508 const char *object_name,
3509 u64 offset, u64 length, void *buf)
3512 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3513 struct rbd_obj_request *obj_request;
3514 struct page **pages = NULL;
3519 page_count = (u32) calc_pages_for(offset, length);
3520 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3522 ret = PTR_ERR(pages);
3525 obj_request = rbd_obj_request_create(object_name, offset, length,
3530 obj_request->pages = pages;
3531 obj_request->page_count = page_count;
3533 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3535 if (!obj_request->osd_req)
3538 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3539 offset, length, 0, 0);
3540 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3542 obj_request->length,
3543 obj_request->offset & ~PAGE_MASK,
3545 rbd_osd_req_format_read(obj_request);
3547 ret = rbd_obj_request_submit(osdc, obj_request);
3550 ret = rbd_obj_request_wait(obj_request);
3554 ret = obj_request->result;
3558 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3559 size = (size_t) obj_request->xferred;
3560 ceph_copy_from_page_vector(pages, buf, 0, size);
3561 rbd_assert(size <= (size_t)INT_MAX);
3565 rbd_obj_request_put(obj_request);
3567 ceph_release_page_vector(pages, page_count);
3573 * Read the complete header for the given rbd device. On successful
3574 * return, the rbd_dev->header field will contain up-to-date
3575 * information about the image.
3577 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3579 struct rbd_image_header_ondisk *ondisk = NULL;
3586 * The complete header will include an array of its 64-bit
3587 * snapshot ids, followed by the names of those snapshots as
3588 * a contiguous block of NUL-terminated strings. Note that
3589 * the number of snapshots could change by the time we read
3590 * it in, in which case we re-read it.
3597 size = sizeof (*ondisk);
3598 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3600 ondisk = kmalloc(size, GFP_KERNEL);
3604 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3608 if ((size_t)ret < size) {
3610 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3614 if (!rbd_dev_ondisk_valid(ondisk)) {
3616 rbd_warn(rbd_dev, "invalid header");
3620 names_size = le64_to_cpu(ondisk->snap_names_len);
3621 want_count = snap_count;
3622 snap_count = le32_to_cpu(ondisk->snap_count);
3623 } while (snap_count != want_count);
3625 ret = rbd_header_from_disk(rbd_dev, ondisk);
3633 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3634 * has disappeared from the (just updated) snapshot context.
3636 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3640 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3643 snap_id = rbd_dev->spec->snap_id;
3644 if (snap_id == CEPH_NOSNAP)
3647 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3648 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3651 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3657 * Don't hold the lock while doing disk operations,
3658 * or lock ordering will conflict with the bdev mutex via:
3659 * rbd_add() -> blkdev_get() -> rbd_open()
3661 spin_lock_irq(&rbd_dev->lock);
3662 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3663 spin_unlock_irq(&rbd_dev->lock);
3665 * If the device is being removed, rbd_dev->disk has
3666 * been destroyed, so don't try to update its size
3669 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3670 dout("setting size to %llu sectors", (unsigned long long)size);
3671 set_capacity(rbd_dev->disk, size);
3672 revalidate_disk(rbd_dev->disk);
3676 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3681 down_write(&rbd_dev->header_rwsem);
3682 mapping_size = rbd_dev->mapping.size;
3684 ret = rbd_dev_header_info(rbd_dev);
3689 * If there is a parent, see if it has disappeared due to the
3690 * mapped image getting flattened.
3692 if (rbd_dev->parent) {
3693 ret = rbd_dev_v2_parent_info(rbd_dev);
3698 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3699 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
3700 rbd_dev->mapping.size = rbd_dev->header.image_size;
3702 /* validate mapped snapshot's EXISTS flag */
3703 rbd_exists_validate(rbd_dev);
3706 up_write(&rbd_dev->header_rwsem);
3708 if (mapping_size != rbd_dev->mapping.size)
3709 rbd_dev_update_size(rbd_dev);
3714 static int rbd_init_disk(struct rbd_device *rbd_dev)
3716 struct gendisk *disk;
3717 struct request_queue *q;
3720 /* create gendisk info */
3721 disk = alloc_disk(single_major ?
3722 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3723 RBD_MINORS_PER_MAJOR);
3727 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3729 disk->major = rbd_dev->major;
3730 disk->first_minor = rbd_dev->minor;
3732 disk->flags |= GENHD_FL_EXT_DEVT;
3733 disk->fops = &rbd_bd_ops;
3734 disk->private_data = rbd_dev;
3736 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3740 /* We use the default size, but let's be explicit about it. */
3741 blk_queue_physical_block_size(q, SECTOR_SIZE);
3743 /* set io sizes to object size */
3744 segment_size = rbd_obj_bytes(&rbd_dev->header);
3745 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3746 blk_queue_max_segment_size(q, segment_size);
3747 blk_queue_io_min(q, segment_size);
3748 blk_queue_io_opt(q, segment_size);
3750 /* enable the discard support */
3751 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3752 q->limits.discard_granularity = segment_size;
3753 q->limits.discard_alignment = segment_size;
3755 blk_queue_merge_bvec(q, rbd_merge_bvec);
3758 q->queuedata = rbd_dev;
3760 rbd_dev->disk = disk;
3773 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3775 return container_of(dev, struct rbd_device, dev);
3778 static ssize_t rbd_size_show(struct device *dev,
3779 struct device_attribute *attr, char *buf)
3781 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3783 return sprintf(buf, "%llu\n",
3784 (unsigned long long)rbd_dev->mapping.size);
3788 * Note this shows the features for whatever's mapped, which is not
3789 * necessarily the base image.
3791 static ssize_t rbd_features_show(struct device *dev,
3792 struct device_attribute *attr, char *buf)
3794 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3796 return sprintf(buf, "0x%016llx\n",
3797 (unsigned long long)rbd_dev->mapping.features);
3800 static ssize_t rbd_major_show(struct device *dev,
3801 struct device_attribute *attr, char *buf)
3803 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3806 return sprintf(buf, "%d\n", rbd_dev->major);
3808 return sprintf(buf, "(none)\n");
3811 static ssize_t rbd_minor_show(struct device *dev,
3812 struct device_attribute *attr, char *buf)
3814 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3816 return sprintf(buf, "%d\n", rbd_dev->minor);
3819 static ssize_t rbd_client_id_show(struct device *dev,
3820 struct device_attribute *attr, char *buf)
3822 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3824 return sprintf(buf, "client%lld\n",
3825 ceph_client_id(rbd_dev->rbd_client->client));
3828 static ssize_t rbd_pool_show(struct device *dev,
3829 struct device_attribute *attr, char *buf)
3831 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3833 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3836 static ssize_t rbd_pool_id_show(struct device *dev,
3837 struct device_attribute *attr, char *buf)
3839 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3841 return sprintf(buf, "%llu\n",
3842 (unsigned long long) rbd_dev->spec->pool_id);
3845 static ssize_t rbd_name_show(struct device *dev,
3846 struct device_attribute *attr, char *buf)
3848 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3850 if (rbd_dev->spec->image_name)
3851 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3853 return sprintf(buf, "(unknown)\n");
3856 static ssize_t rbd_image_id_show(struct device *dev,
3857 struct device_attribute *attr, char *buf)
3859 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3861 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3865 * Shows the name of the currently-mapped snapshot (or
3866 * RBD_SNAP_HEAD_NAME for the base image).
3868 static ssize_t rbd_snap_show(struct device *dev,
3869 struct device_attribute *attr,
3872 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3874 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3878 * For a v2 image, shows the chain of parent images, separated by empty
3879 * lines. For v1 images or if there is no parent, shows "(no parent
3880 * image)".
3882 static ssize_t rbd_parent_show(struct device *dev,
3883 struct device_attribute *attr,
3886 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3889 if (!rbd_dev->parent)
3890 return sprintf(buf, "(no parent image)\n");
3892 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3893 struct rbd_spec *spec = rbd_dev->parent_spec;
3895 count += sprintf(&buf[count], "%s"
3896 "pool_id %llu\npool_name %s\n"
3897 "image_id %s\nimage_name %s\n"
3898 "snap_id %llu\nsnap_name %s\n"
3900 !count ? "" : "\n", /* first? */
3901 spec->pool_id, spec->pool_name,
3902 spec->image_id, spec->image_name ?: "(unknown)",
3903 spec->snap_id, spec->snap_name,
3904 rbd_dev->parent_overlap);
3910 static ssize_t rbd_image_refresh(struct device *dev,
3911 struct device_attribute *attr,
3915 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3918 ret = rbd_dev_refresh(rbd_dev);
3925 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3926 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3927 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3928 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3929 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3930 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3931 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3932 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3933 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3934 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3935 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3936 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3938 static struct attribute *rbd_attrs[] = {
3939 &dev_attr_size.attr,
3940 &dev_attr_features.attr,
3941 &dev_attr_major.attr,
3942 &dev_attr_minor.attr,
3943 &dev_attr_client_id.attr,
3944 &dev_attr_pool.attr,
3945 &dev_attr_pool_id.attr,
3946 &dev_attr_name.attr,
3947 &dev_attr_image_id.attr,
3948 &dev_attr_current_snap.attr,
3949 &dev_attr_parent.attr,
3950 &dev_attr_refresh.attr,
3954 static struct attribute_group rbd_attr_group = {
3958 static const struct attribute_group *rbd_attr_groups[] = {
3963 static void rbd_sysfs_dev_release(struct device *dev)
3967 static struct device_type rbd_device_type = {
3969 .groups = rbd_attr_groups,
3970 .release = rbd_sysfs_dev_release,
3973 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3975 kref_get(&spec->kref);
3980 static void rbd_spec_free(struct kref *kref);
3981 static void rbd_spec_put(struct rbd_spec *spec)
3984 kref_put(&spec->kref, rbd_spec_free);
3987 static struct rbd_spec *rbd_spec_alloc(void)
3989 struct rbd_spec *spec;
3991 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3995 spec->pool_id = CEPH_NOPOOL;
3996 spec->snap_id = CEPH_NOSNAP;
3997 kref_init(&spec->kref);
4002 static void rbd_spec_free(struct kref *kref)
4004 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4006 kfree(spec->pool_name);
4007 kfree(spec->image_id);
4008 kfree(spec->image_name);
4009 kfree(spec->snap_name);
4013 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4014 struct rbd_spec *spec)
4016 struct rbd_device *rbd_dev;
4018 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4022 spin_lock_init(&rbd_dev->lock);
4023 INIT_LIST_HEAD(&rbd_dev->rq_queue);
4024 INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn);
4026 atomic_set(&rbd_dev->parent_ref, 0);
4027 INIT_LIST_HEAD(&rbd_dev->node);
4028 init_rwsem(&rbd_dev->header_rwsem);
4030 rbd_dev->spec = spec;
4031 rbd_dev->rbd_client = rbdc;
4033 /* Initialize the layout used for all rbd requests */
4035 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4036 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4037 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4038 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4043 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4045 rbd_put_client(rbd_dev->rbd_client);
4046 rbd_spec_put(rbd_dev->spec);
4051 * Get the size and object order for an image snapshot, or if
4052 * snap_id is CEPH_NOSNAP, gets this information for the base
4053 * image.
4055 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4056 u8 *order, u64 *snap_size)
4058 __le64 snapid = cpu_to_le64(snap_id);
4063 } __attribute__ ((packed)) size_buf = { 0 };
4065 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4067 &snapid, sizeof (snapid),
4068 &size_buf, sizeof (size_buf));
4069 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4072 if (ret < sizeof (size_buf))
4076 *order = size_buf.order;
4077 dout(" order %u", (unsigned int)*order);
4079 *snap_size = le64_to_cpu(size_buf.size);
4081 dout(" snap_id 0x%016llx snap_size = %llu\n",
4082 (unsigned long long)snap_id,
4083 (unsigned long long)*snap_size);
4088 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4090 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4091 &rbd_dev->header.obj_order,
4092 &rbd_dev->header.image_size);
4095 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4101 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4105 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4106 "rbd", "get_object_prefix", NULL, 0,
4107 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4108 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4113 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4114 p + ret, NULL, GFP_NOIO);
4117 if (IS_ERR(rbd_dev->header.object_prefix)) {
4118 ret = PTR_ERR(rbd_dev->header.object_prefix);
4119 rbd_dev->header.object_prefix = NULL;
4121 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4129 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4132 __le64 snapid = cpu_to_le64(snap_id);
4136 } __attribute__ ((packed)) features_buf = { 0 };
4140 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4141 "rbd", "get_features",
4142 &snapid, sizeof (snapid),
4143 &features_buf, sizeof (features_buf));
4144 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4147 if (ret < sizeof (features_buf))
4150 incompat = le64_to_cpu(features_buf.incompat);
4151 if (incompat & ~RBD_FEATURES_SUPPORTED)
4154 *snap_features = le64_to_cpu(features_buf.features);
4156 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4157 (unsigned long long)snap_id,
4158 (unsigned long long)*snap_features,
4159 (unsigned long long)le64_to_cpu(features_buf.incompat));
4164 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4166 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4167 &rbd_dev->header.features);
4170 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4172 struct rbd_spec *parent_spec;
4174 void *reply_buf = NULL;
4184 parent_spec = rbd_spec_alloc();
4188 size = sizeof (__le64) + /* pool_id */
4189 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4190 sizeof (__le64) + /* snap_id */
4191 sizeof (__le64); /* overlap */
4192 reply_buf = kmalloc(size, GFP_KERNEL);
4198 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4199 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4200 "rbd", "get_parent",
4201 &snapid, sizeof (snapid),
4203 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4208 end = reply_buf + ret;
4210 ceph_decode_64_safe(&p, end, pool_id, out_err);
4211 if (pool_id == CEPH_NOPOOL) {
4213 * Either the parent never existed, or we have
4214 * a record of it, but the image got flattened so it no
4215 * longer has a parent. When the parent of a
4216 * layered image disappears we immediately set the
4217 * overlap to 0. The effect of this is that all new
4218 * requests will be treated as if the image had no
4221 if (rbd_dev->parent_overlap) {
4222 rbd_dev->parent_overlap = 0;
4224 rbd_dev_parent_put(rbd_dev);
4225 pr_info("%s: clone image has been flattened\n",
4226 rbd_dev->disk->disk_name);
4229 goto out; /* No parent? No problem. */
4232 /* The ceph file layout needs to fit pool id in 32 bits */
4235 if (pool_id > (u64)U32_MAX) {
4236 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4237 (unsigned long long)pool_id, U32_MAX);
4241 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4242 if (IS_ERR(image_id)) {
4243 ret = PTR_ERR(image_id);
4246 ceph_decode_64_safe(&p, end, snap_id, out_err);
4247 ceph_decode_64_safe(&p, end, overlap, out_err);
4250 * The parent won't change (except when the clone is
4251 * flattened, which is already handled). So we only need to
4252 * record the parent spec if we have not already done so.
4254 if (!rbd_dev->parent_spec) {
4255 parent_spec->pool_id = pool_id;
4256 parent_spec->image_id = image_id;
4257 parent_spec->snap_id = snap_id;
4258 rbd_dev->parent_spec = parent_spec;
4259 parent_spec = NULL; /* rbd_dev now owns this */
4265 * We always update the parent overlap. If it's zero we
4266 * treat it specially.
4268 rbd_dev->parent_overlap = overlap;
4272 /* A null parent_spec indicates it's the initial probe */
4276 * The overlap has become zero, so the clone
4277 * must have been resized down to 0 at some
4278 * point. Treat this the same as a flatten.
4280 rbd_dev_parent_put(rbd_dev);
4281 pr_info("%s: clone image now standalone\n",
4282 rbd_dev->disk->disk_name);
4285 * For the initial probe, if we find the
4286 * overlap is zero we just pretend there was
4287 * no parent image.
4289 rbd_warn(rbd_dev, "ignoring parent with overlap 0");
4296 rbd_spec_put(parent_spec);
4301 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4305 __le64 stripe_count;
4306 } __attribute__ ((packed)) striping_info_buf = { 0 };
4307 size_t size = sizeof (striping_info_buf);
4314 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4315 "rbd", "get_stripe_unit_count", NULL, 0,
4316 (char *)&striping_info_buf, size);
4317 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4324 * We don't actually support the "fancy striping" feature
4325 * (STRIPINGV2) yet, but if the striping sizes are the
4326 * defaults the behavior is the same as before. So find
4327 * out, and only fail if the image has non-default values.
4330 obj_size = (u64)1 << rbd_dev->header.obj_order;
4331 p = &striping_info_buf;
4332 stripe_unit = ceph_decode_64(&p);
4333 if (stripe_unit != obj_size) {
4334 rbd_warn(rbd_dev, "unsupported stripe unit "
4335 "(got %llu want %llu)",
4336 stripe_unit, obj_size);
4339 stripe_count = ceph_decode_64(&p);
4340 if (stripe_count != 1) {
4341 rbd_warn(rbd_dev, "unsupported stripe count "
4342 "(got %llu want 1)", stripe_count);
4345 rbd_dev->header.stripe_unit = stripe_unit;
4346 rbd_dev->header.stripe_count = stripe_count;
4351 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4353 size_t image_id_size;
4358 void *reply_buf = NULL;
4360 char *image_name = NULL;
4363 rbd_assert(!rbd_dev->spec->image_name);
4365 len = strlen(rbd_dev->spec->image_id);
4366 image_id_size = sizeof (__le32) + len;
4367 image_id = kmalloc(image_id_size, GFP_KERNEL);
4372 end = image_id + image_id_size;
4373 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4375 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4376 reply_buf = kmalloc(size, GFP_KERNEL);
4380 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4381 "rbd", "dir_get_name",
4382 image_id, image_id_size,
4387 end = reply_buf + ret;
4389 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4390 if (IS_ERR(image_name))
4393 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4401 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4403 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4404 const char *snap_name;
4407 /* Skip over names until we find the one we are looking for */
4409 snap_name = rbd_dev->header.snap_names;
4410 while (which < snapc->num_snaps) {
4411 if (!strcmp(name, snap_name))
4412 return snapc->snaps[which];
4413 snap_name += strlen(snap_name) + 1;
4419 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4421 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4426 for (which = 0; !found && which < snapc->num_snaps; which++) {
4427 const char *snap_name;
4429 snap_id = snapc->snaps[which];
4430 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4431 if (IS_ERR(snap_name)) {
4432 /* ignore no-longer existing snapshots */
4433 if (PTR_ERR(snap_name) == -ENOENT)
4438 found = !strcmp(name, snap_name);
4441 return found ? snap_id : CEPH_NOSNAP;
4445 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4446 * no snapshot by that name is found, or if an error occurs.
4448 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4450 if (rbd_dev->image_format == 1)
4451 return rbd_v1_snap_id_by_name(rbd_dev, name);
4453 return rbd_v2_snap_id_by_name(rbd_dev, name);
4457 * An image being mapped will have everything but the snap id.
4459 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4461 struct rbd_spec *spec = rbd_dev->spec;
4463 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4464 rbd_assert(spec->image_id && spec->image_name);
4465 rbd_assert(spec->snap_name);
4467 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4470 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4471 if (snap_id == CEPH_NOSNAP)
4474 spec->snap_id = snap_id;
4476 spec->snap_id = CEPH_NOSNAP;
4483 * A parent image will have all ids but none of the names.
4485 * All names in an rbd spec are dynamically allocated. It's OK if we
4486 * can't figure out the name for an image id.
4488 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
4490 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4491 struct rbd_spec *spec = rbd_dev->spec;
4492 const char *pool_name;
4493 const char *image_name;
4494 const char *snap_name;
4497 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4498 rbd_assert(spec->image_id);
4499 rbd_assert(spec->snap_id != CEPH_NOSNAP);
4501 /* Get the pool name; we have to make our own copy of this */
4503 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4505 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4508 pool_name = kstrdup(pool_name, GFP_KERNEL);
4512 /* Fetch the image name; tolerate failure here */
4514 image_name = rbd_dev_image_name(rbd_dev);
4516 rbd_warn(rbd_dev, "unable to get image name");
4518 /* Fetch the snapshot name */
4520 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4521 if (IS_ERR(snap_name)) {
4522 ret = PTR_ERR(snap_name);
4526 spec->pool_name = pool_name;
4527 spec->image_name = image_name;
4528 spec->snap_name = snap_name;
4538 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4547 struct ceph_snap_context *snapc;
4551 * We'll need room for the seq value (maximum snapshot id),
4552 * snapshot count, and array of that many snapshot ids.
4553 * For now we have a fixed upper limit on the number we're
4554 * prepared to receive.
4556 size = sizeof (__le64) + sizeof (__le32) +
4557 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4558 reply_buf = kzalloc(size, GFP_KERNEL);
4562 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4563 "rbd", "get_snapcontext", NULL, 0,
4565 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4570 end = reply_buf + ret;
4572 ceph_decode_64_safe(&p, end, seq, out);
4573 ceph_decode_32_safe(&p, end, snap_count, out);
4576 * Make sure the reported number of snapshot ids wouldn't go
4577 * beyond the end of our buffer. But before checking that,
4578 * make sure the computed size of the snapshot context we
4579 * allocate is representable in a size_t.
4581 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4586 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4590 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4596 for (i = 0; i < snap_count; i++)
4597 snapc->snaps[i] = ceph_decode_64(&p);
4599 ceph_put_snap_context(rbd_dev->header.snapc);
4600 rbd_dev->header.snapc = snapc;
4602 dout(" snap context seq = %llu, snap_count = %u\n",
4603 (unsigned long long)seq, (unsigned int)snap_count);
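/*
 * Sketch (userspace, not built) of the get_snapcontext reply layout
 * decoded above; all fields are little-endian:
 *
 *	le64 seq;		highest snapshot id
 *	le32 snap_count;
 *	le64 snaps[snap_count];	snapshot ids
 *
 * The naive decode below assumes a little-endian host and performs no
 * bounds checks; the driver uses the ceph_decode_*_safe() helpers
 * instead.
 */
#if 0
#include <stdint.h>
#include <string.h>

static uint32_t decode_snapc(const uint8_t *p, uint64_t *seq,
			     uint64_t *snaps, uint32_t max)
{
	uint32_t count, i;

	memcpy(seq, p, 8); p += 8;		/* le64 seq */
	memcpy(&count, p, 4); p += 4;		/* le32 snap_count */
	for (i = 0; i < count && i < max; i++, p += 8)
		memcpy(&snaps[i], p, 8);	/* le64 snap ids */
	return count;
}
#endif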
4610 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4621 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4622 reply_buf = kmalloc(size, GFP_KERNEL);
4624 return ERR_PTR(-ENOMEM);
4626 snapid = cpu_to_le64(snap_id);
4627 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4628 "rbd", "get_snapshot_name",
4629 &snapid, sizeof (snapid),
4630 reply_buf, size);
4631 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4632 if (ret < 0) {
4633 snap_name = ERR_PTR(ret);
4634 goto out;
4635 }
4637 p = reply_buf;
4638 end = reply_buf + ret;
4639 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4640 if (IS_ERR(snap_name))
4641 goto out;
4643 dout(" snap_id 0x%016llx snap_name = %s\n",
4644 (unsigned long long)snap_id, snap_name);
4645 out:
4646 kfree(reply_buf);
4648 return snap_name;
4649 }
4651 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4653 bool first_time = rbd_dev->header.object_prefix == NULL;
4656 ret = rbd_dev_v2_image_size(rbd_dev);
4657 if (ret)
4658 return ret;
4660 if (first_time) {
4661 ret = rbd_dev_v2_header_onetime(rbd_dev);
4662 if (ret)
4663 return ret;
4664 }
4666 ret = rbd_dev_v2_snap_context(rbd_dev);
4667 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4669 return ret;
4670 }
4672 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
4674 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4676 if (rbd_dev->image_format == 1)
4677 return rbd_dev_v1_header_info(rbd_dev);
4679 return rbd_dev_v2_header_info(rbd_dev);
4682 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4687 dev = &rbd_dev->dev;
4688 dev->bus = &rbd_bus_type;
4689 dev->type = &rbd_device_type;
4690 dev->parent = &rbd_root_dev;
4691 dev->release = rbd_dev_device_release;
4692 dev_set_name(dev, "%d", rbd_dev->dev_id);
4693 ret = device_register(dev);
4695 return ret;
4696 }
4698 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4700 device_unregister(&rbd_dev->dev);
4704 * Get a unique rbd identifier for the given new rbd_dev, and add
4705 * the rbd_dev to the global list.
4707 static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4711 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4712 0, minor_to_rbd_dev_id(1 << MINORBITS),
4713 GFP_KERNEL);
4714 if (new_dev_id < 0)
4715 return new_dev_id;
4717 rbd_dev->dev_id = new_dev_id;
4719 spin_lock(&rbd_dev_list_lock);
4720 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4721 spin_unlock(&rbd_dev_list_lock);
4723 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4725 return 0;
4726 }
4729 * Remove an rbd_dev from the global list, and record that its
4730 * identifier is no longer in use.
4732 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4734 spin_lock(&rbd_dev_list_lock);
4735 list_del_init(&rbd_dev->node);
4736 spin_unlock(&rbd_dev_list_lock);
4738 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4740 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
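/*
 * Sketch of the dev id <-> minor mapping assumed above (illustrative):
 * in single-major mode rbd_dev_id_to_minor() shifts the dev id left by
 * RBD_SINGLE_MAJOR_PART_SHIFT (4), so dev id 2 becomes minor 32,
 * leaving minors 33-47 for that device's partitions.
 */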
4744 * Skips over white space at *buf, and updates *buf to point to the
4745 * first found non-space character (if any). Returns the length of
4746 * the token (string of non-white space characters) found. Note
4747 * that *buf must be terminated with '\0'.
4749 static inline size_t next_token(const char **buf)
4752 * These are the characters that produce nonzero for
4753 * isspace() in the "C" and "POSIX" locales.
4755 const char *spaces = " \f\n\r\t\v";
4757 *buf += strspn(*buf, spaces); /* Find start of token */
4759 return strcspn(*buf, spaces); /* Return token length */
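/*
 * Usage sketch (illustrative values only):
 *
 *	const char *p = "  pool image";
 *	size_t len = next_token(&p);
 *
 * leaves p pointing at "pool image" and len == 4, the length of the
 * token "pool".
 */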
4763 * Finds the next token in *buf, and if the provided token buffer is
4764 * big enough, copies the found token into it. The result, if
4765 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4766 * must be terminated with '\0' on entry.
4768 * Returns the length of the token found (not including the '\0').
4769 * Return value will be 0 if no token is found, and it will be >=
4770 * token_size if the token would not fit.
4772 * The *buf pointer will be updated to point beyond the end of the
4773 * found token. Note that this occurs even if the token buffer is
4774 * too small to hold it.
4776 static inline size_t copy_token(const char **buf,
4782 len = next_token(buf);
4783 if (len < token_size) {
4784 memcpy(token, *buf, len);
4785 *(token + len) = '\0';
4786 }
4787 *buf += len;
4789 return len;
4790 }
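/*
 * Usage sketch (illustrative values only): with *buf at "pool image"
 * and a 16-byte token buffer, copy_token() stores "pool", advances
 * *buf past the token, and returns 4. With a 4-byte buffer it still
 * advances *buf and returns 4; the caller detects truncation because
 * 4 >= token_size.
 */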
4793 * Finds the next token in *buf, dynamically allocates a buffer big
4794 * enough to hold a copy of it, and copies the token into the new
4795 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4796 * that a duplicate buffer is created even for a zero-length token.
4798 * Returns a pointer to the newly-allocated duplicate, or a null
4799 * pointer if memory for the duplicate was not available. If
4800 * the lenp argument is a non-null pointer, the length of the token
4801 * (not including the '\0') is returned in *lenp.
4803 * If successful, the *buf pointer will be updated to point beyond
4804 * the end of the found token.
4806 * Note: uses GFP_KERNEL for allocation.
4808 static inline char *dup_token(const char **buf, size_t *lenp)
4813 len = next_token(buf);
4814 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4815 if (!dup)
4816 return NULL;
4817 *(dup + len) = '\0';
4818 *buf += len;
4820 if (lenp)
4821 *lenp = len;
4823 return dup;
4824 }
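/*
 * Usage sketch (illustrative values only):
 *
 *	const char *p = "mypool rest";
 *	size_t len;
 *	char *dup = dup_token(&p, &len);
 *
 * yields dup == "mypool" (to be freed with kfree()), len == 6, and p
 * advanced to " rest".
 */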
4827 * Parse the options provided for an "rbd add" (i.e., rbd image
4828 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4829 * and the data written is passed here via a NUL-terminated buffer.
4830 * Returns 0 if successful or an error code otherwise.
4832 * The information extracted from these options is recorded in
4833 * the other parameters which return dynamically-allocated
4836 * The address of a pointer that will refer to a ceph options
4837 * structure. Caller must release the returned pointer using
4838 * ceph_destroy_options() when it is no longer needed.
4840 * Address of an rbd options pointer. Fully initialized by
4841 * this function; caller must release with kfree().
4843 * Address of an rbd image specification pointer. Fully
4844 * initialized by this function based on parsed options.
4845 * Caller must release with rbd_spec_put().
4847 * The options passed take this form:
4848 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4851 * A comma-separated list of one or more monitor addresses.
4852 * A monitor address is an ip address, optionally followed
4853 * by a port number (separated by a colon).
4854 * I.e.: ip1[:port1][,ip2[:port2]...]
4856 * A comma-separated list of ceph and/or rbd options.
4858 * The name of the rados pool containing the rbd image.
4860 * The name of the image in that pool to map.
4862 * An optional snapshot name. If provided, the mapping will
4863 * present data from the image at the time that snapshot was
4864 * created. The image head is used if no snapshot name is
4865 * provided. Snapshot mappings are always read-only.
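/*
 * Example (illustrative values only): writing
 *
 *	1.2.3.4:6789,1.2.3.5:6789 name=admin mypool myimage mysnap
 *
 * to /sys/bus/rbd/add maps snapshot "mysnap" of image "myimage" in
 * pool "mypool", contacting either of the two listed monitors and
 * authenticating as the "admin" user.
 */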
4867 static int rbd_add_parse_args(const char *buf,
4868 struct ceph_options **ceph_opts,
4869 struct rbd_options **opts,
4870 struct rbd_spec **rbd_spec)
4874 const char *mon_addrs;
4876 size_t mon_addrs_size;
4877 struct rbd_spec *spec = NULL;
4878 struct rbd_options *rbd_opts = NULL;
4879 struct ceph_options *copts;
4882 /* The first four tokens are required */
4884 len = next_token(&buf);
4885 if (!len) {
4886 rbd_warn(NULL, "no monitor address(es) provided");
4887 return -EINVAL;
4888 }
4889 mon_addrs = buf;
4890 mon_addrs_size = len + 1;
4891 buf += len;
4893 ret = -EINVAL;
4894 options = dup_token(&buf, NULL);
4895 if (!options)
4896 return -ENOMEM;
4897 if (!*options) {
4898 rbd_warn(NULL, "no options provided");
4899 goto out_err;
4900 }
4902 spec = rbd_spec_alloc();
4903 if (!spec)
4904 goto out_mem;
4906 spec->pool_name = dup_token(&buf, NULL);
4907 if (!spec->pool_name)
4908 goto out_mem;
4909 if (!*spec->pool_name) {
4910 rbd_warn(NULL, "no pool name provided");
4911 goto out_err;
4912 }
4914 spec->image_name = dup_token(&buf, NULL);
4915 if (!spec->image_name)
4916 goto out_mem;
4917 if (!*spec->image_name) {
4918 rbd_warn(NULL, "no image name provided");
4919 goto out_err;
4920 }
4923 * Snapshot name is optional; default is to use "-"
4924 * (indicating the head/no snapshot).
4926 len = next_token(&buf);
4927 if (!len) {
4928 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4929 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4930 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4931 ret = -ENAMETOOLONG;
4932 goto out_err;
4933 }
4934 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4935 if (!snap_name)
4936 goto out_mem;
4937 *(snap_name + len) = '\0';
4938 spec->snap_name = snap_name;
4940 /* Initialize all rbd options to the defaults */
4942 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4943 if (!rbd_opts)
4944 goto out_mem;
4946 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4948 copts = ceph_parse_options(options, mon_addrs,
4949 mon_addrs + mon_addrs_size - 1,
4950 parse_rbd_opts_token, rbd_opts);
4951 if (IS_ERR(copts)) {
4952 ret = PTR_ERR(copts);
4953 goto out_err;
4954 }
4955 kfree(options);
4957 *ceph_opts = copts;
4958 *opts = rbd_opts;
4959 *rbd_spec = spec;
4961 return 0;
4962 out_mem:
4963 ret = -ENOMEM;
4964 out_err:
4965 kfree(rbd_opts);
4966 rbd_spec_put(spec);
4967 kfree(options);
4969 return ret;
4970 }
4973 * Return pool id (>= 0) or a negative error code.
4975 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4978 unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
4982 again:
4983 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4984 if (ret == -ENOENT && tries++ < 1) {
4985 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
4986 &newest_epoch);
4987 if (ret < 0)
4988 return ret;
4990 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
4991 ceph_monc_request_next_osdmap(&rbdc->client->monc);
4992 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
4993 newest_epoch, timeout);
4994 goto again;
4995 } else {
4996 /* the osdmap we have is new enough */
4997 return -ENOENT;
4998 }
4999 }
5001 return ret;
5002 }
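/*
 * Retry sketch (illustrative): if "mypool" was created after this
 * client last saw an osdmap, the first ceph_pg_poolid_by_name()
 * lookup returns -ENOENT; the code above then fetches the newest map
 * epoch from the monitors, waits for that map, and retries the
 * lookup exactly once before giving up.
 */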
5005 * An rbd format 2 image has a unique identifier, distinct from the
5006 * name given to it by the user. Internally, that identifier is
5007 * what's used to specify the names of objects related to the image.
5009 * A special "rbd id" object is used to map an rbd image name to its
5010 * id. If that object doesn't exist, then there is no v2 rbd image
5011 * with the supplied name.
5013 * This function will record the given rbd_dev's image_id field if
5014 * it can be determined, and in that case will return 0. If any
5015 * errors occur a negative errno will be returned and the rbd_dev's
5016 * image_id field will be unchanged (and should be NULL).
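/*
 * Illustrative sketch (object name per RBD_ID_PREFIX in rbd_types.h):
 * for a format 2 image named "myimage" the id object is
 * "rbd_id.myimage", and a "get_id" class method call on that object
 * returns the encoded image id string.
 */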
5018 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5027 * When probing a parent image, the image id is already
5028 * known (and the image name likely is not). There's no
5029 * need to fetch the image id again in this case. We
5030 * do still need to set the image format though.
5032 if (rbd_dev->spec->image_id) {
5033 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5035 return 0;
5036 }
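/*
 * Convention used in the branch above (descriptive note): a non-empty
 * image_id implies a format 2 image, while an empty image_id string
 * marks a format 1 image, which has no separate id of its own.
 */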
5039 * First, see if the format 2 image id file exists, and if
5040 * so, get the image's persistent id from it.
5042 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
5043 object_name = kmalloc(size, GFP_NOIO);
5044 if (!object_name)
5045 return -ENOMEM;
5046 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
5047 dout("rbd id object name is %s\n", object_name);
5049 /* Response will be an encoded string, which includes a length */
5051 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5052 response = kzalloc(size, GFP_NOIO);
5053 if (!response) {
5054 ret = -ENOMEM;
5055 goto out;
5056 }
5058 /* If it doesn't exist we'll assume it's a format 1 image */
5060 ret = rbd_obj_method_sync(rbd_dev, object_name,
5061 "rbd", "get_id", NULL, 0,
5062 response, RBD_IMAGE_ID_LEN_MAX);
5063 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5064 if (ret == -ENOENT) {
5065 image_id = kstrdup("", GFP_KERNEL);
5066 ret = image_id ? 0 : -ENOMEM;
5067 if (!ret)
5068 rbd_dev->image_format = 1;
5069 } else if (ret >= 0) {
5070 void *p = response;
5072 image_id = ceph_extract_encoded_string(&p, p + ret,
5073 NULL, GFP_NOIO);
5074 ret = PTR_ERR_OR_ZERO(image_id);
5075 if (!ret)
5076 rbd_dev->image_format = 2;
5077 }
5079 if (!ret) {
5080 rbd_dev->spec->image_id = image_id;
5081 dout("image_id is %s\n", image_id);
5082 }
5084 out:
5085 kfree(response);
5086 kfree(object_name);
5088 return ret;
5089 }
5091 * Undo whatever state changes are made by v1 or v2 header info
5094 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5096 struct rbd_image_header *header;
5098 /* Drop parent reference unless it's already been done (or none) */
5100 if (rbd_dev->parent_overlap)
5101 rbd_dev_parent_put(rbd_dev);
5103 /* Free dynamic fields from the header, then zero it out */
5105 header = &rbd_dev->header;
5106 ceph_put_snap_context(header->snapc);
5107 kfree(header->snap_sizes);
5108 kfree(header->snap_names);
5109 kfree(header->object_prefix);
5110 memset(header, 0, sizeof (*header));
5113 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5117 ret = rbd_dev_v2_object_prefix(rbd_dev);
5118 if (ret)
5119 goto out_err;
5122 * Get and check the features for the image. Currently the
5123 * features are assumed to never change.
5125 ret = rbd_dev_v2_features(rbd_dev);
5126 if (ret)
5127 goto out_err;
5129 /* If the image supports fancy striping, get its parameters */
5131 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5132 ret = rbd_dev_v2_striping_info(rbd_dev);
5133 if (ret < 0)
5134 goto out_err;
5135 }
5136 /* No support for crypto or compression types in format 2 images */
5138 return 0;
5139 out_err:
5140 rbd_dev->header.features = 0;
5141 kfree(rbd_dev->header.object_prefix);
5142 rbd_dev->header.object_prefix = NULL;
5144 return ret;
5145 }
5147 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
5149 struct rbd_device *parent = NULL;
5150 struct rbd_spec *parent_spec;
5151 struct rbd_client *rbdc;
5154 if (!rbd_dev->parent_spec)
5157 * We need to pass a reference to the client and the parent
5158 * spec when creating the parent rbd_dev. Images related by
5159 * parent/child relationships always share both.
5161 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
5162 rbdc = __rbd_get_client(rbd_dev->rbd_client);
5164 ret = -ENOMEM;
5165 parent = rbd_dev_create(rbdc, parent_spec);
5166 if (!parent)
5167 goto out_err;
5169 ret = rbd_dev_image_probe(parent, false);
5170 if (ret < 0)
5171 goto out_err;
5172 rbd_dev->parent = parent;
5173 atomic_set(&rbd_dev->parent_ref, 1);
5175 return 0;
5176 out_err:
5177 if (parent) {
5178 rbd_dev_unparent(rbd_dev);
5179 kfree(rbd_dev->header_name);
5180 rbd_dev_destroy(parent);
5181 } else {
5182 rbd_put_client(rbdc);
5183 rbd_spec_put(parent_spec);
5184 }
5186 return ret;
5187 }
5189 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5193 /* Get an id and fill in device name. */
5195 ret = rbd_dev_id_get(rbd_dev);
5199 BUILD_BUG_ON(DEV_NAME_LEN
5200 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
5201 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
5203 /* Record our major and minor device numbers. */
5205 if (!single_major) {
5206 ret = register_blkdev(0, rbd_dev->name);
5207 if (ret < 0)
5208 goto err_out_id;
5210 rbd_dev->major = ret;
5211 rbd_dev->minor = 0;
5212 } else {
5213 rbd_dev->major = rbd_major;
5214 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5215 }
5217 /* Set up the blkdev mapping. */
5219 ret = rbd_init_disk(rbd_dev);
5220 if (ret)
5221 goto err_out_blkdev;
5223 ret = rbd_dev_mapping_set(rbd_dev);
5224 if (ret)
5225 goto err_out_disk;
5227 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5228 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
5230 rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name);
5231 if (!rbd_dev->rq_wq) {
5232 ret = -ENOMEM;
5233 goto err_out_mapping;
5234 }
5236 ret = rbd_bus_add_dev(rbd_dev);
5237 if (ret)
5238 goto err_out_workqueue;
5240 /* Everything's ready. Announce the disk to the world. */
5242 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5243 add_disk(rbd_dev->disk);
5245 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5246 (unsigned long long) rbd_dev->mapping.size);
5248 return ret;
5250 err_out_workqueue:
5251 destroy_workqueue(rbd_dev->rq_wq);
5252 rbd_dev->rq_wq = NULL;
5253 err_out_mapping:
5254 rbd_dev_mapping_clear(rbd_dev);
5255 err_out_disk:
5256 rbd_free_disk(rbd_dev);
5257 err_out_blkdev:
5258 if (!single_major)
5259 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5260 err_out_id:
5261 rbd_dev_id_put(rbd_dev);
5262 rbd_dev_mapping_clear(rbd_dev);
5264 return ret;
5265 }
5267 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5269 struct rbd_spec *spec = rbd_dev->spec;
5272 /* Record the header object name for this rbd image. */
5274 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5276 if (rbd_dev->image_format == 1)
5277 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
5279 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
5281 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
5282 if (!rbd_dev->header_name)
5283 return -ENOMEM;
5285 if (rbd_dev->image_format == 1)
5286 sprintf(rbd_dev->header_name, "%s%s",
5287 spec->image_name, RBD_SUFFIX);
5288 else
5289 sprintf(rbd_dev->header_name, "%s%s",
5290 RBD_HEADER_PREFIX, spec->image_id);
5292 return 0;
5293 }
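/*
 * Example (illustrative, using RBD_SUFFIX and RBD_HEADER_PREFIX from
 * rbd_types.h): a format 1 image named "myimage" gets header object
 * "myimage.rbd", while a format 2 image with id "abc123" gets header
 * object "rbd_header.abc123".
 */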
5294 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5296 rbd_dev_unprobe(rbd_dev);
5297 kfree(rbd_dev->header_name);
5298 rbd_dev->header_name = NULL;
5299 rbd_dev->image_format = 0;
5300 kfree(rbd_dev->spec->image_id);
5301 rbd_dev->spec->image_id = NULL;
5303 rbd_dev_destroy(rbd_dev);
5307 * Probe for the existence of the header object for the given rbd
5308 * device. If this image is the one being mapped (i.e., not a
5309 * parent), initiate a watch on its header object before using that
5310 * object to get detailed information about the rbd image.
5312 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5317 * Get the id from the image id object. Unless there's an
5318 * error, rbd_dev->spec->image_id will be filled in with
5319 * a dynamically-allocated string, and rbd_dev->image_format
5320 * will be set to either 1 or 2.
5322 ret = rbd_dev_image_id(rbd_dev);
5323 if (ret)
5324 return ret;
5326 ret = rbd_dev_header_name(rbd_dev);
5327 if (ret)
5328 goto err_out_format;
5330 if (mapping) {
5331 ret = rbd_dev_header_watch_sync(rbd_dev);
5332 if (ret)
5333 goto out_header_name;
5334 }
5336 ret = rbd_dev_header_info(rbd_dev);
5337 if (ret)
5338 goto err_out_watch;
5341 * If this image is the one being mapped, we have pool name and
5342 * id, image name and id, and snap name - need to fill snap id.
5343 * Otherwise this is a parent image, identified by pool, image
5344 * and snap ids - need to fill in names for those ids.
5346 if (mapping)
5347 ret = rbd_spec_fill_snap_id(rbd_dev);
5348 else
5349 ret = rbd_spec_fill_names(rbd_dev);
5350 if (ret)
5351 goto err_out_probe;
5353 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5354 ret = rbd_dev_v2_parent_info(rbd_dev);
5355 if (ret)
5356 goto err_out_probe;
5359 * Need to warn users if this image is the one being
5360 * mapped and has a parent.
5362 if (mapping && rbd_dev->parent_spec)
5363 rbd_warn(rbd_dev,
5364 "WARNING: kernel layering is EXPERIMENTAL!");
5365 }
5367 ret = rbd_dev_probe_parent(rbd_dev);
5368 if (ret)
5369 goto err_out_probe;
5371 dout("discovered format %u image, header name is %s\n",
5372 rbd_dev->image_format, rbd_dev->header_name);
5373 return 0;
5375 err_out_probe:
5376 rbd_dev_unprobe(rbd_dev);
5377 err_out_watch:
5378 if (mapping)
5379 rbd_dev_header_unwatch_sync(rbd_dev);
5380 out_header_name:
5381 kfree(rbd_dev->header_name);
5382 rbd_dev->header_name = NULL;
5383 err_out_format:
5384 rbd_dev->image_format = 0;
5385 kfree(rbd_dev->spec->image_id);
5386 rbd_dev->spec->image_id = NULL;
5388 return ret;
5389 }
5390 static ssize_t do_rbd_add(struct bus_type *bus,
5394 struct rbd_device *rbd_dev = NULL;
5395 struct ceph_options *ceph_opts = NULL;
5396 struct rbd_options *rbd_opts = NULL;
5397 struct rbd_spec *spec = NULL;
5398 struct rbd_client *rbdc;
5402 if (!try_module_get(THIS_MODULE))
5403 return -ENODEV;
5405 /* parse add command */
5406 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5407 if (rc < 0)
5408 goto err_out_module;
5409 read_only = rbd_opts->read_only;
5410 kfree(rbd_opts);
5411 rbd_opts = NULL; /* done with this */
5413 rbdc = rbd_get_client(ceph_opts);
5414 if (IS_ERR(rbdc)) {
5415 rc = PTR_ERR(rbdc);
5416 goto err_out_args;
5417 }
5419 /* pick the pool */
5420 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
5421 if (rc < 0)
5422 goto err_out_client;
5423 spec->pool_id = (u64)rc;
5425 /* The ceph file layout needs to fit pool id in 32 bits */
5427 if (spec->pool_id > (u64)U32_MAX) {
5428 rbd_warn(NULL, "pool id too large (%llu > %u)",
5429 (unsigned long long)spec->pool_id, U32_MAX);
5430 rc = -EIO;
5431 goto err_out_client;
5432 }
5434 rbd_dev = rbd_dev_create(rbdc, spec);
5435 if (!rbd_dev)
5436 goto err_out_client;
5437 rbdc = NULL; /* rbd_dev now owns this */
5438 spec = NULL; /* rbd_dev now owns this */
5440 rc = rbd_dev_image_probe(rbd_dev, true);
5441 if (rc < 0)
5442 goto err_out_rbd_dev;
5444 /* If we are mapping a snapshot it must be marked read-only */
5446 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5447 read_only = true;
5448 rbd_dev->mapping.read_only = read_only;
5450 rc = rbd_dev_device_setup(rbd_dev);
5451 if (rc) {
5453 * rbd_dev_header_unwatch_sync() can't be moved into
5454 * rbd_dev_image_release() without refactoring, see
5455 * commit 1f3ef78861ac.
5457 rbd_dev_header_unwatch_sync(rbd_dev);
5458 rbd_dev_image_release(rbd_dev);
5459 goto err_out_module;
5460 }
5462 return count;
5464 err_out_rbd_dev:
5465 rbd_dev_destroy(rbd_dev);
5466 err_out_client:
5467 rbd_put_client(rbdc);
5468 err_out_args:
5469 rbd_spec_put(spec);
5470 err_out_module:
5471 module_put(THIS_MODULE);
5473 dout("Error adding device %s\n", buf);
5475 return (ssize_t)rc;
5476 }
5478 static ssize_t rbd_add(struct bus_type *bus,
5479 const char *buf,
5480 size_t count)
5481 {
5482 if (single_major)
5483 return -EINVAL;
5485 return do_rbd_add(bus, buf, count);
5486 }
5488 static ssize_t rbd_add_single_major(struct bus_type *bus,
5489 const char *buf,
5490 size_t count)
5491 {
5492 return do_rbd_add(bus, buf, count);
5493 }
5495 static void rbd_dev_device_release(struct device *dev)
5497 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5499 destroy_workqueue(rbd_dev->rq_wq);
5500 rbd_free_disk(rbd_dev);
5501 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5502 rbd_dev_mapping_clear(rbd_dev);
5503 if (!single_major)
5504 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5505 rbd_dev_id_put(rbd_dev);
5506 rbd_dev_mapping_clear(rbd_dev);
5509 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5511 while (rbd_dev->parent) {
5512 struct rbd_device *first = rbd_dev;
5513 struct rbd_device *second = first->parent;
5514 struct rbd_device *third;
5517 * Follow to the parent with no grandparent and
5518 * remove it.
5520 while (second && (third = second->parent)) {
5521 first = second;
5522 second = third;
5523 }
5525 rbd_dev_image_release(second);
5526 first->parent = NULL;
5527 first->parent_overlap = 0;
5529 rbd_assert(first->parent_spec);
5530 rbd_spec_put(first->parent_spec);
5531 first->parent_spec = NULL;
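/*
 * Illustrative teardown order (a sketch): for a layered chain
 * dev -> parent1 -> parent2, each pass of the loop walks to the
 * deepest image with no grandparent, so parent2 is released first,
 * then parent1, until dev itself has no parent left.
 */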
5535 static ssize_t do_rbd_remove(struct bus_type *bus,
5539 struct rbd_device *rbd_dev = NULL;
5540 struct list_head *tmp;
5543 bool already = false;
5546 ret = kstrtoul(buf, 10, &ul);
5547 if (ret)
5548 return ret;
5550 /* convert to int; abort if we lost anything in the conversion */
5551 dev_id = (int)ul;
5552 if (dev_id != ul)
5553 return -EINVAL;
5555 ret = -ENOENT;
5556 spin_lock(&rbd_dev_list_lock);
5557 list_for_each(tmp, &rbd_dev_list) {
5558 rbd_dev = list_entry(tmp, struct rbd_device, node);
5559 if (rbd_dev->dev_id == dev_id) {
5560 ret = 0;
5561 break;
5562 }
5563 }
5564 if (!ret) {
5565 spin_lock_irq(&rbd_dev->lock);
5566 if (rbd_dev->open_count)
5567 ret = -EBUSY;
5568 else
5569 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5570 &rbd_dev->flags);
5571 spin_unlock_irq(&rbd_dev->lock);
5572 }
5573 spin_unlock(&rbd_dev_list_lock);
5574 if (ret < 0 || already)
5575 return ret;
5577 rbd_dev_header_unwatch_sync(rbd_dev);
5579 * flush remaining watch callbacks - these must be complete
5580 * before the osd_client is shutdown
5582 dout("%s: flushing notifies", __func__);
5583 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
5586 * Don't free anything from rbd_dev->disk until after all
5587 * notifies are completely processed. Otherwise
5588 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5589 * in a potential use after free of rbd_dev->disk or rbd_dev.
5591 rbd_bus_del_dev(rbd_dev);
5592 rbd_dev_image_release(rbd_dev);
5593 module_put(THIS_MODULE);
5595 return count;
5596 }
5598 static ssize_t rbd_remove(struct bus_type *bus,
5599 const char *buf,
5600 size_t count)
5601 {
5602 if (single_major)
5603 return -EINVAL;
5605 return do_rbd_remove(bus, buf, count);
5606 }
5608 static ssize_t rbd_remove_single_major(struct bus_type *bus,
5609 const char *buf,
5610 size_t count)
5611 {
5612 return do_rbd_remove(bus, buf, count);
5613 }
5616 * create control files in sysfs
5619 static int rbd_sysfs_init(void)
5623 ret = device_register(&rbd_root_dev);
5624 if (ret < 0)
5625 return ret;
5627 ret = bus_register(&rbd_bus_type);
5628 if (ret < 0)
5629 device_unregister(&rbd_root_dev);
5631 return ret;
5632 }
5634 static void rbd_sysfs_cleanup(void)
5636 bus_unregister(&rbd_bus_type);
5637 device_unregister(&rbd_root_dev);
5640 static int rbd_slab_init(void)
5642 rbd_assert(!rbd_img_request_cache);
5643 rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5644 sizeof (struct rbd_img_request),
5645 __alignof__(struct rbd_img_request),
5646 0, NULL);
5647 if (!rbd_img_request_cache)
5648 return -ENOMEM;
5650 rbd_assert(!rbd_obj_request_cache);
5651 rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5652 sizeof (struct rbd_obj_request),
5653 __alignof__(struct rbd_obj_request),
5654 0, NULL);
5655 if (!rbd_obj_request_cache)
5656 goto out_err;
5658 rbd_assert(!rbd_segment_name_cache);
5659 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5660 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
5661 if (rbd_segment_name_cache)
5662 return 0;
5663 out_err:
5664 if (rbd_obj_request_cache) {
5665 kmem_cache_destroy(rbd_obj_request_cache);
5666 rbd_obj_request_cache = NULL;
5669 kmem_cache_destroy(rbd_img_request_cache);
5670 rbd_img_request_cache = NULL;
5672 return -ENOMEM;
5673 }
5675 static void rbd_slab_exit(void)
5677 rbd_assert(rbd_segment_name_cache);
5678 kmem_cache_destroy(rbd_segment_name_cache);
5679 rbd_segment_name_cache = NULL;
5681 rbd_assert(rbd_obj_request_cache);
5682 kmem_cache_destroy(rbd_obj_request_cache);
5683 rbd_obj_request_cache = NULL;
5685 rbd_assert(rbd_img_request_cache);
5686 kmem_cache_destroy(rbd_img_request_cache);
5687 rbd_img_request_cache = NULL;
5690 static int __init rbd_init(void)
5694 if (!libceph_compatible(NULL)) {
5695 rbd_warn(NULL, "libceph incompatibility (quitting)");
5699 rc = rbd_slab_init();
5700 if (rc)
5701 return rc;
5703 if (single_major) {
5704 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5705 if (rbd_major < 0) {
5706 rc = rbd_major;
5707 goto err_out_slab;
5708 }
5709 }
5711 rc = rbd_sysfs_init();
5712 if (rc)
5713 goto err_out_blkdev;
5716 pr_info("loaded (major %d)\n", rbd_major);
5718 pr_info("loaded\n");
5720 return 0;
5722 err_out_blkdev:
5723 if (single_major)
5724 unregister_blkdev(rbd_major, RBD_DRV_NAME);
5725 err_out_slab:
5726 rbd_slab_exit();
5727 return rc;
5728 }
5730 static void __exit rbd_exit(void)
5732 ida_destroy(&rbd_dev_id_ida);
5733 rbd_sysfs_cleanup();
5734 if (single_major)
5735 unregister_blkdev(rbd_major, RBD_DRV_NAME);
5736 rbd_slab_exit();
5737 }
5739 module_init(rbd_init);
5740 module_exit(rbd_exit);
5742 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
5743 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5744 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5745 /* following authorship retained from original osdblk.c */
5746 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5748 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
5749 MODULE_LICENSE("GPL");